| code (string, 114 to 1.05M chars) | path (string, 3 to 312 chars) | quality_prob (float64, 0.5 to 0.99) | learning_prob (float64, 0.2 to 1) | filename (string, 3 to 168 chars) | kind (string, 1 class) |
|---|---|---|---|---|---|
defmodule Steamex.SteamID do
@moduledoc """
Various utility functions related to SteamIDs.
"""
@doc """
Converts a 64-bit community SteamID to the legacy SteamID format.
## Examples
iex> Steamex.SteamID.community_id_to_steam_id(76561197961358433)
"STEAM_0:1:546352"
"""
@spec community_id_to_steam_id(pos_integer) :: binary
def community_id_to_steam_id(community_id) do
steam_id1 = rem(community_id, 2)
steam_id2 = community_id - 76561197960265728
unless steam_id2 > 0 do
raise "SteamID #{community_id} is too small."
end
steam_id2 = div(steam_id2 - steam_id1, 2)
"STEAM_0:#{steam_id1}:#{steam_id2}"
end
@doc """
Converts a 64-bit community SteamID to the modern SteamID format (aka SteamID 3).
## Examples
iex> Steamex.SteamID.community_id_to_steam_id3(76561197961358433)
"[U:1:1092705]"
"""
@spec community_id_to_steam_id3(pos_integer) :: binary
def community_id_to_steam_id3(community_id) do
unless rem(community_id, 2) == 1 do
raise "SteamID3 only supports public universe"
end
steam_id2 = community_id - 76561197960265728
unless steam_id2 > 0 do
raise "SteamID #{community_id} is too small."
end
"[U:1:#{steam_id2}]"
end
@doc """
Converts a SteamID as reported by game servers or a SteamID3 to a 64-bit
community SteamID.
## Examples
iex> Steamex.SteamID.steam_id_to_community_id("STEAM_0:1:546352")
76561197961358433
iex> Steamex.SteamID.steam_id_to_community_id("[U:1:1092705]")
76561197961358433
"""
@spec steam_id_to_community_id(binary) :: pos_integer
def steam_id_to_community_id(<<"STEAM_", _ :: binary-size(1),":", steam_id1 :: binary-size(1), ":", steam_id2 :: binary>>) do
{steam_id1, ""} = Integer.parse(steam_id1)
{steam_id2, ""} = Integer.parse(steam_id2)
steam_id1 + steam_id2 * 2 + 76561197960265728
end
def steam_id_to_community_id(<<"[U:", steam_id1 :: binary-size(1), ":", steam_id2 :: binary>>) do
{steam_id1, ""} = Integer.parse(steam_id1)
{steam_id2, "]"} = Integer.parse(steam_id2)
steam_id1 + steam_id2 + 76561197960265727
end
def steam_id_to_community_id(steam_id) do
raise "Cannot convert SteamID \"#{steam_id}\" to a community ID."
end
@doc """
Returns the base URL for the given 64-bit community SteamID or custom URL.
## Examples
iex> Steamex.SteamID.base_url(76561197961358433)
"http://steamcommunity.com/profiles/76561197961358433"
iex> Steamex.SteamID.base_url("antipax")
"http://steamcommunity.com/id/antipax"
"""
@spec base_url(pos_integer | binary) :: binary
def base_url(community_id_or_custom_url) when is_integer community_id_or_custom_url do
"http://steamcommunity.com/profiles/#{community_id_or_custom_url}"
end
def base_url(community_id_or_custom_url) when is_binary community_id_or_custom_url do
"http://steamcommunity.com/id/#{community_id_or_custom_url}"
end
end
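# Usage sketch (comments only, not part of the library): a round trip
# between the three formats, reusing the values from the doctests above.
#
# iex> community_id = Steamex.SteamID.steam_id_to_community_id("STEAM_0:1:546352")
# 76561197961358433
# iex> Steamex.SteamID.community_id_to_steam_id3(community_id)
# "[U:1:1092705]"
# iex> Steamex.SteamID.base_url(community_id)
# "http://steamcommunity.com/profiles/76561197961358433"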
| lib/steamex/steam_id.ex | 0.550366 | 0.412471 | steam_id.ex | starcoder |
defmodule HPack do
@moduledoc """
Implementation of the [HPack](https://http2.github.io/http2-spec/compression.html) protocol, a compression format for efficiently representing HTTP header fields, to be used in HTTP/2.
"""
use Bitwise
alias HPack.Huffman
alias HPack.Table
@type name() :: String.t()
@type value() :: String.t()
@type header() :: {name(), value()}
@type headers() :: [header()]
@type header_block_fragment :: binary
@doc """
Encodes a list of headers into a `header block fragment` as specified in RFC 7541.
Returns the `header block fragment`.
### Examples
iex> ctx = HPack.Table.new(1000)
iex> HPack.encode([{":method", "GET"}], ctx)
{:ok, %HPack.Table{size: 1000, table: []}, << 0b10000010 >>}
"""
@spec encode(headers(), Table.t()) ::
{:ok, Table.t(), header_block_fragment()} | {:error, :encode_error}
def encode(headers, table), do: encode(table, headers, <<>>)
defp encode(table, [], hbf), do: {:ok, table, hbf}
defp encode(table, [{name, value} | headers], hbf) do
{table, partial} =
case Table.find(name, value, table) do
{:fullindex, index} -> encode_indexed(table, index)
{:keyindex, index} -> encode_literal_indexed(table, index, value)
{:error, :not_found} -> encode_literal_not_indexed(table, name, value)
end
encode(table, headers, hbf <> partial)
end
defp encode(_table, _headers, _hbf), do: {:error, :encode_error}
defp encode_indexed(table, index), do: {table, <<1::1, encode_int7(index)::bitstring>>}
defp encode_literal_indexed(table, index, value) do
with {:ok, {name, _}} <- Table.lookup(index, table),
{:ok, table} <- Table.add({name, value}, table) do
{table, <<0::1, 1::1, encode_int6(index)::bitstring, encode_string(value)::binary>>}
end
end
defp encode_literal_not_indexed(table, name, value) do
with {:ok, table} <- Table.add({name, value}, table),
do:
{table,
<<0::1, 1::1, 0::6, encode_string(name)::binary, encode_string(value)::binary>>}
end
# defp encode_literal_never_indexed(key, value)
defp encode_string(string) do
with {:ok, huffman} <- Huffman.encode(string) do
length = byte_size(huffman)
<<1::1, encode_int7(length)::bitstring, huffman::binary>>
end
end
defp encode_int6(i) when i < 0b111111, do: <<i::6>>
defp encode_int6(i), do: <<0b111111::6, encode_big_int(i - 0b111111)::bitstring>>
defp encode_int7(i) when i < 0b1111111, do: <<i::7>>
defp encode_int7(i), do: <<0b1111111::7, encode_big_int(i - 0b1111111)::bitstring>>
defp encode_big_int(i) when i < 0b10000000, do: <<0::1, i::7>>
defp encode_big_int(i), do: <<1::1, i::7, encode_big_int(i >>> 7)::binary>>
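# Worked example of the RFC 7541 section 5.1 prefix-integer scheme, as a
# sketch: an index too large for the 7-bit prefix fills the prefix with
# ones (127) and emits the remainder in 7-bit groups, LSB first, with a
# continuation bit.
#
# encode_int7(300)
# #=> <<127::7, 0b10101101, 0b00000001>>  # 300 = 127 + 45 + (1 <<< 7)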
@doc """
Decodes a `header block fragment` as specified in RFC 7541.
Returns the decoded headers as a List.
### Examples
iex> ctx = HPack.Table.new(1000)
iex> HPack.decode(<< 0x82 >>, ctx)
{:ok, %HPack.Table{size: 1000, table: []}, [{":method", "GET"}]}
"""
@spec decode(header_block_fragment(), Table.t(), Table.size() | nil) ::
{:ok, Table.t(), headers()} | {:error, :decode_error}
def decode(hbf, table, max_size \\ nil)
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 0 | 1 | Max size (5+) |
# +---+---------------------------+
# Figure 12: Maximum Dynamic Table Size Change
def decode(<<0::2, 1::1, rest::bitstring>>, table, max_size) do
with {:ok, {size, rest}} <- parse_int5(rest),
{:ok, table} <- Table.resize(size, table, max_size),
do: decode(rest, table, max_size)
end
def decode(hbf, table, _max_size) do
parse(table, hbf, [])
|> case do
{:ok, _table, _headers} = result -> result
_ -> {:error, :decode_error}
end
end
defp parse(table, <<>>, headers), do: {:ok, table, Enum.reverse(headers)}
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 1 | Index (7+) |
# +---+---------------------------+
# Figure 5: Indexed Header Field
defp parse(table, <<1::1, rest::bitstring>>, headers) do
with {:ok, {index, rest}} <- parse_int7(rest),
{:ok, {header, value}} <- Table.lookup(index, table),
do: parse(table, rest, [{header, value} | headers])
end
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 1 | 0 |
# +---+---+-----------------------+
# | H | Name Length (7+) |
# +---+---------------------------+
# | Name String (Length octets) |
# +---+---------------------------+
# | H | Value Length (7+) |
# +---+---------------------------+
# | Value String (Length octets) |
# +-------------------------------+
# Figure 7: Literal Header Field with Incremental Indexing -- New Name
defp parse(table, <<0::1, 1::1, 0::6, rest::binary>>, headers) do
with {:ok, {name, rest}} <- parse_string(rest),
{:ok, {value, more_headers}} <- parse_string(rest) do
with {:ok, table} <- Table.add({name, value}, table),
do: parse(table, more_headers, [{name, value} | headers])
end
end
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 1 | Index (6+) |
# +---+---+-----------------------+
# | H | Value Length (7+) |
# +---+---------------------------+
# | Value String (Length octets) |
# +-------------------------------+
# Figure 6: Literal Header Field with Incremental Indexing -- Indexed Name
defp parse(table, <<0::1, 1::1, rest::bitstring>>, headers) do
with {:ok, {index, rest}} <- parse_int6(rest),
{:ok, {value, more_headers}} <- parse_string(rest),
{:ok, {name, _}} <- Table.lookup(index, table),
{:ok, table} <- Table.add({name, value}, table) do
parse(table, more_headers, [{name, value} | headers])
end
end
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 0 | 0 | 0 | 0 |
# +---+---+-----------------------+
# | H | Name Length (7+) |
# +---+---------------------------+
# | Name String (Length octets) |
# +---+---------------------------+
# | H | Value Length (7+) |
# +---+---------------------------+
# | Value String (Length octets) |
# +-------------------------------+
# Figure 9: Literal Header Field without Indexing -- New Name
defp parse(table, <<0::4, 0::4, rest::binary>>, headers) do
with {:ok, {name, rest}} <- parse_string(rest),
{:ok, {value, more_headers}} <- parse_string(rest),
do: parse(table, more_headers, [{name, value} | headers])
end
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 0 | 0 | 0 | Index (4+) |
# +---+---+-----------------------+
# | H | Value Length (7+) |
# +---+---------------------------+
# | Value String (Length octets) |
# +-------------------------------+
# Figure 8: Literal Header Field without Indexing β Indexed Name
defp parse(table, <<0::4, rest::bitstring>>, headers) do
with {:ok, {index, rest}} <- parse_int4(rest),
{:ok, {value, more_headers}} <- parse_string(rest),
{:ok, {name, _}} <- Table.lookup(index, table),
do: parse(table, more_headers, [{name, value} | headers])
end
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 0 | 0 | 1 | 0 |
# +---+---+-----------------------+
# | H | Name Length (7+) |
# +---+---------------------------+
# | Name String (Length octets) |
# +---+---------------------------+
# | H | Value Length (7+) |
# +---+---------------------------+
# | Value String (Length octets) |
# +-------------------------------+
# Figure 11: Literal Header Field Never Indexed -- New Name
defp parse(table, <<0::3, 1::1, 0::4, rest::binary>>, headers) do
with {:ok, {name, rest}} <- parse_string(rest),
{:ok, {value, more_headers}} <- parse_string(rest),
do: parse(table, more_headers, [{name, value} | headers])
end
# 0 1 2 3 4 5 6 7
# +---+---+---+---+---+---+---+---+
# | 0 | 0 | 0 | 1 | Index (4+) |
# +---+---+-----------------------+
# | H | Value Length (7+) |
# +---+---------------------------+
# | Value String (Length octets) |
# +-------------------------------+
# Figure 10: Literal Header Field Never Indexed -- Indexed Name
defp parse(table, <<0::3, 1::1, rest::bitstring>>, headers) do
with {:ok, {index, rest}} <- parse_int4(rest),
{:ok, {value, more_headers}} <- parse_string(rest),
{:ok, {name, _}} <- Table.lookup(index, table),
do: parse(table, more_headers, [{name, value} | headers])
end
defp parse(_table, _binary, _headers), do: {:error, :decode_error}
defp parse_string(<<0::1, rest::bitstring>>) do
with {:ok, {length, rest}} <- parse_int7(rest),
<<value::binary-size(length), rest::binary>> <- rest,
do: {:ok, {value, rest}}
end
defp parse_string(<<1::1, rest::bitstring>>) do
with {:ok, {length, rest}} <- parse_int7(rest),
<<value::binary-size(length), rest::binary>> <- rest,
{:ok, encoded} <- Huffman.decode(value),
do: {:ok, {encoded, rest}}
end
defp parse_string(_binary), do: {:error, :decode_error}
defp parse_int4(<<0b1111::4, rest::binary>>), do: parse_big_int(rest, 15, 0)
defp parse_int4(<<int::4, rest::binary>>), do: {:ok, {int, rest}}
defp parse_int4(_binary), do: {:error, :decode_error}
defp parse_int5(<<0b11111::5, rest::binary>>), do: parse_big_int(rest, 31, 0)
defp parse_int5(<<int::5, rest::binary>>), do: {:ok, {int, rest}}
defp parse_int5(_binary), do: {:error, :decode_error}
defp parse_int6(<<0b111111::6, rest::binary>>), do: parse_big_int(rest, 63, 0)
defp parse_int6(<<int::6, rest::binary>>), do: {:ok, {int, rest}}
defp parse_int6(_binary), do: {:error, :decode_error}
defp parse_int7(<<0b1111111::7, rest::binary>>), do: parse_big_int(rest, 127, 0)
defp parse_int7(<<int::7, rest::binary>>), do: {:ok, {int, rest}}
defp parse_int7(_binary), do: {:error, :decode_error}
defp parse_big_int(<<0::1, value::7, rest::binary>>, int, m),
do: {:ok, {int + (value <<< m), rest}}
defp parse_big_int(<<1::1, value::7, rest::binary>>, int, m),
do: parse_big_int(rest, int + (value <<< m), m + 7)
defp parse_big_int(_binary, _int, _m), do: {:error, :decode_error}
end
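# Round-trip sketch built from the two doctests above: encoding a header
# list and decoding the resulting fragment should agree.
#
# iex> table = HPack.Table.new(1000)
# iex> {:ok, table, hbf} = HPack.encode([{":method", "GET"}], table)
# iex> hbf
# <<0b10000010>>
# iex> HPack.decode(hbf, table)
# {:ok, %HPack.Table{size: 1000, table: []}, [{":method", "GET"}]}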
| lib/hpack.ex | 0.886844 | 0.616128 | hpack.ex | starcoder |
defmodule Stream do
@moduledoc """
Module for creating and composing streams.
Streams are composable, lazy enumerables. Any enumerable that generates
items one by one during enumeration is called a stream. For example,
Elixir's `Range` is a stream:
iex> range = 1..5
1..5
iex> Enum.map range, &(&1 * 2)
[2,4,6,8,10]
In the example above, as we mapped over the range, the elements being
enumerated were created one by one, during enumeration. The `Stream`
module allows us to map the range, without triggering its enumeration:
iex> range = 1..3
iex> stream = Stream.map(range, &(&1 * 2))
iex> Enum.map(stream, &(&1 + 1))
[3,5,7]
Notice we started with a range and then we created a stream that is
meant to multiply each item in the range by 2. At this point, no
computation was done yet. Just when `Enum.map/2` is called we
enumerate over each item in the range, multiplying it by 2 and adding 1.
We say the functions in `Stream` are *lazy* and the functions in `Enum`
are *eager*.
Due to their laziness, streams are useful when working with large
(or even infinite) collections. When chaining many operations with `Enum`,
intermediate lists are created, while `Stream` creates a recipe of
computations that are executed at a later moment. Let's see another
example:
1..3 |>
Enum.map(&IO.inspect(&1)) |>
Enum.map(&(&1 * 2)) |>
Enum.map(&IO.inspect(&1))
1
2
3
2
4
6
#=> [2,4,6]
Notice that we first printed each item in the list, then multiplied each
element by 2 and finally printed each new value. In this example, the list
was iterated three times. Let's see an example with streams:
stream = 1..3 |>
Stream.map(&IO.inspect(&1)) |>
Stream.map(&(&1 * 2)) |>
Stream.map(&IO.inspect(&1))
Enum.to_list(stream)
1
2
2
4
3
6
#=> [2,4,6]
Although the end result is the same, the order in which the items were
printed changed! With streams, we print the first item and then print
its double. In this example, the list was iterated just once!
That's what we meant when we first said that streams are composable,
lazy enumerables. Notice we could call `Stream.map/2` multiple times,
effectively composing the streams and they are lazy. The computations
are performed only when you call a function from the `Enum` module.
## Creating Streams
There are many functions in Elixir's standard library that return
streams, some examples are:
* `IO.stream/1` - Streams input lines, one by one;
* `URI.query_decoder/1` - Decodes a query string, pair by pair;
This module also allows us to create streams from any enumerable:
iex> stream = Stream.map([1,2,3], &(&1 * 2))
iex> Enum.map(stream, &(&1 + 1))
[3,5,7]
By simply passing a list (which is an enumerable) as the first argument
to `Stream.map/2`, we have automatically created a stream that will
multiply the items in the list by 2 on enumeration.
This module also provides other functions for creating streams, such as
`Stream.cycle/1`.
"""
defrecord Lazy, [:enumerable, :fun, :acc]
defimpl Enumerable, for: Lazy do
def reduce(Lazy[] = lazy, acc, fun) do
do_reduce(lazy, acc, fun, 0)
end
def count(Lazy[] = lazy) do
do_reduce(lazy, 0, fn _, acc -> acc + 1 end, 0)
end
def member?(Lazy[] = lazy, value) do
do_reduce(lazy, false, fn(entry, _) ->
if entry === value, do: throw({ :stream_lazy, 0, true }), else: false
end, 0)
end
defp do_reduce(Lazy[enumerable: enumerable, fun: f1, acc: nil], acc, fun, nesting) do
do_reduce(enumerable, acc, f1.(fun), nesting)
end
defp do_reduce(Lazy[enumerable: enumerable, fun: f1, acc: side], acc, fun, nesting) do
do_reduce(enumerable, { acc, side }, f1.(fun, nesting), nesting + 1)
end
defp do_reduce(enumerable, acc, fun, nesting) do
Enumerable.reduce(enumerable, acc, fun) |> remove_nesting(nesting)
catch
{ :stream_lazy, nesting, res } -> remove_nesting(res, nesting)
end
defp remove_nesting(acc, 0), do: acc
defp remove_nesting(acc, nesting), do: remove_nesting(elem(acc, 0), nesting - 1)
end
@type t :: Lazy.t | (acc, (element, acc -> acc) -> acc)
@type acc :: any
@type element :: any
@type index :: non_neg_integer
@type default :: any
@doc """
Creates a stream that enumerates each enumerable in an enumerable.
## Examples
iex> stream = Stream.concat([1..3, 4..6, 7..9])
iex> Enum.to_list(stream)
[1,2,3,4,5,6,7,8,9]
"""
@spec concat(Enumerable.t) :: t
def concat(enumerables) do
&do_concat(enumerables, &1, &2)
end
@doc """
Creates a stream that enumerates the first argument, followed by the second.
## Examples
iex> stream = Stream.concat(1..3, 4..6)
iex> Enum.to_list(stream)
[1,2,3,4,5,6]
iex> stream1 = Stream.cycle([1, 2, 3])
iex> stream2 = Stream.cycle([4, 5, 6])
iex> stream = Stream.concat(stream1, stream2)
iex> Enum.take(stream, 6)
[1,2,3,1,2,3]
"""
@spec concat(Enumerable.t, Enumerable.t) :: t
def concat(first, second) do
&do_concat([first, second], &1, &2)
end
defp do_concat(enumerables, acc, fun) do
Enumerable.reduce(enumerables, acc, &Enumerable.reduce(&1, &2, fun))
end
@doc """
Creates a stream that cycles through the given enumerable,
infinitely.
## Examples
iex> stream = Stream.cycle([1,2,3])
iex> Enum.take(stream, 5)
[1,2,3,1,2]
"""
@spec cycle(Enumerable.t) :: t
def cycle(enumerable) do
&do_cycle(enumerable, &1, &2)
end
defp do_cycle(enumerable, acc, fun) do
acc = Enumerable.reduce(enumerable, acc, fun)
do_cycle(enumerable, acc, fun)
end
@doc """
Lazily drops the next `n` items from the enumerable.
## Examples
iex> stream = Stream.drop(1..10, 5)
iex> Enum.to_list(stream)
[6,7,8,9,10]
"""
@spec drop(Enumerable.t, non_neg_integer) :: t
def drop(enumerable, n) when n >= 0 do
Lazy[enumerable: enumerable,
fun: fn(f1, _) ->
fn
_entry, { acc, n } when n > 0 ->
{ acc, n - 1 }
entry, { acc, n } ->
{ f1.(entry, acc), n }
end
end,
acc: n]
end
@doc """
Lazily drops elements of the enumerable while the given
function returns true.
## Examples
iex> stream = Stream.drop_while(1..10, &(&1 <= 5))
iex> Enum.to_list(stream)
[6,7,8,9,10]
"""
@spec drop_while(Enumerable.t, (element -> as_boolean(term))) :: t
def drop_while(enumerable, f) do
Lazy[enumerable: enumerable,
fun: fn(f1, _) ->
fn
entry, { acc, true } ->
if f.(entry), do: { acc, true }, else: { f1.(entry, acc), false }
entry, { acc, false } ->
{ f1.(entry, acc), false }
end
end,
acc: true]
end
@doc """
Creates a stream that will filter elements according to
the given function on enumeration.
## Examples
iex> stream = Stream.filter([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
iex> Enum.to_list(stream)
[2]
"""
@spec filter(Enumerable.t, (element -> as_boolean(term))) :: t
def filter(enumerable, f) do
Lazy[enumerable: enumerable,
fun: fn(f1) ->
fn(entry, acc) ->
if f.(entry), do: f1.(entry, acc), else: acc
end
end]
end
@doc """
Emit a sequence of values, starting with `start_value`. Successive
values are generated by calling `next_fun` on the previous value.
## Examples
iex> Stream.iterate(0, &(&1+1)) |> Enum.take(5)
[0,1,2,3,4]
"""
@spec iterate(element, (element -> element)) :: t
def iterate(start_value, next_fun) do
fn acc, fun ->
do_iterate(start_value, next_fun, fun.(start_value, acc), fun)
end
end
defp do_iterate(value, next_fun, acc, fun) do
next = next_fun.(value)
do_iterate(next, next_fun, fun.(next, acc), fun)
end
@doc """
Creates a stream that will apply the given function on
enumeration.
## Examples
iex> stream = Stream.map([1, 2, 3], fn(x) -> x * 2 end)
iex> Enum.to_list(stream)
[2,4,6]
"""
@spec map(Enumerable.t, (element -> any)) :: t
def map(enumerable, f) do
Lazy[enumerable: enumerable,
fun: fn(f1) ->
fn(entry, acc) ->
f1.(f.(entry), acc)
end
end]
end
@doc """
Creates a stream that will apply the given function on enumeration and
flatten the result.
## Examples
iex> stream = Stream.flat_map([1, 2, 3], fn(x) -> [x, x * 2] end)
iex> Enum.to_list(stream)
[1, 2, 2, 4, 3, 6]
"""
@spec flat_map(Enumerable.t, (element -> any)) :: t
def flat_map(enumerable, f) do
Lazy[enumerable: enumerable,
fun: fn(f1) ->
fn(entry, acc) -> do_flat_map(f.(entry), acc, f1) end
end]
end
defp do_flat_map(Lazy[] = lazy, acc, f1) do
try do
Enumerable.reduce(lazy, acc, fn x, y ->
try do
f1.(x, y)
catch
{ :stream_lazy, nesting, rest } ->
throw({ :stream_flat_map, nesting, rest })
end
end)
catch
{ :stream_flat_map, nesting, rest } ->
throw({ :stream_lazy, nesting, rest })
end
end
defp do_flat_map(enum, acc, f1) do
Enumerable.reduce(enum, acc, f1)
end
@doc """
Creates a stream that will reject elements according to
the given function on enumeration.
## Examples
iex> stream = Stream.reject([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
iex> Enum.to_list(stream)
[1,3]
"""
@spec reject(Enumerable.t, (element -> as_boolean(term))) :: t
def reject(enumerable, f) do
Lazy[enumerable: enumerable,
fun: fn(f1) ->
fn(entry, acc) ->
unless f.(entry), do: f1.(entry, acc), else: acc
end
end]
end
@doc """
Returns a stream generated by calling `generator_fun` repeatedly.
## Examples
iex> Stream.repeatedly(&:random.uniform/0) |> Enum.take(3)
[0.4435846174457203, 0.7230402056221108, 0.94581636451987]
"""
@spec repeatedly((() -> element)) :: t
def repeatedly(generator_fun)
when is_function(generator_fun, 0) do
&do_repeatedly(generator_fun, &1, &2)
end
defp do_repeatedly(generator_fun, acc, fun) do
do_repeatedly(generator_fun, fun.(generator_fun.(), acc), fun)
end
@doc """
Lazily takes the next `n` items from the enumerable and stops
enumeration.
## Examples
iex> stream = Stream.take(1..100, 5)
iex> Enum.to_list(stream)
[1,2,3,4,5]
iex> stream = Stream.cycle([1, 2, 3]) |> Stream.take(5)
iex> Enum.to_list(stream)
[1,2,3,1,2]
"""
@spec take(Enumerable.t, non_neg_integer) :: t
def take(_enumerable, 0), do: Lazy[enumerable: [], fun: &(&1)]
def take(enumerable, n) when n > 0 do
Lazy[enumerable: enumerable,
fun: fn(f1, nesting) ->
fn(entry, { acc, n }) ->
res = f1.(entry, acc)
if n > 1, do: { res, n-1 }, else: throw { :stream_lazy, nesting, res }
end
end,
acc: n]
end
@doc """
Lazily takes elements of the enumerable while the given
function returns true.
## Examples
iex> stream = Stream.take_while(1..100, &(&1 <= 5))
iex> Enum.to_list(stream)
[1,2,3,4,5]
"""
@spec take_while(Enumerable.t, (element -> as_boolean(term))) :: t
def take_while(enumerable, f) do
Lazy[enumerable: enumerable,
fun: fn(f1, nesting) ->
fn(entry, { acc, true }) ->
if f.(entry) do
{ f1.(entry, acc), true }
else
throw { :stream_lazy, nesting, acc }
end
end
end,
acc: true]
end
@doc """
Emit a sequence of values and accumulators. Successive values are generated by
calling `next_fun` with the previous accumulator.
If the return value is nil iteration ends.
## Examples
iex> Stream.unfold(5, fn 0 -> nil; n -> {n, n-1} end) |> Enum.to_list()
[5, 4, 3, 2, 1]
"""
@spec unfold(acc, (acc -> { element, acc } | nil)) :: t
def unfold(acc, f) do
fn acc1, f1 ->
do_unfold(acc, f, acc1, f1)
end
end
defp do_unfold(gen_acc, gen_fun, acc, fun) do
case gen_fun.(gen_acc) do
nil -> acc
{ v, new_gen_acc } -> do_unfold(new_gen_acc, gen_fun, fun.(v, acc), fun)
end
end
@doc """
Creates a stream where each item in the enumerable will
be accompanied by its index.
## Examples
iex> stream = Stream.with_index([1, 2, 3])
iex> Enum.to_list(stream)
[{1,0},{2,1},{3,2}]
"""
@spec with_index(Enumerable.t) :: t
def with_index(enumerable) do
Lazy[enumerable: enumerable,
fun: fn(f1, _) ->
fn(entry, { acc, counter }) ->
acc = f1.({ entry, counter }, acc)
{ acc, counter + 1 }
end
end,
acc: 0]
end
end
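# Composition sketch: each function above returns a lazy value, so a
# pipeline does no work until an `Enum` function consumes it.
#
# iex> 1..100
# ...> |> Stream.map(&(&1 * 3))
# ...> |> Stream.take_while(&(&1 < 10))
# ...> |> Enum.to_list
# [3,6,9]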
| lib/elixir/lib/stream.ex | 0.844024 | 0.669069 | stream.ex | starcoder |
defmodule Exoddic do
@moduledoc """
A means for working with odds and probability.
In particular, a means to convert between different representations.
"""
@typedoc """
A keyword list with conversion options
The `to` and `from` formats are identified by atoms corresponding to
the converter module names. They default to `:prob`
- `from`: the supplied input format
- `to`: the desired output format
- `for_display`: whether to nicely format the output as a string, defaults to `true`
"""
@type exoddic_options :: [from: atom, to: atom, for_display: boolean]
@spec parse_options(exoddic_options) :: {atom, atom, boolean}
defp parse_options(options) do
{module_from_options(options, :from), module_from_options(options, :to),
Keyword.get(options, :for_display, true)}
end
@spec module_from_options(exoddic_options, atom) :: atom
defp module_from_options(options, which) do
Module.concat([
__MODULE__,
Converter,
options |> Keyword.get(which, :prob) |> Atom.to_string() |> String.capitalize()
])
end
@doc """
Convert values among the various supported odds formats.
Conversion amounts provided as strings will receive a best effort attempt at conversion to
an appropriate number.
"""
@spec convert(number | String.t(), exoddic_options) :: String.t() | float
def convert(amount, options \\ []) do
{from_module, to_module, for_display} = parse_options(options)
final_amount = amount |> normalize |> from_module.to_prob |> to_module.from_prob
if for_display, do: to_module.for_display(final_amount), else: final_amount
end
@spec normalize(number | String.t()) :: float
# Guarantee float
defp normalize(amount) when is_number(amount), do: amount / 1.0
defp normalize(amount) when is_bitstring(amount) do
captures =
Regex.named_captures(
~r/^(?<s>[\+-])?(?<n>[\d\.]+)(?<q>[\/:-])?(?<d>[\d\.]+)?(?<p>%)?$/,
amount
)
value_from_captures(captures) * modifier_from_captures(captures)
end
defp modifier_from_captures(cap) do
case cap do
# Both sounds crazy
%{"s" => "-", "p" => "%"} ->
-1.0 / 100.0
%{"s" => "-"} ->
-1.0
%{"p" => "%"} ->
1 / 100
# Unmodified: covers nil, a "+" sign, etc.
_ ->
1.0
end
end
defp value_from_captures(cap) do
case cap do
# Not even close
nil ->
0.0
# Does not parse a numerator
%{"n" => ""} ->
0.0
# No quotient operator, just numerator
%{"q" => "", "n" => n} ->
fparse(n)
# Quotient without denominator, failure
%{"d" => ""} ->
0.0
%{"n" => n, "d" => d} ->
fparse(n) / fparse(d)
end
end
@spec fparse(String.t()) :: float
# This should be reasonable given how we parsed the above.
defp fparse(str), do: str |> Float.parse() |> elem(0)
end
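# Usage sketch, assuming the default :prob converter passes probabilities
# through unchanged (both `from` and `to` fall back to :prob), so this
# mostly exercises the string normalization above:
#
# iex> Exoddic.convert("25%", for_display: false)
# 0.25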
| lib/exoddic.ex | 0.894216 | 0.75005 | exoddic.ex | starcoder |
defmodule Cldr.Unit.Test.PreferenceData do
@moduledoc false
@preference_test_data "test/support/data/preference_test_data.txt"
@external_resource @preference_test_data
@offset 1
def preference_test_data do
@preference_test_data
|> File.read!()
|> String.split("\n")
|> Enum.map(&String.trim/1)
end
def preferences do
preference_test_data()
|> Enum.with_index()
|> Enum.map(&parse_test/1)
|> Enum.reject(&is_nil/1)
end
@fields [:quantity, :usage, :region, :input_rational, :input_double, :input_unit, :output]
def parse_test({"", _}) do
nil
end
def parse_test({<<"#", _rest::binary>>, _}) do
nil
end
def parse_test({test, index}) do
test
|> String.split(";")
|> Enum.map(&String.trim/1)
|> zip(@fields)
|> Enum.map(&transform/1)
|> set_output_units()
|> Map.new()
|> Map.put(:line, index + @offset)
end
def zip(data, fields) do
{input, output} = :lists.split(6, data)
fields
|> Enum.zip(input ++ [output])
end
def set_output_units(test) do
output = Keyword.get(test, :output)
units = Enum.map(output, &(elem(&1, 0) |> String.replace("-", "_") |> String.to_atom()))
Keyword.put(test, :output_units, units)
end
def transform(
{:output, [first_rational, first_unit, output_rational, output_double, output_unit]}
) do
{:output,
[
{first_unit, [first_rational, nil]},
{output_unit, [output_rational, output_double]}
]}
end
def transform({:output, [output_rational, output_double, output_unit]}) do
{:output, [{output_unit, [output_rational, output_double]}]}
end
def transform({:input_double, string}) do
{:input_double, String.to_float(string)}
end
def transform({:input_rational, string}) do
{:input_rational, to_rational(string)}
end
def transform({:region, string}) do
{:region, String.to_atom(string)}
end
def transform({:usage, string}) do
{:usage, String.to_atom(String.replace(string, "-", "_"))}
end
def transform({:input_unit, string}) do
{:input_unit, String.replace(string, "-", "_")}
end
def transform(x) do
x
end
def to_rational(string) do
rational =
string
|> String.split("/")
|> Enum.map(&String.trim/1)
|> Enum.map(&String.to_integer/1)
case rational do
[numerator, denominator] -> Ratio.new(numerator, denominator)
[integer] -> integer
_other -> raise ArgumentError, "Can't convert #{inspect(string)} to a rational"
end
end
end
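# Sketch of the rational parser above: an "n/d" string becomes a Ratio
# struct, while a bare integer string stays an integer.
#
# iex> Cldr.Unit.Test.PreferenceData.to_rational("1/3")
# Ratio.new(1, 3)
# iex> Cldr.Unit.Test.PreferenceData.to_rational("5")
# 5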
| test/support/parse_preference_data.ex | 0.677154 | 0.534309 | parse_preference_data.ex | starcoder |
defmodule Ockam.Channel.Protocol do
@moduledoc "Represents a Noise protocol configuration"
@type noise_pattern ::
:nn | :kn | :nk | :kk | :nx | :kx | :xn | :in | :xk | :ik | :xx | :ix | :psk
@type noise_msg :: {:in | :out, [Ockam.Channel.Handshake.token()]}
defstruct pattern: :xx, dh: :x25519, cipher: :aes_256_gcm, hash: :sha256
@type t :: %__MODULE__{
pattern: noise_pattern(),
dh: Ockam.Channel.Handshake.curve(),
cipher: Ockam.Channel.CipherState.cipher(),
hash: Ockam.Channel.HashState.hash()
}
def cipher(%__MODULE__{cipher: cipher}), do: cipher
def dh(%__MODULE__{dh: dh}), do: dh
def hash(%__MODULE__{hash: hash}), do: hash
def pattern(%__MODULE__{pattern: pattern}), do: pattern
def name(%__MODULE__{pattern: pattern, dh: dh, cipher: cipher, hash: hash}) do
to_name(pattern, dh, cipher, hash)
end
def from_name(<<"Noise_", rest::binary>>) do
do_from_name(rest)
end
def from_name(<<"NoisePSK_", rest::binary>>) do
do_from_name(rest)
end
defp do_from_name(rest) do
case String.split(rest, "_", parts: 4) do
[pattern_s, dh_s, cipher_s, hash_s] ->
with {:ok, pattern} <- parse_pattern(pattern_s),
{:ok, dh} <- parse_dh(dh_s),
{:ok, cipher} <- parse_cipher(cipher_s),
{:ok, hash} <- parse_hash(hash_s) do
if supported(pattern, dh, cipher, hash) do
{:ok,
%__MODULE__{
pattern: pattern,
dh: dh,
cipher: cipher,
hash: hash
}}
else
{:error, {__MODULE__, :unsupported_pattern}}
end
end
_ ->
{:error, {__MODULE__, :unrecognized_name}}
end
end
def msgs(role, %__MODULE__{pattern: pattern}) do
{_pre, msgs} = protocol(pattern)
role_adapt(role, msgs)
end
def pre_msgs(role, %__MODULE__{pattern: pattern}) do
{pre, _msgs} = protocol(pattern)
role_adapt(role, pre)
end
## Private
defp to_name(pattern, dh, cipher, hash) do
<<"Noise_", pattern_name(pattern)::binary, "_", dh_name(dh)::binary, "_",
cipher_name(cipher)::binary, "_", hash_name(hash)::binary>>
end
defp pattern_name(pattern) when is_atom(pattern) do
[simple | rest] =
pattern
|> Atom.to_string()
|> String.split("_", parts: 2)
case rest do
[] -> String.upcase(simple)
[bin] -> <<String.upcase(simple)::binary, "+", bin::binary>>
end
end
defp parse_pattern(pattern) when is_binary(pattern) do
[init | mod2] = String.split(pattern, "+", parts: 2)
[simple | mod1] = String.split(init, ~r/[^A-Z]/, parts: 2)
simple = String.downcase(simple)
case {mod1, mod2} do
{[], _} -> {:ok, String.to_existing_atom(simple)}
{[mod1s], [mod2s]} -> {:ok, String.to_existing_atom(simple <> "_" <> mod1s <> "_" <> mod2s)}
{[mod1s], []} -> {:ok, String.to_existing_atom(simple <> "_" <> mod1s)}
end
end
defp dh_name(:x25519), do: "25519"
defp dh_name(:x448), do: "448"
defp parse_dh("25519"), do: {:ok, :x25519}
defp parse_dh("448"), do: {:ok, :x448}
defp cipher_name(:aes_256_gcm), do: "AESGCM"
defp cipher_name(:chachapoly), do: "ChaChaPoly"
defp parse_cipher("AESGCM"), do: {:ok, :aes_256_gcm}
defp parse_cipher("ChaChaPoly"), do: {:ok, :chachapoly}
defp hash_name(:sha256), do: "SHA256"
defp hash_name(:sha512), do: "SHA512"
defp hash_name(:blake2s), do: "BLAKE2s"
defp hash_name(:blake2b), do: "BLAKE2b"
defp parse_hash(hash) when is_binary(hash) do
atom =
hash
|> String.downcase()
|> String.to_existing_atom()
{:ok, atom}
end
defp role_adapt(:initiator, msgs), do: msgs
defp role_adapt(:responder, msgs) do
Enum.map(msgs, fn
{:in, msg} -> {:out, msg}
{:out, msg} -> {:in, msg}
end)
end
defp protocol(:nn) do
{[], [{:out, [:e]}, {:in, [:e, :ee]}]}
end
defp protocol(:kn) do
{[{:out, [:s]}], [{:out, [:e]}, {:in, [:e, :ee, :se]}]}
end
defp protocol(:nk) do
{[{:in, [:s]}], [{:out, [:e, :es]}, {:in, [:e, :ee]}]}
end
defp protocol(:kk) do
{[{:out, [:s]}, {:in, [:s]}], [{:out, [:e, :es, :ss]}, {:in, [:e, :ee, :se]}]}
end
defp protocol(:nx) do
{[], [{:out, [:e]}, {:in, [:e, :ee, :s, :es]}]}
end
defp protocol(:kx) do
{[{:out, [:s]}], [{:out, [:e]}, {:in, [:e, :ee, :se, :s, :es]}]}
end
defp protocol(:xn) do
{[], [{:out, [:e]}, {:in, [:e, :ee]}, {:out, [:s, :se]}]}
end
defp protocol(:in) do
{[], [{:out, [:e, :s]}, {:in, [:e, :ee, :se]}]}
end
defp protocol(:xk) do
{[{:in, [:s]}], [{:out, [:e, :es]}, {:in, [:e, :ee]}, {:out, [:s, :se]}]}
end
defp protocol(:ik) do
{[{:in, [:s]}], [{:out, [:e, :es, :s, :ss]}, {:in, [:e, :ee, :se]}]}
end
defp protocol(:xx) do
{[], [{:out, [:e]}, {:in, [:e, :ee, :s, :es]}, {:out, [:s, :se]}]}
end
defp protocol(:ix) do
{[], [{:out, [:e, :s]}, {:in, [:e, :ee, :se, :s, :es]}]}
end
defp supported(pattern, dh, cipher, hash) do
with true <- pattern in [:xx],
true <- dh in [:x25519],
true <- cipher in [:aes_256_gcm],
true <- hash in [:sha256] do
true
end
end
end
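# Usage sketch: XX over X25519/AESGCM/SHA256 is the only combination that
# supported/4 accepts, so parsing its name round-trips and yields the XX
# message pattern.
#
# iex> {:ok, proto} = Ockam.Channel.Protocol.from_name("Noise_XX_25519_AESGCM_SHA256")
# iex> Ockam.Channel.Protocol.msgs(:initiator, proto)
# [{:out, [:e]}, {:in, [:e, :ee, :s, :es]}, {:out, [:s, :se]}]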
| implementations/elixir/lib/channel/protocol.ex | 0.819785 | 0.446193 | protocol.ex | starcoder |
defmodule AWS.Lightsail do
@moduledoc """
Amazon Lightsail is the easiest way to get started with AWS for developers
who just need virtual private servers. Lightsail includes everything you
need to launch your project quickly - a virtual machine, SSD-based storage,
data transfer, DNS management, and a static IP - for a low, predictable
price. You manage those Lightsail servers through the Lightsail console or
by using the API or command-line interface (CLI).
For more information about Lightsail concepts and tasks, see the [Lightsail
Dev Guide](http://lightsail.aws.amazon.com/ls/docs).
To use the Lightsail API or the CLI, you will need to use AWS Identity and
Access Management (IAM) to generate access keys. For details about how to
set this up, see the [Lightsail Dev
Guide](http://lightsail.aws.amazon.com/ls/docs/how-to/articles/lightsail-how-to-set-up-access-keys-to-use-sdk-api-cli).
"""
@doc """
Allocates a static IP address.
"""
def allocate_static_ip(client, input, options \\ []) do
request(client, "AllocateStaticIp", input, options)
end
@doc """
Attaches a static IP address to a specific Amazon Lightsail instance.
"""
def attach_static_ip(client, input, options \\ []) do
request(client, "AttachStaticIp", input, options)
end
@doc """
Closes the public ports on a specific Amazon Lightsail instance.
"""
def close_instance_public_ports(client, input, options \\ []) do
request(client, "CloseInstancePublicPorts", input, options)
end
@doc """
Creates a domain resource for the specified domain (e.g., example.com).
"""
def create_domain(client, input, options \\ []) do
request(client, "CreateDomain", input, options)
end
@doc """
Creates one of the following entry records associated with the domain: A
record, CNAME record, TXT record, or MX record.
"""
def create_domain_entry(client, input, options \\ []) do
request(client, "CreateDomainEntry", input, options)
end
@doc """
Creates a snapshot of a specific virtual private server, or *instance*. You
can use a snapshot to create a new instance that is based on that snapshot.
"""
def create_instance_snapshot(client, input, options \\ []) do
request(client, "CreateInstanceSnapshot", input, options)
end
@doc """
Creates one or more Amazon Lightsail virtual private servers, or
*instances*.
"""
def create_instances(client, input, options \\ []) do
request(client, "CreateInstances", input, options)
end
@doc """
Uses a specific snapshot as a blueprint for creating one or more new
instances that are based on that identical configuration.
"""
def create_instances_from_snapshot(client, input, options \\ []) do
request(client, "CreateInstancesFromSnapshot", input, options)
end
@doc """
Creates an SSH key pair.
"""
def create_key_pair(client, input, options \\ []) do
request(client, "CreateKeyPair", input, options)
end
@doc """
Deletes the specified domain recordset and all of its domain records.
"""
def delete_domain(client, input, options \\ []) do
request(client, "DeleteDomain", input, options)
end
@doc """
Deletes a specific domain entry.
"""
def delete_domain_entry(client, input, options \\ []) do
request(client, "DeleteDomainEntry", input, options)
end
@doc """
Deletes a specific Amazon Lightsail virtual private server, or *instance*.
"""
def delete_instance(client, input, options \\ []) do
request(client, "DeleteInstance", input, options)
end
@doc """
Deletes a specific snapshot of a virtual private server (or *instance*).
"""
def delete_instance_snapshot(client, input, options \\ []) do
request(client, "DeleteInstanceSnapshot", input, options)
end
@doc """
Deletes a specific SSH key pair.
"""
def delete_key_pair(client, input, options \\ []) do
request(client, "DeleteKeyPair", input, options)
end
@doc """
Detaches a static IP from the Amazon Lightsail instance to which it is
attached.
"""
def detach_static_ip(client, input, options \\ []) do
request(client, "DetachStaticIp", input, options)
end
@doc """
Downloads the default SSH key pair from the user's account.
"""
def download_default_key_pair(client, input, options \\ []) do
request(client, "DownloadDefaultKeyPair", input, options)
end
@doc """
Returns the names of all active (not deleted) resources.
"""
def get_active_names(client, input, options \\ []) do
request(client, "GetActiveNames", input, options)
end
@doc """
Returns the list of available instance images, or *blueprints*. You can use
a blueprint to create a new virtual private server already running a
specific operating system, as well as a preinstalled app or development
stack. The software each instance is running depends on the blueprint image
you choose.
"""
def get_blueprints(client, input, options \\ []) do
request(client, "GetBlueprints", input, options)
end
@doc """
Returns the list of bundles that are available for purchase. A bundle
describes the specs for your virtual private server (or *instance*).
"""
def get_bundles(client, input, options \\ []) do
request(client, "GetBundles", input, options)
end
@doc """
Returns information about a specific domain recordset.
"""
def get_domain(client, input, options \\ []) do
request(client, "GetDomain", input, options)
end
@doc """
Returns a list of all domains in the user's account.
"""
def get_domains(client, input, options \\ []) do
request(client, "GetDomains", input, options)
end
@doc """
Returns information about a specific Amazon Lightsail instance, which is a
virtual private server.
"""
def get_instance(client, input, options \\ []) do
request(client, "GetInstance", input, options)
end
@doc """
Returns temporary SSH keys you can use to connect to a specific virtual
private server, or *instance*.
"""
def get_instance_access_details(client, input, options \\ []) do
request(client, "GetInstanceAccessDetails", input, options)
end
@doc """
Returns the data points for the specified Amazon Lightsail instance metric,
given an instance name.
"""
def get_instance_metric_data(client, input, options \\ []) do
request(client, "GetInstanceMetricData", input, options)
end
@doc """
Returns the port states for a specific virtual private server, or
*instance*.
"""
def get_instance_port_states(client, input, options \\ []) do
request(client, "GetInstancePortStates", input, options)
end
@doc """
Returns information about a specific instance snapshot.
"""
def get_instance_snapshot(client, input, options \\ []) do
request(client, "GetInstanceSnapshot", input, options)
end
@doc """
Returns all instance snapshots for the user's account.
"""
def get_instance_snapshots(client, input, options \\ []) do
request(client, "GetInstanceSnapshots", input, options)
end
@doc """
Returns the state of a specific instance. Works on one instance at a time.
"""
def get_instance_state(client, input, options \\ []) do
request(client, "GetInstanceState", input, options)
end
@doc """
Returns information about all Amazon Lightsail virtual private servers, or
*instances*.
"""
def get_instances(client, input, options \\ []) do
request(client, "GetInstances", input, options)
end
@doc """
Returns information about a specific key pair.
"""
def get_key_pair(client, input, options \\ []) do
request(client, "GetKeyPair", input, options)
end
@doc """
Returns information about all key pairs in the user's account.
"""
def get_key_pairs(client, input, options \\ []) do
request(client, "GetKeyPairs", input, options)
end
@doc """
Returns information about a specific operation. Operations include events
such as when you create an instance, allocate a static IP, attach a static
IP, and so on.
"""
def get_operation(client, input, options \\ []) do
request(client, "GetOperation", input, options)
end
@doc """
Returns information about all operations.
Results are returned from oldest to newest, up to a maximum of 200. Results
can be paged by making each subsequent call to `GetOperations` use the
maximum (last) `statusChangedAt` value from the previous request.
"""
def get_operations(client, input, options \\ []) do
request(client, "GetOperations", input, options)
end
@doc """
Gets operations for a specific resource (e.g., an instance or a static IP).
"""
def get_operations_for_resource(client, input, options \\ []) do
request(client, "GetOperationsForResource", input, options)
end
@doc """
Returns a list of all valid regions for Amazon Lightsail.
"""
def get_regions(client, input, options \\ []) do
request(client, "GetRegions", input, options)
end
@doc """
Returns information about a specific static IP.
"""
def get_static_ip(client, input, options \\ []) do
request(client, "GetStaticIp", input, options)
end
@doc """
Returns information about all static IPs in the user's account.
"""
def get_static_ips(client, input, options \\ []) do
request(client, "GetStaticIps", input, options)
end
@doc """
Imports a public SSH key from a specific key pair.
"""
def import_key_pair(client, input, options \\ []) do
request(client, "ImportKeyPair", input, options)
end
@doc """
Returns a Boolean value indicating whether your Lightsail VPC is peered.
"""
def is_vpc_peered(client, input, options \\ []) do
request(client, "IsVpcPeered", input, options)
end
@doc """
Adds public ports to an Amazon Lightsail instance.
"""
def open_instance_public_ports(client, input, options \\ []) do
request(client, "OpenInstancePublicPorts", input, options)
end
@doc """
Tries to peer the Lightsail VPC with the user's default VPC.
"""
def peer_vpc(client, input, options \\ []) do
request(client, "PeerVpc", input, options)
end
@doc """
Restarts a specific instance. When your Amazon Lightsail instance is
finished rebooting, Lightsail assigns a new public IP address. To use the
same IP address after restarting, create a static IP address and attach it
to the instance.
"""
def reboot_instance(client, input, options \\ []) do
request(client, "RebootInstance", input, options)
end
@doc """
Deletes a specific static IP from your account.
"""
def release_static_ip(client, input, options \\ []) do
request(client, "ReleaseStaticIp", input, options)
end
@doc """
Starts a specific Amazon Lightsail instance from a stopped state. To
restart an instance, use the reboot instance operation.
"""
def start_instance(client, input, options \\ []) do
request(client, "StartInstance", input, options)
end
@doc """
Stops a specific Amazon Lightsail instance that is currently running.
"""
def stop_instance(client, input, options \\ []) do
request(client, "StopInstance", input, options)
end
@doc """
Attempts to unpeer the Lightsail VPC from the user's default VPC.
"""
def unpeer_vpc(client, input, options \\ []) do
request(client, "UnpeerVpc", input, options)
end
@doc """
Updates a domain recordset after it is created.
"""
def update_domain_entry(client, input, options \\ []) do
request(client, "UpdateDomainEntry", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "lightsail"}
host = get_host("lightsail", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "Lightsail_20161128.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
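# Usage sketch. The client map shape below is an assumption inferred from
# get_host/2 and get_url/2 above plus the credentials that
# AWS.Request.sign_v4/5 presumably reads; it is not a documented struct.
# The :service key must exist because request/4 updates it with %{client | ...}.
#
# client = %{access_key_id: "...", secret_access_key: "...",
#            region: "us-east-1", endpoint: "amazonaws.com",
#            proto: "https", port: 443, service: nil}
# {:ok, data, _response} = AWS.Lightsail.get_regions(client, %{})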
| lib/aws/lightsail.ex | 0.804406 | 0.511168 | lightsail.ex | starcoder |
defmodule MuonTrap do
@moduledoc """
MuonTrap protects you from lost and out of control OS processes.
You can use it as a `System.cmd/3` replacement or to pull OS processes into
an Erlang supervision tree via `MuonTrap.Daemon`. Either way, if the Erlang
process that runs the command dies, then the OS processes will die as well.
MuonTrap tries very hard to kill OS processes so that remnants don't hang
around the system when your Erlang code thinks they should be gone. MuonTrap
can use the Linux kernel's `cgroup` feature to contain the child process and
all of its children. From there, you can limit CPU and memory and other
resources to the process group.
MuonTrap does not require `cgroups` but keep in mind that OS processes can
escape. It is, however, still an improvement over `System.cmd/3` which does
not have a mechanism for dealing with OS processes that do not monitor their
stdin for when to close.
For more information, see the documentation for `MuonTrap.cmd/3` and
`MuonTrap.Daemon`
## Configuring cgroups
On most Linux distributions, use `cgcreate` to create a new cgroup. You can
name them almost anything. The command below creates one named `muontrap` for
the current user. It supports memory and CPU controls.
```sh
sudo cgcreate -a $(whoami) -g memory,cpu:muontrap
```
Nerves systems do not contain `cgcreate` by default. Due to the simpler Linux
setup, it may be sufficient to run `File.mkdir_p(cgroup_path)` to create a
cgroup. For example:
```elixir
File.mkdir_p("/sys/fs/cgroup/memory/muontrap")
```
This creates the cgroup path, `muontrap` under the `memory` controller. If
you do not have the `"/sys/fs/cgroup"` directory, you will need to mount it
or update your `erlinit.config` to mount it for you. See a newer official
system for an example.
"""
@doc ~S"""
Executes a command like `System.cmd/3` via the `muontrap` wrapper.
## Options
* `:cgroup_controllers` - run the command under the specified cgroup controllers. Defaults to `[]`.
* `:cgroup_base` - create a temporary path under the specified cgroup path
* `:cgroup_path` - explicitly specify a path to use. Use `:cgroup_base`, unless you must control the path.
* `:cgroup_sets` - set a cgroup controller parameter before running the command
* `:delay_to_sigkill` - milliseconds before sending a SIGKILL to a child process if it doesn't exit with a SIGTERM (default 500 ms)
* `:force_close_port_after` - milliseconds before a `Port` is forcibly closed. Unless specified, the `Port` will never be closed (by us)
* `:uid` - run the command using the specified uid or username
* `:gid` - run the command using the specified gid or group
The following `System.cmd/3` options are also available:
* `:into` - injects the result into the given collectable, defaults to `""`
* `:cd` - the directory to run the command in
* `:env` - an enumerable of tuples containing environment key-value as binary
* `:arg0` - sets the command arg0
* `:stderr_to_stdout` - redirects stderr to stdout when `true`
* `:parallelism` - when `true`, the VM will schedule port tasks to improve
parallelism in the system. If set to `false`, the VM will try to perform
commands immediately, improving latency at the expense of parallelism.
The default can be set on system startup by passing the "+spp" argument
to `--erl`.
## Examples
Run a command:
```elixir
iex> MuonTrap.cmd("echo", ["hello"])
{"hello\n", 0}
```
The next examples only run on Linux. To try this out, create new cgroups:
```sh
sudo cgcreate -a $(whoami) -g memory,cpu:muontrap
```
Run a command, but limit memory so severely that it doesn't work (for demo
purposes, obviously):
```elixir
iex-donttest> MuonTrap.cmd("echo", ["hello"], cgroup_controllers: ["memory"], cgroup_path: "muontrap/test", cgroup_sets: [{"memory", "memory.limit_in_bytes", "8192"}])
{"", 1}
```
"""
@spec cmd(binary(), [binary()], keyword()) ::
{Collectable.t(), exit_status :: non_neg_integer()}
def cmd(command, args, opts \\ []) when is_binary(command) and is_list(args) do
options = MuonTrap.Options.validate(:cmd, command, args, opts)
MuonTrap.Port.cmd(options)
end
@doc """
Return the absolute path to the muontrap executable.
Call this if you want to invoke the `muontrap` port binary manually.
"""
defdelegate muontrap_path, to: MuonTrap.Port
end
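# Supervision sketch (the child-spec form is an assumption about
# MuonTrap.Daemon, which this file only references): tie a long-running
# OS process to a supervisor so it dies with the tree.
#
# children = [
#   {MuonTrap.Daemon, ["ping", ["localhost"], []]}
# ]
# Supervisor.start_link(children, strategy: :one_for_one)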
| lib/muontrap.ex | 0.817283 | 0.828731 | muontrap.ex | starcoder |
defmodule Penelope.ML.CRF.Tagger do
@moduledoc """
The CRF tagger is a thin wrapper over the CRFSuite library for sequence
inference. It provides the ability to train sequence models, use them
for inference, and import/export them.
Features (Xs) are represented as lists of sequences (lists). Each sequence
entry can contain a string (for simple word-based features), a list of
stringable values (list features), or maps (for named features per sequence
item).
Labels (Ys) are represented as lists of sequences of strings. Each label
must correspond to an entry in the feature lists.
Models are compiled/exported to/from a map containing a binary blob
that is maintained by CRF suite. Training parameters are analogs of those
used by the sklearn-crfsuite library. For more information, see:
http://www.chokkan.org/software/crfsuite/
https://sklearn-crfsuite.readthedocs.io/en/latest/
"""
alias Penelope.NIF
@doc """
trains a CRF model and returns it as a compiled model
options:
|key |default |
|--------------------------|--------------------|
|`algorithm` |`:lbfgs` |
|`min_freq` |0.0 |
|`all_possible_states` |false |
|`all_possible_transitions`|false |
|`c1` |0.0 |
|`c2` |0.0 |
|`max_iterations` |depends on algorithm|
|`num_memories` |6 |
|`epsilon` |1e-5 |
|`period` |10 |
|`delta` |1e-5 |
|`linesearch` |:more_thuente |
|`max_linesearch` |20 |
|`calibration_eta` |0.1 |
|`calibration_rate` |2.0 |
|`calibration_samples` |1000 |
|`calibration_candidates` |10 |
|`calibration_max_trials` |20 |
|`pa_type` |1 |
|`c` |1.0 |
|`error_sensitive` |true |
|`averaging` |true |
|`variance` |1.0 |
|`gamma` |1.0 |
|`verbose` |false |
algorithms:
`:lbfgs`, `:l2sgd`, `:ap`, `:pa`, `:arow`
linesearch:
`:more_thuente`, `:backtracking`, `:strong_backtracking`
for more information on parameters, see
https://sklearn-crfsuite.readthedocs.io/en/latest/api.html
"""
@spec fit(
context :: map,
x :: [[String.t() | list | map]],
y :: [[String.t()]],
options :: keyword
) :: map
def fit(context, x, y, options \\ []) do
if length(x) !== length(y), do: raise(ArgumentError, "mismatched x/y")
x = transform(%{}, context, x)
params = fit_params(x, y, options)
model = NIF.crf_train(x, y, params)
%{crf: model}
end
@spec transform(
model :: map,
context :: map,
x :: [[String.t() | list | map]]
) :: [[map]]
def transform(_model, _context, x) do
Enum.map(x, fn x -> Enum.map(x, &featurize/1) end)
end
@doc """
extracts model parameters from compiled model
These parameters are simple elixir objects and can later be passed to
`compile` to prepare the model for inference.
"""
@spec export(%{crf: reference}) :: map
def export(%{crf: crf}) do
crf
|> NIF.crf_export()
|> Map.update!(:model, &Base.encode64/1)
|> Map.new(fn {k, v} -> {to_string(k), v} end)
end
@doc """
compiles a pre-trained model
"""
@spec compile(params :: map) :: map
def compile(params) do
model =
params
|> Map.new(fn {k, v} -> {String.to_existing_atom(k), v} end)
|> Map.update!(:model, &Base.decode64!/1)
|> NIF.crf_compile()
%{crf: model}
end
@doc """
predicts a list of target sequences from a list of feature sequences
returns the predicted sequences and their probability
"""
@spec predict_sequence(
%{crf: reference},
context :: map,
x :: [[String.t() | list | map]]
) :: [{[String.t()], float}]
def predict_sequence(model, _context, x) do
Enum.map(x, &do_predict_sequence(model, &1))
end
defp do_predict_sequence(_model, []) do
{[], 1.0}
end
defp do_predict_sequence(%{crf: model}, x) do
NIF.crf_predict(model, Enum.map(x, &featurize/1))
end
defp fit_params(_x, _y, options) do
algorithm = Keyword.get(options, :algorithm, :lbfgs)
min_freq = Keyword.get(options, :min_freq, 0) / 1
all_states? = Keyword.get(options, :all_possible_states?, false)
all_transitions? =
Keyword.get(options, :all_possible_transitions?, false)
c1 = Keyword.get(options, :c1, 0.0) / 1
c2 = Keyword.get(options, :c2, 0.0) / 1
max_iter =
Keyword.get(options, :max_iterations, max_iterations(algorithm))
num_memories = Keyword.get(options, :num_memories, 6)
epsilon = Keyword.get(options, :epsilon, 1.0e-5) / 1
period = Keyword.get(options, :period, 10)
delta = Keyword.get(options, :delta, 1.0e-5) / 1
linesearch = Keyword.get(options, :linesearch, :more_thuente)
max_linesearch = Keyword.get(options, :max_linesearch, 20)
calibration_eta = Keyword.get(options, :calibration_eta, 0.1) / 1
calibration_rate = Keyword.get(options, :calibration_rate, 2.0) / 1
calibration_samples = Keyword.get(options, :calibration_samples, 1000)
calibration_candidates =
Keyword.get(options, :calibration_candidates, 10)
calibration_max_trials =
Keyword.get(options, :calibration_max_trials, 20)
pa_type = Keyword.get(options, :pa_type, 1)
c = Keyword.get(options, :c, 1.0) / 1
error_sensitive? = Keyword.get(options, :error_sensitive?, true)
averaging? = Keyword.get(options, :averaging?, true)
variance = Keyword.get(options, :variance, 1.0) / 1
gamma = Keyword.get(options, :gamma, 1.0) / 1
verbose = Keyword.get(options, :verbose, false)
%{
algorithm: algorithm,
min_freq: min_freq,
all_possible_states?: all_states?,
all_possible_transitions?: all_transitions?,
c1: c1,
c2: c2,
max_iterations: max_iter,
num_memories: num_memories,
epsilon: epsilon,
period: period,
delta: delta,
linesearch: linesearch_param(linesearch),
max_linesearch: max_linesearch,
calibration_eta: calibration_eta,
calibration_rate: calibration_rate,
calibration_samples: calibration_samples,
calibration_candidates: calibration_candidates,
calibration_max_trials: calibration_max_trials,
pa_type: pa_type,
c: c,
error_sensitive?: error_sensitive?,
averaging?: averaging?,
variance: variance,
gamma: gamma,
verbose: verbose
}
end
defp max_iterations(algorithm) do
case algorithm do
:lbfgs -> 2_147_483_647
:l2sgd -> 1000
:ap -> 100
:pa -> 100
:arow -> 100
end
end
defp linesearch_param(linesearch) do
case linesearch do
:more_thuente -> :MoreThuente
:backtracking -> :Backtracking
:strong_backtracking -> :StrongBacktracking
end
end
# convert from a set of feature formats to the standard crfsuite format:
# [%{"feature1" => 1.0, "feature2" => 1.5, ...}, ...]
# . simple tokens:
# "t1" -> %{"t1" => 1.0}
# . list per token:
# ["f1", "f2", ...] -> %{"f1" => 1.0, "f2" => 1.0, ...}
# . map per token:
# %{"f1" => "v1", "f2" -> 1.5, ...} -> %{"f1-v1" => 1.0, "f2" => 1.5, ...}
defp featurize(x) when is_map(x) do
Map.new(x, fn {k, v} -> hd(Map.to_list(featurize(k, v))) end)
end
defp featurize(x) when is_list(x) do
Map.new(x, fn v -> hd(Map.to_list(featurize(v, 1))) end)
end
defp featurize(x) do
featurize(x, 1)
end
defp featurize(k, v) when is_number(v) do
%{to_string(k) => v / 1}
end
defp featurize(k, v) do
featurize("#{k}-#{v}", 1)
end
end
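# Illustrative usage sketch (not part of the original file): the three feature
# formats accepted by predict_sequence/3, per the comments above. The module
# name Penelope.ML.CRF.Tagger is inferred from the file path, and `model` is
# assumed to come from an earlier training call, so the call is left commented.
#
#   x_tokens = [["t1", "t2"]]                    # simple tokens per item
#   x_lists  = [[["f1", "f2"], ["f3"]]]          # feature lists per token
#   x_maps   = [[%{"f1" => "v1", "f2" => 1.5}]]  # feature maps per token
#   [{labels, probability}] =
#     Penelope.ML.CRF.Tagger.predict_sequence(model, %{}, x_maps)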
|
lib/penelope/ml/crf/tagger.ex
| 0.923411 | 0.81257 |
tagger.ex
|
starcoder
|
defmodule ExMpesa.TransactionStatus do
@moduledoc """
Use this api to check the transaction status.
"""
import ExMpesa.MpesaBase
import ExMpesa.Util
@doc """
Transaction Status Query
## Requirement Params
- `CommandID`[String] - Takes only 'TransactionStatusQuery' command id
- `timeout_url` [URL] - The path that stores information of timed-out transactions. Takes the form of
https://ip or domain:port/path
- `result_url`[URL] - The path that stores information of transaction. Example https://ip or domain:port/path
- `Initiator` [Alpha-Numeric] - The name of the Initiator initiating the request. This is the credential/username
used to authenticate the transaction request
- `security credential` - To generate security_credential, head over to https://developer.safaricom.co.ke/test_credentials, then Initiator Security Password for your environment.
`config.exs`
```elixir
config :ex_mpesa,
cert: "",
transaction_status: [
initiator_name: "",
password: "",
timeout_url: "",
result_url: "",
security_credential: ""
]
```
Alternatively, generate security credential using certificate
`cert` - This is the M-Pesa public key certificate used to encrypt your plain password.
There are 2 types of certificates.
- sandox - https://developer.safaricom.co.ke/sites/default/files/cert/cert_sandbox/cert.cer .
- production - https://developer.safaricom.co.ke/sites/default/files/cert/cert_prod/cert.cer .
`password` - This is a plain unencrypted password.
Environment
- production - set password from the organization portal.
- sandbox - use your own custom password
## Parameters
The following are the parameters required for this method, the rest are fetched from config
files.
- `transaction_id` [Alpha-Numeric] - Unique identifier of the transaction on M-Pesa, e.g. LKXXXX1234
- `receiver_party` [Numeric] - Organization/MSISDN receiving the transaction, can be:
    - Shortcode (6 digits)
    - MSISDN (12 digits)
- `identifier_type` [Numeric] - Type of organization receiving the transaction, can be one of the following:
    1 - MSISDN
    2 - Till Number
    4 - Organization short code
- `remarks`[String] - Comments that are sent along with the transaction, can be a sequence of characters up to 100
- `occasion` [ String] - Optional Parameter String sequence of characters up to 100
## Example
iex> ExMpesa.TransactionStatus.request(%{transaction_id: "SOME7803", receiver_party: "600247", identifier_type: 4, remarks: "TransactionReversal", occasion: "TransactionReversal"})
{:ok,
%{
"ConversationID" => "AG_20201010_000056be35a7b266b43e",
"OriginatorConversationID" => "27288-72545279-2",
"ResponseCode" => "0",
"ResponseDescription" => "Accept the service request successfully."
}
}
"""
def request(params) do
case get_security_credential_for(:transaction_status) do
nil -> {:error, "cannot generate security_credential due to missing configuration fields"}
security_credential -> query(security_credential, params)
end
end
defp query(
security_credential,
%{
transaction_id: transaction_id,
receiver_party: receiver_party,
identifier_type: identifier_type,
remarks: remarks
} = params
) do
occasion = Map.get(params, :occasion, nil)
payload = %{
"CommandID" => "TransactionStatusQuery",
"PartyA" => receiver_party,
"IdentifierType" => identifier_type,
"Remarks" => remarks,
"SecurityCredential" => security_credential,
"Initiator" => Application.get_env(:ex_mpesa, :transaction_status)[:initiator_name],
"QueueTimeOutURL" => Application.get_env(:ex_mpesa, :transaction_status)[:timeout_url],
"ResultURL" => Application.get_env(:ex_mpesa, :transaction_status)[:result_url],
"TransactionID" => transaction_id,
"Occasion" => occasion
}
make_request("/mpesa/transactionstatus/v1/query", payload)
end
defp query(_security_credential, _) do
{:error,
"Some Required Parameter missing, check whether you have 'transaction_id', 'receiver_party', 'identifier_type', 'remarks'"}
end
end
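# Hedged usage sketch (not part of the original module): handling both
# outcomes of request/1. Parameter values mirror the doctest above.
case ExMpesa.TransactionStatus.request(%{
       transaction_id: "SOME7803",
       receiver_party: "600247",
       identifier_type: 4,
       remarks: "TransactionReversal"
     }) do
  {:ok, %{"ResponseCode" => "0"} = body} -> {:accepted, body}
  {:ok, other} -> {:unexpected, other}
  {:error, reason} -> {:failed, reason}
end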
|
lib/ex_mpesa/transaction_status.ex
| 0.856377 | 0.739516 |
transaction_status.ex
|
starcoder
|
defmodule Timber.Formatter do
@moduledoc """
Provides utilities for formatting log lines as JSON text
This formatter is designed for use with the default `:console` backend provided by
Elixir Logger. To use this, you'll need to configure the console backend to call
the `Timber.Formatter.format/4` function instead of its default formatting function.
This is done with a simple configuration change. You'll also need to let `:console`
know that `:all` metadata keys should be passed to the formatter.
The result of the configuration looks like:
```elixir
config :logger, backends: [:console]
config :logger, :console,
format: {Timber.Formatter, :format},
metadata: :all
```
Further configuration options available on this module are documented below.
## Configuration Recommendations: Development vs. Production
In a standard Elixir project, you will probably have different configuration files
for your development and production setups. These configuration files typically
take the form of `config/dev.exs` and `config/prod.exs` which override defaults set
in `config/config.exs`.
Timber's defaults are production ready, but the production settings also assume that
you'll be viewing the logs through the Timber console, so they forego some niceties
that help when developing locally. Therefore, we recommend that you only include
the `Timber.Formatter` in your production environments.
## Transport Configuration Options
The following options are available when configuring the formatter:
#### `escape_new_lines`
When `true`, newline characters are escaped as `\\n`.
When `false`, newline characters are left alone.
This circumvents issues with output devices (like Heroku Logplex) that transform
line breaks into multiple log lines.
The default depends on the environment variable `HEROKU`. If the environment variable
is present, this will be set to `true`. Otherwise, this defaults to `false`. Setting the
value in your application configuration will always override the initialized setting.
"""
@default_escape_new_lines false
alias Timber.LogEntry
@type configuration :: %{
required(:escape_new_lines) => boolean
}
@doc """
Handles formatting a log for the `Logger` application
This function allows you to integrate Timber with the default `:console` backend
distributed with the Elixir `Logger` application. By default, lines are printed
as encoded JSON strings.
"""
def format(level, message, ts, metadata) do
configuration = get_configuration()
log_entry = LogEntry.new(ts, level, message, metadata)
line_output =
log_entry
|> LogEntry.encode_to_binary!(:json)
|> escape_new_lines(configuration.escape_new_lines)
# Prevents the final new line from being escaped
[line_output, ?\n]
end
@spec get_configuration() :: configuration
defp get_configuration() do
options = Application.get_env(:timber, __MODULE__, [])
escape_new_lines = Keyword.get(options, :escape_new_lines, @default_escape_new_lines)
%{
escape_new_lines: escape_new_lines
}
end
@spec escape_new_lines(IO.chardata(), boolean) :: IO.chardata()
defp escape_new_lines(message, false),
do: message
defp escape_new_lines(message, true) do
message
|> to_string()
|> String.replace("\n", "\\n")
end
end
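# Minimal sketch (not in the original file): calling format/4 directly with a
# Logger-style timestamp tuple {{y, m, d}, {h, min, s, ms}}. Requires the
# :timber dependency; the result is iodata ending in a newline, per the
# function above. The metadata keyword list is illustrative.
ts = {{2024, 1, 1}, {12, 0, 0, 0}}
iodata = Timber.Formatter.format(:info, "user logged in", ts, request_id: "req-1")
IO.iodata_to_binary(iodata)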
|
lib/timber/formatter.ex
| 0.806662 | 0.812682 |
formatter.ex
|
starcoder
|
defmodule WebDriver.Error do
@status_codes [
{ 0, :success },
{ 6, :no_such_driver },
{ 7, :no_such_element },
{ 8, :no_such_frame },
{ 9, :unknown_command },
{ 10, :stale_element_reference },
{ 11, :element_not_visible },
{ 12, :invalid_element_state },
{ 13, :unknown_error },
{ 15, :element_not_selectable },
{ 17, :javascript_error },
{ 19, :x_path_lookup_error },
{ 21, :timeout },
{ 23, :no_such_window },
{ 24, :invalid_cookie_domain },
{ 25, :unable_to_set_cookie },
{ 26, :unexpected_alert_open },
{ 27, :no_alert_open_error },
{ 28, :script_timeout },
{ 29, :invalid_element_coordinates },
{ 30, :ime_not_available },
{ 31, :ime_engine_activation_failed },
{ 32, :invalid_selector },
{ 33, :session_not_created_exception },
{ 34, :move_target_out_of_bounds }
]
@moduledoc """
Error handling for WebDriver.
The error codes that are returned from the server are managed by this
module.
The codes that can be returned are:
* :success
* :no_such_driver
* :no_such_element
* :no_such_frame
* :unknown_command
* :stale_element_reference
* :element_not_visible
* :invalid_element_state
* :unknown_error
* :element_not_selectable
* :javascript_error
* :x_path_lookup_error
* :timeout
* :no_such_window
* :invalid_cookie_domain
* :unable_to_set_cookie
* :unexpected_alert_open
* :no_alert_open_error
* :script_timeout
* :invalid_element_coordinates
* :ime_not_available
* :ime_engine_activation_failed
* :invalid_selector
* :session_not_created_exception
* :move_target_out_of_bounds
"""
defmodule ErrorMessage do
defstruct message: "", screen: "", class: "", stack_trace: []
end
@doc """
Create an ErrorMessage record from raw protocol error data.
"""
def build_message([{"message", message},{"screen", screen},{"class", class},{"stackTrace", stack_trace}])do
ErrorMessage[ message: message, screen: screen, class: class, stack_trace: stack_trace ]
end
@doc """
Convert a code number to a status code summary atom.
"""
def summary(code) do
  status_codes = Map.new(@status_codes)
  {:ok, val} = Map.fetch(status_codes, code)
  val
end
end
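# Sketch (not part of the original file): mapping raw protocol status codes to
# their summary atoms using the table above.
:success = WebDriver.Error.summary(0)
:no_such_element = WebDriver.Error.summary(7)
:timeout = WebDriver.Error.summary(21)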
|
lib/webdriver/error.ex
| 0.571049 | 0.438905 |
error.ex
|
starcoder
|
defmodule Faqcheck.Referrals.OperatingHours do
use Ecto.Schema
@timestamps_opts [type: :utc_datetime]
use EnumType
import Ecto.Changeset
import Faqcheck.Schema
defenum Weekday, :integer do
value Monday, 0
value Tuesday, 1
value Wednesday, 2
value Thursday, 3
value Friday, 4
value Saturday, 5
value Sunday, 6
value Today, 7
value Any, 8
default Monday
end
schema "operating_hours" do
field :weekday, Weekday, default: Weekday.default
field :opens, :time, default: Time.new!(8, 0, 0)
field :closes, :time
field :valid_from, :utc_datetime
field :valid_to, :utc_datetime
field :always_open, :boolean
field :week_regularity, :integer
timestamps()
belongs_to :facility, Faqcheck.Referrals.Facility
schema_versions()
end
def changeset(hours, attrs) do
hours
|> cast(attrs, [:weekday, :opens, :closes, :valid_from, :valid_to, :always_open, :week_regularity])
|> Weekday.validate(:weekday)
|> validate_required([:weekday, :opens])
|> Faqcheck.Repo.versions()
end
@doc """
Produces a best guess for the next hours that would be listed after
the ones that already exist, for instance the same hours but on the
subsequent weekday.
## Examples
iex> Faqcheck.Referrals.OperatingHours.next([])
%Faqcheck.Referrals.OperatingHours{}
iex> Faqcheck.Referrals.OperatingHours.next([
...> %Faqcheck.Referrals.OperatingHours{
...> weekday: Faqcheck.Referrals.OperatingHours.Weekday.Tuesday,
...> opens: ~T[08:30:00]
...> }
...> ])
%Faqcheck.Referrals.OperatingHours{
weekday: Faqcheck.Referrals.OperatingHours.Weekday.Wednesday,
opens: ~T[08:30:00]
}
"""
def next(hours) do
case List.last(hours) do
nil -> %Faqcheck.Referrals.OperatingHours{}
prev -> Map.update(
prev, :weekday,
Weekday.Monday,
&(Weekday.from(rem(&1.value + 1, 7))))
end
end
def always_open do
%Faqcheck.Referrals.OperatingHours{
weekday: Faqcheck.Referrals.OperatingHours.Weekday.Any,
opens: ~T[00:00:00],
closes: ~T[23:59:59],
always_open: true,
}
end
@doc """
Given string descriptions for operating hours, generate
a set of OperatingHours.
## Examples
iex> from_description("Tues", nil, "10:30:00", "05:00:00")
[
%Faqcheck.Referrals.OperatingHours{
weekday: Faqcheck.Referrals.OperatingHours.Weekday.Tuesday,
opens: ~T[10:30:00],
closes: ~T[17:00:00],
},
]
"""
def from_description(first_day_str, last_day_str, opens_str, closes_str) do
opens = parse_hour(opens_str)
given_closes = parse_hour(closes_str)
closes = case Time.compare(opens, given_closes) do
:gt -> given_closes
|> Time.add(12 * 60 * 60, :second)
|> Time.truncate(:second)
_ -> given_closes
end
first_day = parse_day(first_day_str)
if is_nil(last_day_str) or last_day_str == "" do
[
%Faqcheck.Referrals.OperatingHours{
weekday: first_day,
opens: opens,
closes: closes,
}
]
else
last_day = parse_day(last_day_str)
Enum.map(
first_day.value..last_day.value,
&%Faqcheck.Referrals.OperatingHours{
weekday: Weekday.from(&1),
opens: opens,
closes: closes,
})
end
end
@doc """
Parse time of day from a string description.
## Examples
iex> parse_hour("10:30:00")
~T[10:30:00]
iex> parse_hour("10:30")
~T[10:30:00]
iex> parse_hour("5")
~T[05:00:00]
"""
def parse_hour(str) do
segments = String.split(str, ":")
if length(segments) < 3 do
parse_hour(str <> ":00")
else
hours_seg = hd(segments)
hours = String.length(hours_seg) == 2 && hours_seg || "0" <> hours_seg
Time.from_iso8601!(Enum.join([hours | tl(segments)], ":"))
end
end
@doc """
Parse opening and closing hours from a string description.
## Examples
iex> parse_hours("9-5")
{~T[09:00:00], ~T[17:00:00]}
iex> parse_hours("1pm-5pm")
{~T[13:00:00], ~T[17:00:00]}
iex> parse_hours("8AM-5PM")
{~T[08:00:00], ~T[17:00:00]}
"""
def parse_hours(str) do
hours = String.split(str, "-", parts: 2)
|> Enum.map(fn s ->
s = s |> String.trim()
hour = s
|> String.trim_trailing("pm")
|> String.trim_trailing("PM")
|> String.trim_trailing("am")
|> String.trim_trailing("AM")
|> String.trim_trailing("noon")
|> String.trim()
|> parse_hour()
if (String.ends_with?(s, "pm") or String.ends_with?(s, "PM")) and !String.starts_with?(s, "12") do
plus_12h hour
else
hour
end
end)
case hours do
[opens, closes] -> order_hours(opens, closes)
[opens] -> {opens, nil}
_ -> raise "hours could not be parsed: #{str}"
end
end
def order_hours(opens, closes) do
case Time.compare(opens, closes) do
:gt -> {
opens,
plus_12h(closes),
}
_ -> {
opens,
closes,
}
end
end
def plus_12h(t) do
t
|> Time.add(12 * 60 * 60, :second)
|> Time.truncate(:second)
end
@doc """
Parse a weekday from a string description.
## Examples
iex> parse_day("Mon")
Faqcheck.Referrals.OperatingHours.Weekday.Monday
iex> parse_day("Tues")
Faqcheck.Referrals.OperatingHours.Weekday.Tuesday
iex> parse_day("TH")
Faqcheck.Referrals.OperatingHours.Weekday.Thursday
"""
def parse_day(str) do
case str do
s when s in ["Today", "today"] -> Weekday.Today
s when s in ["Su", "Sun", "sun", "Sunday", "sunday"] -> Weekday.Sunday
s when s in ["M", "Mo", "Mon", "mon", "Monday", "monday"] -> Weekday.Monday
s when s in ["T", "Tu", "Tue", "tue", "Tues", "tues", "Tuesday", "tuesday"] -> Weekday.Tuesday
s when s in ["W", "Wed", "wed", "Weds", "weds", "Wednesday", "wednesday"] -> Weekday.Wednesday
s when s in ["R", "Th", "TH", "Thu", "thu", "Thurs", "thurs", "Thursday", "thursday"] -> Weekday.Thursday
s when s in ["F", "Fr", "Fri", "fri", "Friday", "friday"] -> Weekday.Friday
s when s in ["S", "Sat", "sat", "Saturday", "saturday"] -> Weekday.Saturday
s when s in ["Su", "Sun", "sun", "Sunday", "sunday"] -> Weekday.Sunday
_ -> raise "unknown weekday format: #{str}"
end
end
def parse_days(str) do
parts = String.split(str, "-", parts: 2)
case length parts do
1 -> [parts |> Enum.at(0) |> String.trim() |> parse_day()]
2 ->
first_day = parts |> Enum.at(0) |> String.trim() |> parse_day()
last_day = parts |> Enum.at(1) |> String.trim() |> parse_day()
first_day.value..last_day.value |> Enum.map(&Weekday.from/1)
_ -> raise "expected day (M) or day range (M-W), got: #{str}"
end
end
def hours_str(t) do
if is_nil(t) do
""
else
Calendar.strftime(t, "%I:%M %p")
end
end
def format_hours(hours) do
hours
|> Enum.group_by(fn h -> {h.weekday, h.week_regularity} end)
|> Enum.map(fn {{day, regularity}, hs} ->
{day,
regularity,
hs
|> Enum.map(fn h ->
if h.always_open do
"24 hours"
else
"#{hours_str h.opens} - #{hours_str h.closes}"
end
end)
|> Enum.join(", ")}
end)
|> Enum.sort_by(fn {d, _r, _h} -> d.value end)
end
end
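# Illustrative sketch (not in the original file): format_hours/1 groups entries
# by {weekday, week_regularity} and renders each day's time ranges, as defined
# above. The struct fields mirror the schema; the expected output assumes the
# "%I:%M %p" rendering in hours_str/1.
alias Faqcheck.Referrals.OperatingHours
alias Faqcheck.Referrals.OperatingHours.Weekday
hours = [
  %OperatingHours{weekday: Weekday.Monday, opens: ~T[09:00:00],
                  closes: ~T[17:00:00], always_open: false}
]
OperatingHours.format_hours(hours)
#=> [{Weekday.Monday, nil, "09:00 AM - 05:00 PM"}]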
|
apps/faqcheck/lib/faqcheck/referrals/operating_hours.ex
| 0.535584 | 0.476214 |
operating_hours.ex
|
starcoder
|
defmodule Grizzly.ZWave.CommandClasses.Indicator do
@moduledoc """
"Indicator" Command Class
The Indicator Command Class is used to help end users to monitor the operation or condition of the
application provided by a supporting node.
"""
@behaviour Grizzly.ZWave.CommandClass
alias Grizzly.ZWave.DecodeError
@type value :: byte | :on | :off | :restore
@type resource :: [indicator_id: indicator_id, property_id: property_id, value: byte]
@type indicator_id ::
byte
| :armed
| :disarmed
| :ready
| :fault
| :busy
| :enter_id
| :enter_pin
| :code_accepted
| :armed_stay
| :armed_away
| :alarming
| :alarming_burglar
| :alarming_smoke_fire
| :alarming_co
| :bypass_challenge
| :entry_delay
| :alarming_medical
| :alarming_freeze_warning
| :alarming_water_leak
| :alarming_panic
| :zone_1_armed
| :zone_2_armed
| :zone_3_armed
| :zone_4_armed
| :zone_5_armed
| :zone_6_armed
| :zone_7_armed
| :zone_8_armed
| :lcd_backlight
| :button_1
| :button_2
| :button_3
| :button_4
| :button_5
| :button_6
| :button_7
| :button_8
| :button_9
| :button_10
| :button_11
| :button_12
| :node_identify
| :sound_1
| :sound_2
| :sound_3
| :sound_4
| :sound_5
| :sound_6
| :sound_7
| :sound_8
| :sound_9
| :sound_10
| :sound_11
| :sound_12
| :sound_13
| :sound_14
| :sound_15
| :sound_16
| :sound_17
| :sound_18
| :sound_19
| :sound_20
| :sound_21
| :sound_22
| :sound_23
| :sound_24
| :sound_25
| :sound_26
| :sound_27
| :sound_28
| :sound_29
| :sound_30
| :sound_31
| :sound_32
| :undefined
@type property_id ::
byte
| :multilevel
| :binary
| :toggling_periods
| :toggling_cycles
| :toggling_on_time
| :timeout_minutes
| :timeout_seconds
| :timeout_hundredths_second
| :sound_level
| :low_power
| :undefined
@impl true
def byte(), do: 0x87
@impl true
def name(), do: :indicator
def indicator_id_to_byte(:undefined), do: 0x00
def indicator_id_to_byte(:armed), do: 0x01
def indicator_id_to_byte(:disarmed), do: 0x02
def indicator_id_to_byte(:ready), do: 0x03
def indicator_id_to_byte(:fault), do: 0x04
def indicator_id_to_byte(:busy), do: 0x05
def indicator_id_to_byte(:enter_id), do: 0x06
def indicator_id_to_byte(:enter_pin), do: 0x07
def indicator_id_to_byte(:code_accepted), do: 0x08
def indicator_id_to_byte(:code_not_accepted), do: 0x09
def indicator_id_to_byte(:armed_stay), do: 0x0A
def indicator_id_to_byte(:armed_away), do: 0x0B
def indicator_id_to_byte(:alarming), do: 0x0C
def indicator_id_to_byte(:alarming_burglar), do: 0x0D
def indicator_id_to_byte(:alarming_smoke_fire), do: 0x0E
def indicator_id_to_byte(:alarming_co), do: 0x0F
def indicator_id_to_byte(:bypass_challenge), do: 0x10
def indicator_id_to_byte(:entry_delay), do: 0x11
def indicator_id_to_byte(:exit_delay), do: 0x12
def indicator_id_to_byte(:alarming_medical), do: 0x13
def indicator_id_to_byte(:alarming_freeze_warning), do: 0x14
def indicator_id_to_byte(:alarming_water_leak), do: 0x15
def indicator_id_to_byte(:alarming_panic), do: 0x16
def indicator_id_to_byte(:zone_1_armed), do: 0x20
def indicator_id_to_byte(:zone_2_armed), do: 0x21
def indicator_id_to_byte(:zone_3_armed), do: 0x22
def indicator_id_to_byte(:zone_4_armed), do: 0x23
def indicator_id_to_byte(:zone_5_armed), do: 0x24
def indicator_id_to_byte(:zone_6_armed), do: 0x25
def indicator_id_to_byte(:zone_7_armed), do: 0x26
def indicator_id_to_byte(:zone_8_armed), do: 0x27
def indicator_id_to_byte(:lcd_backlight), do: 0x30
def indicator_id_to_byte(:button_backlit_letters), do: 0x40
def indicator_id_to_byte(:button_backlit_digits), do: 0x41
def indicator_id_to_byte(:button_backlit_commands), do: 0x42
def indicator_id_to_byte(:button_1), do: 0x43
def indicator_id_to_byte(:button_2), do: 0x44
def indicator_id_to_byte(:button_3), do: 0x45
def indicator_id_to_byte(:button_4), do: 0x46
def indicator_id_to_byte(:button_5), do: 0x47
def indicator_id_to_byte(:button_6), do: 0x48
def indicator_id_to_byte(:button_7), do: 0x49
def indicator_id_to_byte(:button_8), do: 0x4A
def indicator_id_to_byte(:button_9), do: 0x4B
def indicator_id_to_byte(:button_10), do: 0x4C
def indicator_id_to_byte(:button_11), do: 0x4D
def indicator_id_to_byte(:button_12), do: 0x4E
def indicator_id_to_byte(:node_identify), do: 0x50
def indicator_id_to_byte(:sound_1), do: 0x60
def indicator_id_to_byte(:sound_2), do: 0x61
def indicator_id_to_byte(:sound_3), do: 0x62
def indicator_id_to_byte(:sound_4), do: 0x63
def indicator_id_to_byte(:sound_5), do: 0x64
def indicator_id_to_byte(:sound_6), do: 0x65
def indicator_id_to_byte(:sound_7), do: 0x66
def indicator_id_to_byte(:sound_8), do: 0x67
def indicator_id_to_byte(:sound_9), do: 0x68
def indicator_id_to_byte(:sound_10), do: 0x69
def indicator_id_to_byte(:sound_11), do: 0x6A
def indicator_id_to_byte(:sound_12), do: 0x6B
def indicator_id_to_byte(:sound_13), do: 0x6C
def indicator_id_to_byte(:sound_14), do: 0x6D
def indicator_id_to_byte(:sound_15), do: 0x6E
def indicator_id_to_byte(:sound_16), do: 0x6F
def indicator_id_to_byte(:sound_17), do: 0x70
def indicator_id_to_byte(:sound_18), do: 0x71
def indicator_id_to_byte(:sound_19), do: 0x72
def indicator_id_to_byte(:sound_20), do: 0x73
def indicator_id_to_byte(:sound_21), do: 0x74
def indicator_id_to_byte(:sound_22), do: 0x75
def indicator_id_to_byte(:sound_23), do: 0x76
def indicator_id_to_byte(:sound_24), do: 0x77
def indicator_id_to_byte(:sound_25), do: 0x78
def indicator_id_to_byte(:sound_26), do: 0x79
def indicator_id_to_byte(:sound_27), do: 0x7A
def indicator_id_to_byte(:sound_28), do: 0x7B
def indicator_id_to_byte(:sound_29), do: 0x7C
def indicator_id_to_byte(:sound_30), do: 0x7D
def indicator_id_to_byte(:sound_31), do: 0x7E
def indicator_id_to_byte(:sound_32), do: 0x7F
def indicator_id_to_byte(:buzzer), do: 0xF0
def indicator_id_to_byte(byte) when byte in 0x00..0xF0, do: byte
def property_id_to_byte(:undefined), do: 0x00
def property_id_to_byte(:multilevel), do: 0x01
def property_id_to_byte(:binary), do: 0x02
def property_id_to_byte(:toggling_periods), do: 0x03
def property_id_to_byte(:toggling_cycles), do: 0x04
def property_id_to_byte(:toggling_on_time), do: 0x05
def property_id_to_byte(:timeout_minutes), do: 0x06
def property_id_to_byte(:timeout_seconds), do: 0x07
def property_id_to_byte(:timeout_hundredths_second), do: 0x08
def property_id_to_byte(:sound_level), do: 0x09
def property_id_to_byte(:low_power), do: 0x0A
def property_id_to_byte(byte) when byte in 0..10, do: byte
def value_to_byte(:off, :multilevel), do: 0x00
def value_to_byte(:restore, :multilevel), do: 0xFF
def value_to_byte(byte, :multilevel) when byte in 0x00..0x63, do: byte
def value_to_byte(0xFF, :multilevel), do: 0xFF
def value_to_byte(:off, :binary), do: 0x00
def value_to_byte(:on, :binary), do: 0xFF
def value_to_byte(byte, :binary) when byte in 0x00..0x63, do: byte
def value_to_byte(0xFF, :binary), do: 0xFF
def value_to_byte(byte, _property_id), do: byte
def indicator_id_from_byte(0x01), do: {:ok, :armed}
def indicator_id_from_byte(0x02), do: {:ok, :disarmed}
def indicator_id_from_byte(0x03), do: {:ok, :ready}
def indicator_id_from_byte(0x04), do: {:ok, :fault}
def indicator_id_from_byte(0x05), do: {:ok, :busy}
def indicator_id_from_byte(0x06), do: {:ok, :enter_id}
def indicator_id_from_byte(0x07), do: {:ok, :enter_pin}
def indicator_id_from_byte(0x08), do: {:ok, :code_accepted}
def indicator_id_from_byte(0x09), do: {:ok, :code_not_accepted}
def indicator_id_from_byte(0x0A), do: {:ok, :armed_stay}
def indicator_id_from_byte(0x0B), do: {:ok, :armed_away}
def indicator_id_from_byte(0x0C), do: {:ok, :alarming}
def indicator_id_from_byte(0x0D), do: {:ok, :alarming_burglar}
def indicator_id_from_byte(0x0E), do: {:ok, :alarming_smoke_fire}
def indicator_id_from_byte(0x0F), do: {:ok, :alarming_co}
def indicator_id_from_byte(0x10), do: {:ok, :bypass_challenge}
def indicator_id_from_byte(0x11), do: {:ok, :entry_delay}
def indicator_id_from_byte(0x12), do: {:ok, :exit_delay}
def indicator_id_from_byte(0x13), do: {:ok, :alarming_medical}
def indicator_id_from_byte(0x14), do: {:ok, :alarming_freeze_warning}
def indicator_id_from_byte(0x15), do: {:ok, :alarming_water_leak}
def indicator_id_from_byte(0x16), do: {:ok, :alarming_panic}
def indicator_id_from_byte(0x20), do: {:ok, :zone_1_armed}
def indicator_id_from_byte(0x21), do: {:ok, :zone_2_armed}
def indicator_id_from_byte(0x22), do: {:ok, :zone_3_armed}
def indicator_id_from_byte(0x23), do: {:ok, :zone_4_armed}
def indicator_id_from_byte(0x24), do: {:ok, :zone_5_armed}
def indicator_id_from_byte(0x25), do: {:ok, :zone_6_armed}
def indicator_id_from_byte(0x26), do: {:ok, :zone_7_armed}
def indicator_id_from_byte(0x27), do: {:ok, :zone_8_armed}
def indicator_id_from_byte(0x30), do: {:ok, :lcd_backlight}
def indicator_id_from_byte(0x40), do: {:ok, :button_backlit_letters}
def indicator_id_from_byte(0x41), do: {:ok, :button_backlit_digits}
def indicator_id_from_byte(0x42), do: {:ok, :button_backlit_commands}
def indicator_id_from_byte(0x43), do: {:ok, :button_1}
def indicator_id_from_byte(0x44), do: {:ok, :button_2}
def indicator_id_from_byte(0x45), do: {:ok, :button_3}
def indicator_id_from_byte(0x46), do: {:ok, :button_4}
def indicator_id_from_byte(0x47), do: {:ok, :button_5}
def indicator_id_from_byte(0x48), do: {:ok, :button_6}
def indicator_id_from_byte(0x49), do: {:ok, :button_7}
def indicator_id_from_byte(0x4A), do: {:ok, :button_8}
def indicator_id_from_byte(0x4B), do: {:ok, :button_9}
def indicator_id_from_byte(0x4C), do: {:ok, :button_10}
def indicator_id_from_byte(0x4D), do: {:ok, :button_11}
def indicator_id_from_byte(0x4E), do: {:ok, :button_12}
def indicator_id_from_byte(0x50), do: {:ok, :node_identify}
def indicator_id_from_byte(0x60), do: {:ok, :sound_1}
def indicator_id_from_byte(0x61), do: {:ok, :sound_2}
def indicator_id_from_byte(0x62), do: {:ok, :sound_3}
def indicator_id_from_byte(0x63), do: {:ok, :sound_4}
def indicator_id_from_byte(0x64), do: {:ok, :sound_5}
def indicator_id_from_byte(0x65), do: {:ok, :sound_6}
def indicator_id_from_byte(0x66), do: {:ok, :sound_7}
def indicator_id_from_byte(0x67), do: {:ok, :sound_8}
def indicator_id_from_byte(0x68), do: {:ok, :sound_9}
def indicator_id_from_byte(0x69), do: {:ok, :sound_10}
def indicator_id_from_byte(0x6A), do: {:ok, :sound_11}
def indicator_id_from_byte(0x6B), do: {:ok, :sound_12}
def indicator_id_from_byte(0x6C), do: {:ok, :sound_13}
def indicator_id_from_byte(0x6D), do: {:ok, :sound_14}
def indicator_id_from_byte(0x6E), do: {:ok, :sound_15}
def indicator_id_from_byte(0x6F), do: {:ok, :sound_16}
def indicator_id_from_byte(0x70), do: {:ok, :sound_17}
def indicator_id_from_byte(0x71), do: {:ok, :sound_18}
def indicator_id_from_byte(0x72), do: {:ok, :sound_19}
def indicator_id_from_byte(0x73), do: {:ok, :sound_20}
def indicator_id_from_byte(0x74), do: {:ok, :sound_21}
def indicator_id_from_byte(0x75), do: {:ok, :sound_22}
def indicator_id_from_byte(0x76), do: {:ok, :sound_23}
def indicator_id_from_byte(0x77), do: {:ok, :sound_24}
def indicator_id_from_byte(0x78), do: {:ok, :sound_25}
def indicator_id_from_byte(0x79), do: {:ok, :sound_26}
def indicator_id_from_byte(0x7A), do: {:ok, :sound_27}
def indicator_id_from_byte(0x7B), do: {:ok, :sound_28}
def indicator_id_from_byte(0x7C), do: {:ok, :sound_29}
def indicator_id_from_byte(0x7D), do: {:ok, :sound_30}
def indicator_id_from_byte(0x7E), do: {:ok, :sound_31}
def indicator_id_from_byte(0x7F), do: {:ok, :sound_32}
def indicator_id_from_byte(0xF0), do: {:ok, :buzzer}
# Devices can return an indicator id == 0
def indicator_id_from_byte(0x00), do: {:ok, :undefined}
def indicator_id_from_byte(byte) when byte in 0x80..0x9F, do: {:ok, byte}
def indicator_id_from_byte(byte),
do: {:error, %DecodeError{value: byte, param: :indicator_id, command: nil}}
def property_id_from_byte(0x01), do: {:ok, :multilevel}
def property_id_from_byte(0x02), do: {:ok, :binary}
def property_id_from_byte(0x03), do: {:ok, :toggling_periods}
def property_id_from_byte(0x04), do: {:ok, :toggling_cycles}
def property_id_from_byte(0x05), do: {:ok, :toggling_on_time}
def property_id_from_byte(0x06), do: {:ok, :timeout_minutes}
def property_id_from_byte(0x07), do: {:ok, :timeout_seconds}
def property_id_from_byte(0x08), do: {:ok, :timeout_hundredths_second}
def property_id_from_byte(0x09), do: {:ok, :sound_level}
def property_id_from_byte(0x0A), do: {:ok, :low_power}
def property_id_from_byte(0x00), do: {:ok, :undefined}
def property_id_from_byte(byte),
do: {:error, %DecodeError{value: byte, param: :property_id, command: nil}}
def value_from_byte(0x00, :multilevel), do: {:ok, :off}
def value_from_byte(0xFF, :multilevel), do: {:ok, :restore}
def value_from_byte(byte, :multilevel) when byte in 0x01..0x63, do: {:ok, byte}
def value_from_byte(byte, :multilevel),
do: {:error, %DecodeError{value: byte, param: :value, command: nil}}
def value_from_byte(0x00, :binary), do: {:ok, :off}
def value_from_byte(0xFF, :binary), do: {:ok, :on}
def value_from_byte(byte, :binary),
do: {:error, %DecodeError{value: byte, param: :value, command: nil}}
def value_from_byte(byte, _property_id), do: {:ok, byte}
end
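# Sketch (not part of the original file): round-tripping an indicator id and a
# property id through the byte encodings defined above.
alias Grizzly.ZWave.CommandClasses.Indicator
0x50 = Indicator.indicator_id_to_byte(:node_identify)
{:ok, :node_identify} = Indicator.indicator_id_from_byte(0x50)
{:ok, :binary} = Indicator.property_id_from_byte(Indicator.property_id_to_byte(:binary))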
|
lib/grizzly/zwave/command_classes/indicator.ex
| 0.73412 | 0.449393 |
indicator.ex
|
starcoder
|
defmodule DistributedTasks do
@moduledoc """
Distributed tasks provide a nice way to run a unique task across elixir cluster.
Distributed tasks allow us to start a process running our task and be sure only one instance of it will be running across cluster. Nodes for tasks are picked by a consistent hashing algorithm which evenly distributes the tasks. Callback function can be added to a task to store the result. Unique name can be assigned to the task and status of the task can be tracked using this name.
Orchestrator process is part of this library. It is started on one node of the cluster randomly and registered in the Global registry. This process is responsible for starting workers executing tasks. If the node where the orchestrator lives dies, the process is restarted automatically on another node. Even though this transition should take milliseconds, it is possible to get a raise while trying to start a new task with the error `** (EXIT) no process: the process is not alive or there's no process currently associated with the given name`.
Upon starting the worker process, it is registered in distributed Registry uniquely identified by a given name. In case name is not provided, random uuid is used as a name. Starting another task with the same name would result in `{:error, {:running, pid}}` tuple.
Orchestrator process keeps track of all the running task names in its state and also keeps identifiers for finished tasks for 1 minute. If it dies for any reason, a new process is started and it reads the registry to restore state. State of finished tasks is lost, but currently running tasks are preserved.
"""
alias DistributedTasks.DefaultImpl
@behaviour DistributedTasks.Behaviour
@doc """
Spins up a process that's running given function on one of the nodes of the cluster.
Jobs are separated evenly between nodes.
If node with running process goes down, the progress is not saved.
opts can contain:
* [name: "unique_name"]
if you want to track the task by specific unique name
* [callback: fn _process_name, _calculation_result -> IO.inspect("done") end]
if you want to run some code when task is finished
"""
@impl true
def start_async(mod, fun, args, opts) do
current_impl().start_async(mod, fun, args, opts)
end
@doc """
Returns status of a process.
"""
@impl true
def get_status(name) do
current_impl().get_status(name)
end
defp current_impl() do
Application.get_env(:distributed_tasks, :impl, DefaultImpl)
end
end
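# Hedged sketch (not part of the original module): starting a named task with a
# completion callback, then checking on it. MyWorker and its :compute function
# are hypothetical; the option shapes follow the start_async/4 docs above.
DistributedTasks.start_async(MyWorker, :compute, [42],
  name: "unique_name",
  callback: fn _name, result -> IO.inspect(result, label: "done") end
)
DistributedTasks.get_status("unique_name")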
|
lib/distributed_tasks.ex
| 0.844794 | 0.641647 |
distributed_tasks.ex
|
starcoder
|
defmodule Dynamo.Filters.Session do
@moduledoc """
The session filter. When added to your Dynamo, this filter allows
you to fetch the session and to serialize it back.
When initialized, this filter supports a set of options:
* `key` - The name of the session cookie. This option is required;
Besides is supports the following cookie related options:
* `secure` - Marks the cookie as secure;
* `domain` - The domain to which the cookie applies;
* `path` - The path to which the cookie applies;
* `http_only` - If the cookie is sent only via http. Default to true;
The options above may also be set during the request, by using
`Dynamo.HTTP.Session.configure_session/3`. For instance, to
mark a session as secure, one can do:
configure_session(conn, :secure, true)
"""
defexception CookieOverflowError, message: "the session cookie exceeds the 4kb limit"
@limit 4096
@session :dynamo_session
@opts :dynamo_session_opts
@doc false
def new(store, opts) do
unless key = opts[:key] do
raise ArgumentError, message: "Expected session key to be given"
end
{ __MODULE__, store, key, store.setup(opts) }
end
@doc false
def prepare(conn, { __MODULE__, store, key, opts }) do
conn
.fetchable(:session, fetch(&1, store, key, opts))
.before_send(serialize(&1, store, key, opts))
end
defp fetch(conn, store, key, opts) do
conn = conn.fetch(:cookies)
{ id, value } =
case conn.req_cookies[key] do
nil -> { nil, [] }
cookie -> store.get_session(cookie, opts)
end
conn.put_private(@session, value).put_private(@opts, { id, false, [] })
end
defp serialize(conn, store, key, opts) do
if session = conn.private[@session] do
{ id, written, user_opts } = conn.private[@opts]
if user_opts[:renew] && id do
id = nil
written = true
store.delete_session(id, opts)
end
if written do
opts = user_opts ++ opts
value = store.put_session(id, session, opts)
unless conn.req_cookies[key] == value do
if size(value) > @limit, do: raise(CookieOverflowError)
conn.put_resp_cookie(key, value, opts)
end
end
end || conn
end
end
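# Hedged wiring sketch (not part of the original file): in a Dynamo definition,
# the filter would be added with a session store module. The store name below
# is illustrative; the options map to the moduledoc above. Left commented
# because it only compiles inside a Dynamo router.
#
#   filter Dynamo.Filters.Session.new(Session.CookieStore,
#     key: "_my_app_session", secure: true, http_only: true)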
|
lib/dynamo/filters/session.ex
| 0.779825 | 0.456046 |
session.ex
|
starcoder
|
defmodule Abit.Counter do
@moduledoc """
Use `:atomics` as an array of counters with N bits per counter.
An `:atomics` is an array of 64 bit integers so the possible counters are below:
Possible counters:
bits | unsigned value range | signed value range
2 | 0..3 | -2..1
4 | 0..15 | -8..7
8 | 0..255 | -128..127
16 | 0..65535 | -32768..32767
32 | 0..4294967295 | -2147483648..2147483647
If you need 64 bit counters use
[Erlang counters](http://erlang.org/doc/man/counters.html)
The option `:wrap_around` is set to `false` by default. With these
small-ish counters this is a safe default.
When `:wrap_around` is `false`, calling `put/3` or `add/3` with a value that
would be out of bounds returns the error tuple `{:error, :value_out_of_bounds}`
and leaves the stored counter value unchanged.
While Erlang `:atomics` are 1 indexed, `Abit.Counter` counters are 0 indexed.
## Enumerable protocol
`Abit.Counter` implements the Enumerable protocol, so all Enum functions can be used:
iex> c = Abit.Counter.new(1000, 16, signed: false)
iex> c |> Abit.Counter.put(700, 54321)
iex> c |> Enum.max()
54321
## Examples
iex> c = Abit.Counter.new(1000, 8, signed: false)
iex> c |> Abit.Counter.put(0, 100)
{:ok, {0, 100}}
iex> c |> Abit.Counter.add(0, 100)
{:ok, {0, 200}}
iex> c |> Abit.Counter.add(0, 100)
{:error, :value_out_of_bounds}
"""
@bit_sizes [2, 4, 8, 16, 32]
alias Abit.Counter
@keys [:atomics_ref, :signed, :wrap_around, :size, :counters_bit_size, :min, :max]
@enforce_keys @keys
defstruct @keys
@type t :: %__MODULE__{
atomics_ref: reference,
signed: boolean,
wrap_around: boolean,
size: pos_integer,
counters_bit_size: 2 | 4 | 8 | 16 | 32,
min: integer,
max: pos_integer
}
@doc """
Returns a new `%Abit.Counter{}` struct.
* `size` - minimum number of counters to have, counters will fully fill the `:atomics`.
Check the `:size` key in the returned `%Abit.Counter{}` for the exact number of counters
* `counters_bit_size` - how many bits a counter should use
## Options
* `:signed` - whether to have signed or unsigned counters. Defaults to `true`.
* `:wrap_around` - whether counters should wrap around. Defaults to `false`.
## Examples
Abit.Counter.new(100, 8) # minimum 100 counters; 8 bits signed
Abit.Counter.new(10_000, 16, signed: false) # minimum 10_000 counters; 16 bits unsigned
Abit.Counter.new(10_000, 16, wrap_around: false) # don't wrap around
"""
@spec new(non_neg_integer, 2 | 4 | 8 | 16 | 32, list) :: t
def new(size, counters_bit_size, options \\ [])
when is_integer(size) and is_integer(counters_bit_size) do
if counters_bit_size not in @bit_sizes do
raise ArgumentError,
"You can't create an %Abit.Counter{} with counters_bit_size #{counters_bit_size}." <>
"Possible values are #{inspect(@bit_sizes)}"
end
signed = options |> Keyword.get(:signed, true)
wrap_around = options |> Keyword.get(:wrap_around, false)
atomics_size = Float.ceil(size / (64 / counters_bit_size)) |> round()
atomics_ref = :atomics.new(atomics_size, signed: false)
{min, max} = counter_range(signed, counters_bit_size)
%Counter{
atomics_ref: atomics_ref,
signed: signed,
wrap_around: wrap_around,
size: atomics_size * round(64 / counters_bit_size),
counters_bit_size: counters_bit_size,
min: min,
max: max
}
end
@doc """
Returns the value of counter at `index`.
## Examples
iex> c = Abit.Counter.new(10, 8)
iex> c |> Abit.Counter.get(7)
0
"""
@spec get(t, non_neg_integer) :: integer
def get(
%Counter{atomics_ref: atomics_ref, signed: signed, counters_bit_size: counters_bit_size},
index
)
when index >= 0 do
{atomics_index, bit_index} = Abit.bit_position(counters_bit_size * index)
atomics_value = :atomics.get(atomics_ref, atomics_index)
get_value(signed, counters_bit_size, bit_index, <<atomics_value::64>>)
end
@doc """
Puts the value into the counter at `index`.
Returns `{:ok, {index, final_value}}` or `{:error, :value_out_of_bounds}` if
option `wrap_around` is `false` and value is out of bounds.
## Examples
iex> c = Abit.Counter.new(10, 8)
iex> c |> Abit.Counter.put(7, -12)
{:ok, {7, -12}}
iex> c |> Abit.Counter.get(7)
-12
iex> c |> Abit.Counter.put(7, 128)
{:error, :value_out_of_bounds}
"""
@spec put(t, non_neg_integer, integer) ::
{:ok, {non_neg_integer, integer}} | {:error, :value_out_of_bounds}
def put(%Counter{wrap_around: false, min: min, max: max}, _, value)
when value < min or value > max do
{:error, :value_out_of_bounds}
end
def put(
%Counter{atomics_ref: atomics_ref, signed: signed, counters_bit_size: counters_bit_size},
index,
value
)
when index >= 0 do
{atomics_index, bit_index} = Abit.bit_position(counters_bit_size * index)
atomics_value = :atomics.get(atomics_ref, atomics_index)
{final_counter_value, <<next_atomics_value::64>>} =
put_value(signed, counters_bit_size, bit_index, <<atomics_value::64>>, value)
:atomics.put(atomics_ref, atomics_index, next_atomics_value)
{:ok, {index, final_counter_value}}
end
@doc """
Increments the value of the counter at `index` by `incr`.
Returns `{:ok, {index, final_value}}` or `{:error, :value_out_of_bounds}` if
option `wrap_around` is `false` and value is out of bounds.
## Examples
iex> c = Abit.Counter.new(10, 8)
iex> c |> Abit.Counter.add(7, -12)
{:ok, {7, -12}}
iex> c |> Abit.Counter.add(7, 36)
{:ok, {7, 24}}
iex> c |> Abit.Counter.add(1, 1000)
{:error, :value_out_of_bounds}
"""
@spec add(t, non_neg_integer, integer) ::
{:ok, {non_neg_integer, integer}} | {:error, :value_out_of_bounds}
def add(
counter = %Counter{
atomics_ref: atomics_ref,
signed: signed,
wrap_around: wrap_around,
counters_bit_size: counters_bit_size,
min: min,
max: max
},
index,
incr
)
when index >= 0 do
{atomics_index, bit_index} = Abit.bit_position(counters_bit_size * index)
atomics_value = :atomics.get(atomics_ref, atomics_index)
current_value = get_value(signed, counters_bit_size, bit_index, <<atomics_value::64>>)
next_value = current_value + incr
case {wrap_around, next_value < min or next_value > max} do
{false, true} ->
{:error, :value_out_of_bounds}
{_, _} ->
{final_counter_value, <<next_atomics_value::64>>} =
put_value(signed, counters_bit_size, bit_index, <<atomics_value::64>>, next_value)
case :atomics.compare_exchange(
atomics_ref,
atomics_index,
atomics_value,
next_atomics_value
) do
:ok ->
{:ok, {index, final_counter_value}}
_other_value ->
# The value at index was different. To keep the increment correct we retry.
add(counter, index, incr)
end
end
end
@doc """
Returns `true` if any counter has the value `integer`,
`false` otherwise.
## Examples
iex> c = Abit.Counter.new(100, 8)
iex> c |> Abit.Counter.member?(0)
true
iex> c |> Abit.Counter.member?(80)
false
"""
@doc since: "0.2.4"
@spec member?(t, integer) :: boolean
def member?(
%Counter{
atomics_ref: atomics_ref,
min: min,
max: max
} = counter,
int
)
when is_integer(int) do
case int do
i when i < min ->
false
i when i > max ->
false
_else ->
do_member?(counter, int, 1, :atomics.info(atomics_ref).size)
end
end
defp do_member?(counter, int, index, index) do
int in get_all_at_atomic(counter, index)
end
defp do_member?(counter, int, index, atomics_size) do
case int in get_all_at_atomic(counter, index) do
true -> true
false -> do_member?(counter, int, index + 1, atomics_size)
end
end
@doc """
Returns all counters from atomics at index.
Index of atomics are one-based.
## Examples
iex> c = Abit.Counter.new(100, 8)
iex> c |> Abit.Counter.put(3, -70)
iex> c |> Abit.Counter.get_all_at_atomic(1)
[0, 0, 0, 0, -70, 0, 0, 0]
"""
@doc since: "0.2.4"
@spec get_all_at_atomic(t, pos_integer) :: list(integer)
def get_all_at_atomic(
%Counter{atomics_ref: atomics_ref, signed: signed, counters_bit_size: bit_size},
atomic_index
)
when is_integer(atomic_index) do
atomic = :atomics.get(atomics_ref, atomic_index)
integer_to_counters(atomic, signed, bit_size)
end
defimpl Enumerable do
@moduledoc false
@moduledoc since: "0.2.4"
alias Abit.Counter
def count(%Counter{size: size}) do
{:ok, size}
end
def member?(%Counter{} = counter, int) when is_integer(int) do
{:ok, Counter.member?(counter, int)}
end
def slice(%Counter{size: size} = counter) do
{
:ok,
size,
fn start, length ->
do_slice(counter, start, length)
end
}
end
defp do_slice(_, _, 0), do: []
defp do_slice(counter, index, length) do
[counter |> Counter.get(index) | do_slice(counter, index + 1, length - 1)]
end
def reduce(%Counter{atomics_ref: atomics_ref} = counter, acc, fun) do
size = :atomics.info(atomics_ref).size
do_reduce({counter, [], 0, size}, acc, fun)
end
def do_reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
def do_reduce(tuple, {:suspend, acc}, fun), do: {:suspended, acc, &do_reduce(tuple, &1, fun)}
def do_reduce({_, [], size, size}, {:cont, acc}, _fun), do: {:done, acc}
def do_reduce({counter, [h | tl], index, size}, {:cont, acc}, fun) do
do_reduce(
{counter, tl, index, size},
fun.(h, acc),
fun
)
end
def do_reduce({counter, [], index, size}, {:cont, acc}, fun) do
[h | tl] = Counter.get_all_at_atomic(counter, index + 1)
do_reduce(
{counter, tl, index + 1, size},
fun.(h, acc),
fun
)
end
end
@bit_sizes
|> Enum.each(fn counters_bit_size ->
0..63
|> Enum.filter(fn n -> rem(n, counters_bit_size) == 0 end)
|> Enum.each(fn bit_index ->
bit_left_start = bit_index + counters_bit_size
left_bits = 64 - bit_left_start
right_bits = bit_left_start - counters_bit_size
defp unquote(:get_value)(
false,
unquote(counters_bit_size),
unquote(bit_index),
<<_::unquote(left_bits), value::unquote(counters_bit_size), _::unquote(right_bits)>>
) do
value
end
defp unquote(:get_value)(
true,
unquote(counters_bit_size),
unquote(bit_index),
<<_left::unquote(left_bits), value::unquote(counters_bit_size)-signed,
_right::unquote(right_bits)>>
) do
value
end
defp unquote(:put_value)(
false,
unquote(counters_bit_size),
unquote(bit_index),
<<left::unquote(left_bits), _current_value::unquote(counters_bit_size),
right::unquote(right_bits)>>,
new_value
) do
<<final_counter_value::unquote(counters_bit_size)>> =
<<new_value::unquote(counters_bit_size)>>
{
final_counter_value,
<<left::unquote(left_bits), new_value::unquote(counters_bit_size),
right::unquote(right_bits)>>
}
end
defp unquote(:put_value)(
true,
unquote(counters_bit_size),
unquote(bit_index),
<<left::unquote(left_bits), _current_value::unquote(counters_bit_size)-signed,
right::unquote(right_bits)>>,
new_value
) do
<<final_counter_value::unquote(counters_bit_size)-signed>> =
<<new_value::unquote(counters_bit_size)-signed>>
{
final_counter_value,
<<left::unquote(left_bits), new_value::unquote(counters_bit_size)-signed,
right::unquote(right_bits)>>
}
end
end)
end)
defp integer_to_counters(integer, signed, bit_size) do
do_integer_to_counters(<<integer::64>>, signed, bit_size)
end
for bit_size <- @bit_sizes do
defp do_integer_to_counters(
<<int::unquote(bit_size), rest::bitstring>>,
false,
unquote(bit_size)
) do
[int | do_integer_to_counters(rest, false, unquote(bit_size))]
end
defp do_integer_to_counters(
<<int::unquote(bit_size)-signed, rest::bitstring>>,
true,
unquote(bit_size)
) do
[int | do_integer_to_counters(rest, true, unquote(bit_size))]
end
defp do_integer_to_counters(<<>>, _, _), do: []
end
# Returns {min, max} range of counters for given signed & bit_size
defp counter_range(signed, bit_size) do
import Bitwise
case signed do
false -> {0, (1 <<< bit_size) - 1}
true -> {-(1 <<< (bit_size - 1)), (1 <<< (bit_size - 1)) - 1}
end
end
end
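# Sketch (not in the original file): with wrap_around: true, a signed 4-bit
# counter overflows from its max (7) to its min (-8), per put_value/5 above.
c = Abit.Counter.new(16, 4, wrap_around: true)
{:ok, {0, 7}} = Abit.Counter.add(c, 0, 7)
{:ok, {0, -8}} = Abit.Counter.add(c, 0, 1)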
|
lib/abit/counter.ex
| 0.941868 | 0.586582 |
counter.ex
|
starcoder
|
defmodule Server.MPCwallet do
@moduledoc """
Library for the server part of the MPC-based API keys.
Glossary of terms:
- MPC: Multi-party computation. A method to have multiple parties compute a function while keeping their inputs private. For our case, the function is creating a digital signature and the (private) inputs are secret shares.
- DH: Diffie-Hellman key exchange. A method to derive a shared secret key over an insecure channel.
- Paillier: A public-key, additive homomorphic cryptosystem. Allows the client to conduct operations on the ciphertext.
- r: Random value shared between client and server (derived via DH), from which the r value of the ECDSA signature is derived.
- k: Server part of the nonce used in the signature. This should be handled like a secret key, i.e., stored securely and deleted/zeroized after use.
- curve: Elliptic curve to be used in an ECDSA signature. Currently we support secp256k1 (for BTC and ETH) and secp256r1 (for NEO).
- presig: A presignature generated on the client that can be finalized to a conventional signature by the server.
- rpool: Pool of random values shared between client and server that allows to generate signatures with a single message.
- r, s: a conventional ECDSA signature.
- recovery_id: 2 bits that help recovering the public key from a signature, used in Ethereum to save space.
- correct_key_proof: ZK proof that the Paillier public key was generated correctly.
"""
use Rustler, otp_app: :server, crate: "mpc_wallet_elixir"
@doc ~S"""
Generate Paillier keypair with safe primes and proof that the keypair was generated correctly.
## Parameters
- none
## Returns
- Paillier secret key
- Paillier public key
- correct_key_proof: proof
"""
def generate_paillier_keypair_and_proof(), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Compute rpool values via Diffie-Hellman.
## Parameters
- client_dh_publics: list of DH public keys received from the client
- curve: Secp256k1 or Secp256r1 curve
## Returns
- rpool_new: map of rpool values to be added to the local pool
- server_dh_publics: list of public keys (to be sent to the client)
"""
def dh_rpool(_client_dh_publics, _curve), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Complete presignature to conventional ECDSA signature.
## Parameters
- paillier_sk: Paillier secret key
- presig: presignature received from client
- r: random value shared between server and client
- k: server part of the nonce used in the signature
- curve: Secp256k1 or Secp256r1 curve
- public_key: public key under which the completed signature is (supposed to be) valid
- msg_hash: hash of the message under which the completed signature is (supposed to be) valid
## Returns
- r: r part of a conventional ECDSA signature
- s: s part of a conventional ECDSA signature
- recovery_id: 2 bits that help recovering the public key from a signature
"""
def complete_sig(_paillier_sk, _presig, _r, _k, _curve, _pubkey, _msg_hash), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Verify conventional ECDSA signature.
## Parameters
- r: r part of a conventional ECDSA signature
- s: s part of a conventional ECDSA signature
- pubkey: ECDSA public key
- msg_hash: hash of the message
- curve: Secp256k1 or Secp256r1 curve
## Returns
- ok | error: boolean indicating success
"""
def verify(_r, _s, _pubkey, _msg_hash, _curve), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Generate key pairs for Diffie-Hellman.
## Parameters
- n: number of key pairs to generate
- curve: Secp256k1 or Secp256r1 curve
## Returns
- dh_secrets: list of (n) secret keys
- dh_publics: list of (n) public keys
First public key corresponds to first secret key, ..
"""
def dh_init(_n, _curve), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Compute presignature of a message.
## Parameters
- apikey: API key struct
- msg_hash: hash of the message to be signed
- curve: Secp256k1 or Secp256r1 curve
## Returns
- presig: presignature that is to be completed by the server
- r: message-independent part of the signature that was used to compute the presignature
"""
def compute_presig(_apikey, _msg_hash, _curve), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
(re-)Fill pool of r-values from dh secret and public values.
## Parameters
- client_dh_secrets: list of client DH secret keys
- server_dh_publics: list of DH public keys received from the server
- curve: Secp256k1 or Secp256r1 curve
- paillier_pk: Paillier public key
## Returns
- ok | error: boolean indicating success
"""
def fill_rpool(_client_dh_secrets, _server_dh_publics, _curve, _paillier_pk), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Initialize API childkey creation by setting the full secret key.
## Parameters
- secret_key: full secret key
## Returns
- api_childkey_creator: API childkey creation struct
"""
def init_api_childkey_creator(_secret_key), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Initialize API childkey creation by setting the full secret key and the paillier public key, assuming that the paillier public key has been verified before.
## Parameters
- secret_key: full secret key
- paillier_pk: Paillier public key
## Returns
- api_childkey_creator: API childkey creation struct
"""
def init_api_childkey_creator_with_verified_paillier(_secret_key, _paillier_pk), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Verify that the Paillier public key was generated correctly.
## Parameters
- api_childkey_creator: API childkey creation struct
- paillier_pk: Paillier public key
- correct_key_proof: ZK proof that the Paillier public key was generated correctly
## Returns
- api_childkey_creator: API childkey creation struct
"""
def verify_paillier(_api_childkey_creator, _paillier_pk, _correct_key_proof), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Create API childkey.
## Parameters
- api_childkey_creator: API childkey creation struct
- curve: Secp256k1 or Secp256r1 curve
## Returns
- api_childkey: API childkey struct
"""
def create_api_childkey(_api_childkey_creator, _curve), do: :erlang.nif_error(:nif_not_loaded)
@doc ~S"""
Derive public key from given secret key.
## Parameters
- secret_key: (full) secret key
- curve: Secp256k1 or Secp256r1 curve
## Returns
- public_key: corresponding public key
"""
def publickey_from_secretkey(_secret_key, _curve), do: :erlang.nif_error(:nif_not_loaded)
end
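# Hedged flow sketch (not part of the original module): one client/server round
# for filling the rpool, composed from the NIF docs above. Requires the
# compiled Rust NIF. The tuple return shapes and the :secp256k1 curve-atom
# spelling are assumptions inferred from the docstrings, not confirmed API.
{_paillier_sk, paillier_pk, _proof} =
  Server.MPCwallet.generate_paillier_keypair_and_proof()
{dh_secrets, client_publics} = Server.MPCwallet.dh_init(10, :secp256k1)
{_rpool_new, server_publics} = Server.MPCwallet.dh_rpool(client_publics, :secp256k1)
Server.MPCwallet.fill_rpool(dh_secrets, server_publics, :secp256k1, paillier_pk)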
|
mpc-wallet/demo/server/lib/mpcwallet.ex
| 0.753467 | 0.529811 |
mpcwallet.ex
|
starcoder
|
defmodule Geometry.GeometryCollectionZ do
@moduledoc """
A collection set of 3D geometries.
`GeometryCollectionZ` implements the protocols `Enumerable` and `Collectable`.
## Examples
iex> Enum.map(
...> GeometryCollectionZ.new([
...> PointZ.new(11, 12, 13),
...> LineStringZ.new([
...> PointZ.new(21, 22, 23),
...> PointZ.new(31, 32, 33)
...> ])
...> ]),
...> fn
...> %PointZ{} -> :point
...> %LineStringZ{} -> :line_string
...> end
...> ) |> Enum.sort()
[:line_string, :point]
iex> Enum.into([PointZ.new(1, 2, 3)], GeometryCollectionZ.new())
%GeometryCollectionZ{
geometries: MapSet.new([%PointZ{coordinate: [1, 2, 3]}])
}
"""
alias Geometry.{
GeoJson,
GeometryCollectionZ,
WKB,
WKT
}
defstruct geometries: MapSet.new()
@type t :: %GeometryCollectionZ{geometries: MapSet.t(Geometry.t())}
@doc """
Creates an empty `GeometryCollectionZ`.
## Examples
iex> GeometryCollectionZ.new()
%GeometryCollectionZ{geometries: MapSet.new()}
"""
@spec new :: t()
def new, do: %GeometryCollectionZ{}
@doc """
Creates a `GeometryCollectionZ` from the given geometries.
## Examples
iex> GeometryCollectionZ.new([
...> PointZ.new(1, 2, 3),
...> LineStringZ.new([PointZ.new(1, 1, 1), PointZ.new(2, 2, 2)])
...> ])
%GeometryCollectionZ{geometries: MapSet.new([
%PointZ{coordinate: [1, 2, 3]},
%LineStringZ{points: [[1, 1, 1], [2, 2, 2]]}
])}
"""
@spec new([Geometry.t()]) :: t()
def new(geometries), do: %GeometryCollectionZ{geometries: MapSet.new(geometries)}
@doc """
Returns `true` if the given `GeometryCollectionZ` is empty.
## Examples
iex> GeometryCollectionZ.empty?(GeometryCollectionZ.new())
true
iex> GeometryCollectionZ.empty?(GeometryCollectionZ.new([PointZ.new(1, 2, 3)]))
false
"""
@spec empty?(t()) :: boolean
def empty?(%GeometryCollectionZ{geometries: geometries}), do: Enum.empty?(geometries)
@doc """
Returns the WKT representation for a `GeometryCollectionZ`. With option
`:srid` an EWKT representation with the SRID is returned.
## Examples
iex> GeometryCollectionZ.to_wkt(GeometryCollectionZ.new())
"GeometryCollection Z EMPTY"
iex> GeometryCollectionZ.to_wkt(
...> GeometryCollectionZ.new([
...> PointZ.new(1.1, 1.2, 1.3),
...> PointZ.new(2.1, 2.2, 2.3)
...> ])
...> )
"GeometryCollection Z (Point Z (1.1 1.2 1.3), Point Z (2.1 2.2 2.3))"
iex> GeometryCollectionZ.to_wkt(
...> GeometryCollectionZ.new([PointZ.new(1.1, 2.2, 3.3)]),
...> srid: 4711)
"SRID=4711;GeometryCollection Z (Point Z (1.1 2.2 3.3))"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%GeometryCollectionZ{geometries: geometries}, opts \\ []) do
WKT.to_ewkt(
<<
"GeometryCollection Z ",
geometries |> MapSet.to_list() |> to_wkt_geometries()::binary()
>>,
opts
)
end
@doc """
Returns an `:ok` tuple with the `GeometryCollectionZ` from the given WKT
string. Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> GeometryCollectionZ.from_wkt(
...> "GeometryCollection Z (Point Z (1.1 2.2 3.3))")
{
:ok,
%GeometryCollectionZ{
geometries: MapSet.new([%PointZ{coordinate: [1.1, 2.2, 3.3]}])
}
}
iex> GeometryCollectionZ.from_wkt(
...> "SRID=123;GeometryCollection Z (Point Z (1.1 2.2 3.3))")
{:ok, {
%GeometryCollectionZ{
geometries: MapSet.new([%PointZ{coordinate: [1.1, 2.2, 3.3]}])
},
123
}}
iex> GeometryCollectionZ.from_wkt("GeometryCollection Z EMPTY")
{:ok, %GeometryCollectionZ{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, GeometryCollectionZ)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, GeometryCollectionZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `GeometryCollectionZ`.
## Examples
iex> GeometryCollectionZ.to_geo_json(
...> GeometryCollectionZ.new([PointZ.new(1.1, 2.2, 3.3)]))
%{
"type" => "GeometryCollection",
"geometries" => [
%{
"type" => "Point",
"coordinates" => [1.1, 2.2, 3.3]
}
]
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%GeometryCollectionZ{geometries: geometries}) do
%{
"type" => "GeometryCollection",
"geometries" =>
Enum.map(geometries, fn geometry ->
Geometry.to_geo_json(geometry)
end)
}
end
@doc """
Returns an `:ok` tuple with the `GeometryCollectionZ` from the given GeoJSON
term. Otherwise returns an `:error` tuple.
## Examples
iex> ~s({
...> "type": "GeometryCollection",
...> "geometries": [
...> {"type": "Point", "coordinates": [1.1, 2.2, 3.3]}
...> ]
...> })
iex> |> Jason.decode!()
iex> |> GeometryCollectionZ.from_geo_json()
{
:ok,
%GeometryCollectionZ{
geometries: MapSet.new([%PointZ{coordinate: [1.1, 2.2, 3.3]}])
}
}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json) do
GeoJson.to_geometry_collection(json, GeometryCollectionZ, type: :z)
end
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_geometry_collection(json, GeometryCollectionZ, type: :z) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKB representation for a `GeometryCollectionZ`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:ndr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZ.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
      when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%GeometryCollectionZ{geometries: geometries}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_geometries(geometries, endian, mode)::binary()
>>
end
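# A minimal usage sketch for `to_wkb/2` (the SRID and coordinates are
# hypothetical; the exact binary depends on the chosen endianness and mode):
#
#     GeometryCollectionZ.new([PointZ.new(1.1, 2.2, 3.3)])
#     |> GeometryCollectionZ.to_wkb(endian: :xdr, srid: 4326, mode: :hex)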
@doc """
Returns an `:ok` tuple with the `GeometryCollectionZ` from the given WKB
string. Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZ.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, GeometryCollectionZ)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, GeometryCollectionZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the number of elements in `GeometryCollectionZ`.
## Examples
iex> GeometryCollectionZ.size(
...> GeometryCollectionZ.new([
...> PointZ.new(11, 12, 13),
...> LineStringZ.new([
...> PointZ.new(21, 22, 23),
...> PointZ.new(31, 32, 33)
...> ])
...> ])
...> )
2
"""
@spec size(t()) :: non_neg_integer()
def size(%GeometryCollectionZ{geometries: geometries}), do: MapSet.size(geometries)
@doc """
Checks if `GeometryCollectionZ` contains `geometry`.
## Examples
iex> GeometryCollectionZ.member?(
...> GeometryCollectionZ.new([
...> PointZ.new(11, 12, 13),
...> LineStringZ.new([
...> PointZ.new(21, 22, 23),
...> PointZ.new(31, 32, 33)
...> ])
...> ]),
...> PointZ.new(11, 12, 13)
...> )
true
iex> GeometryCollectionZ.member?(
...> GeometryCollectionZ.new([
...> PointZ.new(11, 12, 13),
...> LineStringZ.new([
...> PointZ.new(21, 22, 23),
...> PointZ.new(31, 32, 33)
...> ])
...> ]),
...> PointZ.new(1, 2, 3)
...> )
false
"""
@spec member?(t(), Geometry.t()) :: boolean()
def member?(%GeometryCollectionZ{geometries: geometries}, geometry),
do: MapSet.member?(geometries, geometry)
@doc """
Converts `GeometryCollectionZ` to a list.
## Examples
iex> GeometryCollectionZ.to_list(
...> GeometryCollectionZ.new([
...> PointZ.new(11, 12, 13)
...> ])
...> )
[%PointZ{coordinate: [11, 12, 13]}]
"""
@spec to_list(t()) :: [Geometry.t()]
def to_list(%GeometryCollectionZ{geometries: geometries}), do: MapSet.to_list(geometries)
@compile {:inline, to_wkt_geometries: 1}
defp to_wkt_geometries([]), do: "EMPTY"
defp to_wkt_geometries([geometry | geometries]) do
<<"(",
Enum.reduce(geometries, Geometry.to_wkt(geometry), fn %module{} = geometry, acc ->
<<acc::binary(), ", ", module.to_wkt(geometry)::binary()>>
end)::binary(), ")">>
end
@compile {:inline, to_wkb_geometries: 3}
defp to_wkb_geometries(geometries, endian, mode) do
Enum.reduce(geometries, WKB.length(geometries, endian, mode), fn %module{} = geometry, acc ->
<<acc::binary(), module.to_wkb(geometry, endian: endian, mode: mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "80000007"
{:ndr, false} -> "07000080"
{:xdr, true} -> "A0000007"
{:ndr, true} -> "070000A0"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x80000007::big-integer-size(32)>>
{:ndr, false} -> <<0x80000007::little-integer-size(32)>>
{:xdr, true} -> <<0xA0000007::big-integer-size(32)>>
{:ndr, true} -> <<0xA0000007::little-integer-size(32)>>
end
end
defimpl Enumerable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def count(geometry_collection) do
{:ok, GeometryCollectionZ.size(geometry_collection)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def member?(geometry_collection, val) do
{:ok, GeometryCollectionZ.member?(geometry_collection, val)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def slice(geometry_collection) do
size = GeometryCollectionZ.size(geometry_collection)
{:ok, size,
&Enumerable.List.slice(GeometryCollectionZ.to_list(geometry_collection), &1, &2, size)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def reduce(geometry_collection, acc, fun) do
Enumerable.List.reduce(GeometryCollectionZ.to_list(geometry_collection), acc, fun)
end
end
defimpl Collectable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def into(%GeometryCollectionZ{geometries: geometries}) do
fun = fn
list, {:cont, x} ->
[{x, []} | list]
list, :done ->
%GeometryCollectionZ{
geometries: %{geometries | map: Map.merge(geometries.map, Map.new(list))}
}
_list, :halt ->
:ok
end
{[], fun}
end
end
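# Usage sketch for the `Collectable` implementation above (hypothetical
# data): geometries can be collected directly into a collection.
#
#     Enum.into([PointZ.new(1, 2, 3)], GeometryCollectionZ.new())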
end
|
lib/geometry/geometry_collection_z.ex
| 0.962072 | 0.630059 |
geometry_collection_z.ex
|
starcoder
|
defmodule AWS.SageMaker do
@moduledoc """
Provides APIs for creating and managing Amazon SageMaker resources.
Other Resources:
<ul> <li> [Amazon SageMaker Developer
Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html#first-time-user)
</li> <li> [Amazon Augmented AI Runtime API
Reference](https://docs.aws.amazon.com/augmented-ai/2019-11-07/APIReference/Welcome.html)
</li> </ul>
"""
@doc """
Adds or overwrites one or more tags for the specified Amazon SageMaker
resource. You can add tags to notebook instances, training jobs,
hyperparameter tuning jobs, batch transform jobs, models, labeling jobs,
work teams, endpoint configurations, and endpoints.
Each tag consists of a key and an optional value. Tag keys must be unique
per resource. For more information about tags, see [AWS Tagging
Strategies](https://aws.amazon.com/answers/account-management/aws-tagging-strategies/).
<note> Tags that you add to a hyperparameter tuning job by calling this API
are also added to any training jobs that the hyperparameter tuning job
launches after you call this API, but not to training jobs that the
hyperparameter tuning job launched before you called this API. To make sure
that the tags associated with a hyperparameter tuning job are also added to
all training jobs that the hyperparameter tuning job launches, add the tags
when you first create the tuning job by specifying them in the `Tags`
parameter of `CreateHyperParameterTuningJob`.
</note>
"""
def add_tags(client, input, options \\ []) do
request(client, "AddTags", input, options)
end
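# A hedged usage sketch (the client setup, ARN, and tag values are
# hypothetical; `AddTags` takes a resource ARN and a list of key/value
# tags, as described above):
#
#     input = %{
#       "ResourceArn" => "arn:aws:sagemaker:us-east-1:123456789012:model/my-model",
#       "Tags" => [%{"Key" => "team", "Value" => "ml"}]
#     }
#     AWS.SageMaker.add_tags(client, input)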
@doc """
Associates a trial component with a trial. A trial component can be
associated with multiple trials. To disassociate a trial component from a
trial, call the `DisassociateTrialComponent` API.
"""
def associate_trial_component(client, input, options \\ []) do
request(client, "AssociateTrialComponent", input, options)
end
@doc """
Create a machine learning algorithm that you can use in Amazon SageMaker
and list in the AWS Marketplace.
"""
def create_algorithm(client, input, options \\ []) do
request(client, "CreateAlgorithm", input, options)
end
@doc """
Creates a running App for the specified UserProfile. Supported Apps are
JupyterServer and KernelGateway. This operation is automatically invoked by
Amazon SageMaker Studio upon access to the associated Domain, and when new
kernel configurations are selected by the user. A user may have multiple
Apps active simultaneously.
"""
def create_app(client, input, options \\ []) do
request(client, "CreateApp", input, options)
end
@doc """
Creates an AutoPilot job.
After you run an AutoPilot job, you can find the best performing model and
then deploy that model by following the steps described in
[Step 6.1: Deploy the Model to Amazon SageMaker Hosting
Services](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html).
For information about how to use AutoPilot, see [Use AutoPilot to Automate
Model
Development](https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html).
"""
def create_auto_m_l_job(client, input, options \\ []) do
request(client, "CreateAutoMLJob", input, options)
end
@doc """
Creates a Git repository as a resource in your Amazon SageMaker account.
You can associate the repository with notebook instances so that you can
use Git source control for the notebooks you create. The Git repository is
a resource in your Amazon SageMaker account, so it can be associated with
more than one notebook instance, and it persists independently from the
lifecycle of any notebook instances it is associated with.
The repository can be hosted either in [AWS
CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html)
or in any other Git repository.
"""
def create_code_repository(client, input, options \\ []) do
request(client, "CreateCodeRepository", input, options)
end
@doc """
Starts a model compilation job. After the model has been compiled, Amazon
SageMaker saves the resulting model artifacts to an Amazon Simple Storage
Service (Amazon S3) bucket that you specify.
If you choose to host your model using Amazon SageMaker hosting services,
you can use the resulting model artifacts as part of the model. You can
also use the artifacts with AWS IoT Greengrass. In that case, deploy them
as an ML resource.
In the request body, you provide the following:
<ul> <li> A name for the compilation job
</li> <li> Information about the input model artifacts
</li> <li> The output location for the compiled model and the device
(target) that the model runs on
</li> <li> The Amazon Resource Name (ARN) of the IAM role that Amazon
SageMaker assumes to perform the model compilation job.
</li> </ul> You can also provide a `Tag` to track the model compilation
job's resource use and costs. The response body contains the
`CompilationJobArn` for the compiled job.
To stop a model compilation job, use `StopCompilationJob`. To get
information about a particular model compilation job, use
`DescribeCompilationJob`. To get information about multiple model
compilation jobs, use `ListCompilationJobs`.
"""
def create_compilation_job(client, input, options \\ []) do
request(client, "CreateCompilationJob", input, options)
end
@doc """
Creates a `Domain` used by SageMaker Studio. A domain consists of an
associated directory, a list of authorized users, and a variety of
security, application, policy, and Amazon Virtual Private Cloud (VPC)
configurations. An AWS account is limited to one domain per region. Users
within a domain can share notebook files and other artifacts with each
other.
When a domain is created, an Amazon Elastic File System (EFS) volume is
also created for use by all of the users within the domain. Each user
receives a private home directory within the EFS for notebooks, Git
repositories, and data files.
All traffic between the domain and the EFS volume is communicated through
the specified subnet IDs. All other traffic goes over the Internet through
an Amazon SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol
over port 2049.
<important> NFS traffic over TCP on port 2049 needs to be allowed in both
inbound and outbound rules in order to launch a SageMaker Studio app
successfully.
</important>
"""
def create_domain(client, input, options \\ []) do
request(client, "CreateDomain", input, options)
end
@doc """
Creates an endpoint using the endpoint configuration specified in the
request. Amazon SageMaker uses the endpoint to provision resources and
deploy models. You create the endpoint configuration with the
`CreateEndpointConfig` API.
Use this API to deploy models using Amazon SageMaker hosting services.
For an example that calls this method when deploying a model to Amazon
SageMaker hosting services, see [Deploy the Model to Amazon SageMaker
Hosting Services (AWS SDK for Python (Boto
3)).](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto)
<note> You must not delete an `EndpointConfig` that is in use by an
endpoint that is live or while the `UpdateEndpoint` or `CreateEndpoint`
operations are being performed on the endpoint. To update an endpoint, you
must create a new `EndpointConfig`.
</note> The endpoint name must be unique within an AWS Region in your AWS
account.
When it receives the request, Amazon SageMaker creates the endpoint,
launches the resources (ML compute instances), and deploys the model(s) on
them.
<note> When you call `CreateEndpoint`, a load call is made to DynamoDB to
verify that your endpoint configuration exists. When you read data from a
DynamoDB table supporting [ `Eventually Consistent Reads`
](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html),
the response might not reflect the results of a recently completed write
operation. The response might include some stale data. If the dependent
entities are not yet in DynamoDB, this causes a validation error. If you
repeat your read request after a short time, the response should return the
latest data. So retry logic is recommended to handle these possible issues.
We also recommend that customers call `DescribeEndpointConfig` before
calling `CreateEndpoint` to minimize the potential impact of a DynamoDB
eventually consistent read.
</note> When Amazon SageMaker receives the request, it sets the endpoint
status to `Creating`. After it creates the endpoint, it sets the status to
`InService`. Amazon SageMaker can then process incoming requests for
inferences. To check the status of an endpoint, use the `DescribeEndpoint`
API.
If any of the models hosted at this endpoint get model data from an Amazon
S3 location, Amazon SageMaker uses AWS Security Token Service to download
model artifacts from the S3 path you provided. AWS STS is activated in your
IAM user account by default. If you previously deactivated AWS STS for a
region, you need to reactivate AWS STS for that region. For more
information, see [Activating and Deactivating AWS STS in an AWS
Region](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
in the *AWS Identity and Access Management User Guide*.
"""
def create_endpoint(client, input, options \\ []) do
request(client, "CreateEndpoint", input, options)
end
@doc """
Creates an endpoint configuration that Amazon SageMaker hosting services
uses to deploy models. In the configuration, you identify one or more
models, created using the `CreateModel` API, to deploy and the resources
that you want Amazon SageMaker to provision. Then you call the
`CreateEndpoint` API.
<note> Use this API if you want to use Amazon SageMaker hosting services to
deploy models into production.
</note> In the request, you define a `ProductionVariant` for each model
that you want to deploy. Each `ProductionVariant` parameter also describes
the resources that you want Amazon SageMaker to provision. This includes
the number and type of ML compute instances to deploy.
If you are hosting multiple models, you also assign a `VariantWeight` to
specify how much traffic you want to allocate to each model. For example,
suppose that you want to host two models, A and B, and you assign traffic
weight 2 for model A and 1 for model B. Amazon SageMaker distributes
two-thirds of the traffic to Model A, and one-third to model B.
For an example that calls this method when deploying a model to Amazon
SageMaker hosting services, see [Deploy the Model to Amazon SageMaker
Hosting Services (AWS SDK for Python (Boto
3)).](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto)
<note> When you call `CreateEndpoint`, a load call is made to DynamoDB to
verify that your endpoint configuration exists. When you read data from a
DynamoDB table supporting [ `Eventually Consistent Reads`
](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html),
the response might not reflect the results of a recently completed write
operation. The response might include some stale data. If the dependent
entities are not yet in DynamoDB, this causes a validation error. If you
repeat your read request after a short time, the response should return the
latest data. So retry logic is recommended to handle these possible issues.
We also recommend that customers call `DescribeEndpointConfig` before
calling `CreateEndpoint` to minimize the potential impact of a DynamoDB
eventually consistent read.
</note>
"""
def create_endpoint_config(client, input, options \\ []) do
request(client, "CreateEndpointConfig", input, options)
end
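# Sketch of the two-model traffic weighting described above (model names,
# instance type, and counts are hypothetical; `client` is assumed to be an
# aws-elixir client): variant A receives two-thirds of the traffic and
# variant B one-third.
#
#     input = %{
#       "EndpointConfigName" => "my-config",
#       "ProductionVariants" => [
#         %{"VariantName" => "variant-a", "ModelName" => "model-a",
#           "InstanceType" => "ml.m5.large", "InitialInstanceCount" => 1,
#           "InitialVariantWeight" => 2},
#         %{"VariantName" => "variant-b", "ModelName" => "model-b",
#           "InstanceType" => "ml.m5.large", "InitialInstanceCount" => 1,
#           "InitialVariantWeight" => 1}
#       ]
#     }
#     AWS.SageMaker.create_endpoint_config(client, input)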
@doc """
Creates an SageMaker *experiment*. An experiment is a collection of
*trials* that are observed, compared and evaluated as a group. A trial is a
set of steps, called *trial components*, that produce a machine learning
model.
The goal of an experiment is to determine the components that produce the
best model. Multiple trials are performed, each one isolating and measuring
the impact of a change to one or more inputs, while keeping the remaining
inputs constant.
When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK,
all experiments, trials, and trial components are automatically tracked,
logged, and indexed. When you use the AWS SDK for Python (Boto), you must
use the logging APIs provided by the SDK.
You can add tags to experiments, trials, trial components and then use the
`Search` API to search for the tags.
To add a description to an experiment, specify the optional `Description`
parameter. To add a description later, or to change the description, call
the `UpdateExperiment` API.
To get a list of all your experiments, call the `ListExperiments` API. To
view an experiment's properties, call the `DescribeExperiment` API. To get
a list of all the trials associated with an experiment, call the
`ListTrials` API. To create a trial call the `CreateTrial` API.
"""
def create_experiment(client, input, options \\ []) do
request(client, "CreateExperiment", input, options)
end
@doc """
Creates a flow definition.
"""
def create_flow_definition(client, input, options \\ []) do
request(client, "CreateFlowDefinition", input, options)
end
@doc """
Defines the settings you will use for the human review workflow user
interface. Reviewers will see a three-panel interface with an instruction
area, the item to review, and an input area.
"""
def create_human_task_ui(client, input, options \\ []) do
request(client, "CreateHumanTaskUi", input, options)
end
@doc """
Starts a hyperparameter tuning job. A hyperparameter tuning job finds the
best version of a model by running many training jobs on your dataset using
the algorithm you choose and values for hyperparameters within ranges that
you specify. It then chooses the hyperparameter values that result in a
model that performs the best, as measured by an objective metric that you
choose.
"""
def create_hyper_parameter_tuning_job(client, input, options \\ []) do
request(client, "CreateHyperParameterTuningJob", input, options)
end
@doc """
Creates a job that uses workers to label the data objects in your input
dataset. You can use the labeled data to train machine learning models.
You can select your workforce from one of three providers:
<ul> <li> A private workforce that you create. It can include employees,
contractors, and outside experts. Use a private workforce when you want the
data to stay within your organization or when a specific set of skills is
required.
</li> <li> One or more vendors that you select from the AWS Marketplace.
Vendors provide expertise in specific areas.
</li> <li> The Amazon Mechanical Turk workforce. This is the largest
workforce, but it should only be used for public data or data that has been
stripped of any personally identifiable information.
</li> </ul> You can also use *automated data labeling* to reduce the number
of data objects that need to be labeled by a human. Automated data labeling
uses *active learning* to determine if a data object can be labeled by
machine or if it needs to be sent to a human worker. For more information,
see [Using Automated Data
Labeling](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html).
The data objects to be labeled are contained in an Amazon S3 bucket. You
create a *manifest file* that describes the location of each object. For
more information, see [Using Input and Output
Data](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-data.html).
The output can be used as the manifest file for another labeling job or as
training data for your machine learning models.
"""
def create_labeling_job(client, input, options \\ []) do
request(client, "CreateLabelingJob", input, options)
end
@doc """
Creates a model in Amazon SageMaker. In the request, you name the model and
describe a primary container. For the primary container, you specify the
Docker image that contains inference code, artifacts (from prior training),
and a custom environment map that the inference code uses when you deploy
the model for predictions.
Use this API to create a model if you want to use Amazon SageMaker hosting
services or run a batch transform job.
To host your model, you create an endpoint configuration with the
`CreateEndpointConfig` API, and then create an endpoint with the
`CreateEndpoint` API. Amazon SageMaker then deploys all of the containers
that you defined for the model in the hosting environment.
For an example that calls this method when deploying a model to Amazon
SageMaker hosting services, see [Deploy the Model to Amazon SageMaker
Hosting Services (AWS SDK for Python (Boto
3)).](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto)
To run a batch transform using your model, you start a job with the
`CreateTransformJob` API. Amazon SageMaker uses your model and your dataset
to get inferences which are then saved to a specified S3 location.
In the `CreateModel` request, you must define a container with the
`PrimaryContainer` parameter.
In the request, you also provide an IAM role that Amazon SageMaker can
assume to access model artifacts and docker image for deployment on ML
compute hosting instances or for batch transform jobs. In addition, you
also use the IAM role to manage permissions the inference code needs. For
example, if the inference code accesses any other AWS resources, you grant
necessary permissions via this role.
"""
def create_model(client, input, options \\ []) do
request(client, "CreateModel", input, options)
end
@doc """
Creates a model package that you can use to create Amazon SageMaker models
or list on AWS Marketplace. Buyers can subscribe to model packages listed
on AWS Marketplace to create models in Amazon SageMaker.
To create a model package by specifying a Docker container that contains
your inference code and the Amazon S3 location of your model artifacts,
provide values for `InferenceSpecification`. To create a model from an
algorithm resource that you created or subscribed to in AWS Marketplace,
provide a value for `SourceAlgorithmSpecification`.
"""
def create_model_package(client, input, options \\ []) do
request(client, "CreateModelPackage", input, options)
end
@doc """
Creates a schedule that regularly starts Amazon SageMaker Processing Jobs
to monitor the data captured for an Amazon SageMaker Endpoint.
"""
def create_monitoring_schedule(client, input, options \\ []) do
request(client, "CreateMonitoringSchedule", input, options)
end
@doc """
Creates an Amazon SageMaker notebook instance. A notebook instance is a
machine learning (ML) compute instance running on a Jupyter notebook.
In a `CreateNotebookInstance` request, specify the type of ML compute
instance that you want to run. Amazon SageMaker launches the instance,
installs common libraries that you can use to explore datasets for model
training, and attaches an ML storage volume to the notebook instance.
Amazon SageMaker also provides a set of example notebooks. Each notebook
demonstrates how to use Amazon SageMaker with a specific algorithm or with
a machine learning framework.
After receiving the request, Amazon SageMaker does the following:
<ol> <li> Creates a network interface in the Amazon SageMaker VPC.
</li> <li> (Optional) If you specified `SubnetId`, Amazon SageMaker creates a
network interface in your own VPC, which is inferred from the subnet ID
that you provide in the input. When creating this network interface, Amazon
SageMaker attaches the security group that you specified in the request to
the network interface that it creates in your VPC.
</li> <li> Launches an EC2 instance of the type specified in the request in
the Amazon SageMaker VPC. If you specified `SubnetId` of your VPC, Amazon
SageMaker specifies both network interfaces when launching this instance.
This enables inbound traffic from your own VPC to the notebook instance,
assuming that the security groups allow it.
</li> </ol> After creating the notebook instance, Amazon SageMaker returns
its Amazon Resource Name (ARN). You can't change the name of a notebook
instance after you create it.
After Amazon SageMaker creates the notebook instance, you can connect to
the Jupyter server and work in Jupyter notebooks. For example, you can
write code to explore a dataset that you can use for model training, train
a model, host models by creating Amazon SageMaker endpoints, and validate
hosted models.
For more information, see [How It
Works](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html).
"""
def create_notebook_instance(client, input, options \\ []) do
request(client, "CreateNotebookInstance", input, options)
end
@doc """
Creates a lifecycle configuration that you can associate with a notebook
instance. A *lifecycle configuration* is a collection of shell scripts that
run when you create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the `$PATH` environment variable that is available to both
scripts is `/sbin:/bin:/usr/sbin:/usr/bin`.
View CloudWatch Logs for notebook instance lifecycle configurations in log
group `/aws/sagemaker/NotebookInstances` in log stream
`[notebook-instance-name]/[LifecycleConfigHook]`.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a
script runs for longer than 5 minutes, it fails and the notebook instance
is not created or started.
For information about notebook instance lifecycle configurations, see [Step
2.1: (Optional) Customize a Notebook
Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html).
"""
def create_notebook_instance_lifecycle_config(client, input, options \\ []) do
request(client, "CreateNotebookInstanceLifecycleConfig", input, options)
end
@doc """
Creates a URL for a specified UserProfile in a Domain. When accessed in a
web browser, the user will be automatically signed in to Amazon SageMaker
Studio, and granted access to all of the Apps and files associated with the
Domain's Amazon Elastic File System (EFS) volume. This operation can only
be called when the authentication mode equals IAM.
"""
def create_presigned_domain_url(client, input, options \\ []) do
request(client, "CreatePresignedDomainUrl", input, options)
end
@doc """
Returns a URL that you can use to connect to the Jupyter server from a
notebook instance. In the Amazon SageMaker console, when you choose `Open`
next to a notebook instance, Amazon SageMaker opens a new tab showing the
Jupyter server home page from the notebook instance. The console uses this
API to get the URL and show the page.
The IAM role or user used to call this API defines the permissions to
access the notebook instance. Once the presigned URL is created, no
additional permission is required to access this URL. IAM authorization
policies for this API are also enforced for every HTTP request and
WebSocket frame that attempts to connect to the notebook instance.
You can restrict access to this API and to the URL that it returns to a
list of IP addresses that you specify. Use the `NotIpAddress` condition
operator and the `aws:SourceIP` condition context key to specify the list
of IP addresses that you want to have access to the notebook instance. For
more information, see [Limit Access to a Notebook Instance by IP
Address](https://docs.aws.amazon.com/sagemaker/latest/dg/security_iam_id-based-policy-examples.html#nbi-ip-filter).
<note> The URL that you get from a call to
`CreatePresignedNotebookInstanceUrl` is valid only for 5 minutes. If you
try to use the URL after the 5-minute limit expires, you are directed to
the AWS console sign-in page.
</note>
"""
def create_presigned_notebook_instance_url(client, input, options \\ []) do
request(client, "CreatePresignedNotebookInstanceUrl", input, options)
end
@doc """
Creates a processing job.
"""
def create_processing_job(client, input, options \\ []) do
request(client, "CreateProcessingJob", input, options)
end
@doc """
Starts a model training job. After training completes, Amazon SageMaker
saves the resulting model artifacts to an Amazon S3 location that you
specify.
If you choose to host your model using Amazon SageMaker hosting services,
you can use the resulting model artifacts as part of the model. You can
also use the artifacts in a machine learning service other than Amazon
SageMaker, provided that you know how to use them for inferences.
In the request body, you provide the following:
<ul> <li> `AlgorithmSpecification` - Identifies the training algorithm to
use.
</li> <li> `HyperParameters` - Specify these algorithm-specific parameters
to enable the estimation of model parameters during training.
Hyperparameters can be tuned to optimize this learning process. For a list
of hyperparameters for each training algorithm provided by Amazon
SageMaker, see
[Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
</li> <li> `InputDataConfig` - Describes the training dataset and the
Amazon S3, EFS, or FSx location where it is stored.
</li> <li> `OutputDataConfig` - Identifies the Amazon S3 bucket where you
want Amazon SageMaker to save the results of model training.
<p/> </li> <li> `ResourceConfig` - Identifies the resources, ML compute
instances, and ML storage volumes to deploy for model training. In
distributed training, you specify more than one instance.
</li> <li> `EnableManagedSpotTraining` - Optimize the cost of training
machine learning models by up to 80% by using Amazon EC2 Spot instances.
For more information, see [Managed Spot
Training](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html).
</li> <li> `RoleArn` - The Amazon Resource Name (ARN) that Amazon
SageMaker assumes to perform tasks on your behalf during model training.
You must grant this role the necessary permissions so that Amazon SageMaker
can successfully complete model training.
</li> <li> `StoppingCondition` - To help cap training costs, use
`MaxRuntimeInSeconds` to set a time limit for training. Use
`MaxWaitTimeInSeconds` to specify how long you are willing to wait for a
managed spot training job to complete.
</li> </ul> For more information about Amazon SageMaker, see [How It
Works](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html).
"""
def create_training_job(client, input, options \\ []) do
request(client, "CreateTrainingJob", input, options)
end
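# Sketch of a request body using the fields enumerated above (the job name,
# image URI, role, and S3 locations are hypothetical; `client` is assumed
# to be an aws-elixir client):
#
#     input = %{
#       "TrainingJobName" => "my-training-job",
#       "AlgorithmSpecification" => %{
#         "TrainingImage" => "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",
#         "TrainingInputMode" => "File"
#       },
#       "RoleArn" => "arn:aws:iam::123456789012:role/SageMakerRole",
#       "InputDataConfig" => [
#         %{"ChannelName" => "train",
#           "DataSource" => %{"S3DataSource" => %{
#             "S3DataType" => "S3Prefix",
#             "S3Uri" => "s3://my-bucket/train"
#           }}}
#       ],
#       "OutputDataConfig" => %{"S3OutputPath" => "s3://my-bucket/output"},
#       "ResourceConfig" => %{
#         "InstanceType" => "ml.m5.xlarge",
#         "InstanceCount" => 1,
#         "VolumeSizeInGB" => 10
#       },
#       "StoppingCondition" => %{"MaxRuntimeInSeconds" => 3600}
#     }
#     AWS.SageMaker.create_training_job(client, input)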
@doc """
Starts a transform job. A transform job uses a trained model to get
inferences on a dataset and saves these results to an Amazon S3 location
that you specify.
To perform batch transformations, you create a transform job and use the
data that you have readily available.
In the request body, you provide the following:
<ul> <li> `TransformJobName` - Identifies the transform job. The name must
be unique within an AWS Region in an AWS account.
</li> <li> `ModelName` - Identifies the model to use. `ModelName` must be
the name of an existing Amazon SageMaker model in the same AWS Region and
AWS account. For information on creating a model, see `CreateModel`.
</li> <li> `TransformInput` - Describes the dataset to be transformed and
the Amazon S3 location where it is stored.
</li> <li> `TransformOutput` - Identifies the Amazon S3 location where you
want Amazon SageMaker to save the results from the transform job.
</li> <li> `TransformResources` - Identifies the ML compute instances for
the transform job.
</li> </ul> For more information about how batch transformation works, see
[Batch
Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html).
"""
def create_transform_job(client, input, options \\ []) do
request(client, "CreateTransformJob", input, options)
end
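# Sketch of a request body with the fields listed above (the job name,
# model name, and S3 locations are hypothetical):
#
#     input = %{
#       "TransformJobName" => "my-transform-job",
#       "ModelName" => "my-model",
#       "TransformInput" => %{
#         "DataSource" => %{"S3DataSource" => %{
#           "S3DataType" => "S3Prefix",
#           "S3Uri" => "s3://my-bucket/input"
#         }}
#       },
#       "TransformOutput" => %{"S3OutputPath" => "s3://my-bucket/output"},
#       "TransformResources" => %{
#         "InstanceType" => "ml.m5.large",
#         "InstanceCount" => 1
#       }
#     }
#     AWS.SageMaker.create_transform_job(client, input)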
@doc """
Creates an Amazon SageMaker *trial*. A trial is a set of steps called
*trial components* that produce a machine learning model. A trial is part
of a single Amazon SageMaker *experiment*.
When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK,
all experiments, trials, and trial components are automatically tracked,
logged, and indexed. When you use the AWS SDK for Python (Boto), you must
use the logging APIs provided by the SDK.
You can add tags to a trial and then use the `Search` API to search for the
tags.
To get a list of all your trials, call the `ListTrials` API. To view a
trial's properties, call the `DescribeTrial` API. To create a trial
component, call the `CreateTrialComponent` API.
"""
def create_trial(client, input, options \\ []) do
request(client, "CreateTrial", input, options)
end
@doc """
Creates a *trial component*, which is a stage of a machine learning
*trial*. A trial is composed of one or more trial components. A trial
component can be used in multiple trials.
Trial components include pre-processing jobs, training jobs, and batch
transform jobs.
When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK,
all experiments, trials, and trial components are automatically tracked,
logged, and indexed. When you use the AWS SDK for Python (Boto), you must
use the logging APIs provided by the SDK.
You can add tags to a trial component and then use the `Search` API to
search for the tags.
<note> `CreateTrialComponent` can only be invoked from within an Amazon
SageMaker managed environment. This includes Amazon SageMaker training
jobs, processing jobs, transform jobs, and Amazon SageMaker notebooks. A
call to `CreateTrialComponent` from outside one of these environments
results in an error.
</note>
"""
def create_trial_component(client, input, options \\ []) do
request(client, "CreateTrialComponent", input, options)
end
@doc """
Creates a user profile. A user profile represents a single user within a
domain, and is the main way to reference a "person" for the purposes of
sharing, reporting, and other user-oriented features. This entity is
created when a user onboards to Amazon SageMaker Studio. If an
administrator invites a person by email or imports them from SSO, a user
profile is automatically created. A user profile is the primary holder of
settings for an individual user and has a reference to the user's private
Amazon Elastic File System (EFS) home directory.
"""
def create_user_profile(client, input, options \\ []) do
request(client, "CreateUserProfile", input, options)
end
@doc """
Use this operation to create a workforce. This operation will return an
error if a workforce already exists in the AWS Region that you specify. You
can only create one workforce in each AWS Region.
If you want to create a new workforce in an AWS Region where a workforce
already exists, use the `DeleteWorkforce` API operation to delete the
existing workforce and then use this operation to create a new workforce.
To create a private workforce using Amazon Cognito, you must specify a
Cognito user pool in `CognitoConfig`. You can also create an Amazon Cognito
workforce using the Amazon SageMaker console. For more information, see [
Create a Private Workforce (Amazon
Cognito)](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html).
To create a private workforce using your own OIDC Identity Provider (IdP),
specify your IdP configuration in `OidcConfig`. You must create an OIDC IdP
workforce using this API operation. For more information, see [ Create a
Private Workforce (OIDC
IdP)](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private-oidc.html).
"""
def create_workforce(client, input, options \\ []) do
request(client, "CreateWorkforce", input, options)
end
@doc """
Creates a new work team for labeling your data. A work team is defined by
one or more Amazon Cognito user pools. You must first create the user pools
before you can create a work team.
You cannot create more than 25 work teams in an account and region.
"""
def create_workteam(client, input, options \\ []) do
request(client, "CreateWorkteam", input, options)
end
@doc """
Removes the specified algorithm from your account.
"""
def delete_algorithm(client, input, options \\ []) do
request(client, "DeleteAlgorithm", input, options)
end
@doc """
Used to stop and delete an app.
"""
def delete_app(client, input, options \\ []) do
request(client, "DeleteApp", input, options)
end
@doc """
Deletes the specified Git repository from your account.
"""
def delete_code_repository(client, input, options \\ []) do
request(client, "DeleteCodeRepository", input, options)
end
@doc """
Used to delete a domain. If you onboarded with IAM mode, you will need to
delete your domain to onboard again using SSO. Use with caution. All of the
members of the domain will lose access to their EFS volume, including data,
notebooks, and other artifacts.
"""
def delete_domain(client, input, options \\ []) do
request(client, "DeleteDomain", input, options)
end
@doc """
Deletes an endpoint. Amazon SageMaker frees up all of the resources that
were deployed when the endpoint was created.
Amazon SageMaker retires any custom KMS key grants associated with the
endpoint, meaning you don't need to use the
[RevokeGrant](http://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html)
API call.
"""
def delete_endpoint(client, input, options \\ []) do
request(client, "DeleteEndpoint", input, options)
end
@doc """
Deletes an endpoint configuration. The `DeleteEndpointConfig` API deletes
only the specified configuration. It does not delete endpoints created
using the configuration.
You must not delete an `EndpointConfig` in use by an endpoint that is live
or while the `UpdateEndpoint` or `CreateEndpoint` operations are being
performed on the endpoint. If you delete the `EndpointConfig` of an
endpoint that is active or being created or updated, you may lose visibility
into the instance type the endpoint is using. The endpoint must be deleted
in order to stop incurring charges.
"""
def delete_endpoint_config(client, input, options \\ []) do
request(client, "DeleteEndpointConfig", input, options)
end
@doc """
Deletes an Amazon SageMaker experiment. All trials associated with the
experiment must be deleted first. Use the `ListTrials` API to get a list of
the trials associated with the experiment.
"""
def delete_experiment(client, input, options \\ []) do
request(client, "DeleteExperiment", input, options)
end
@doc """
Deletes the specified flow definition.
"""
def delete_flow_definition(client, input, options \\ []) do
request(client, "DeleteFlowDefinition", input, options)
end
@doc """
Use this operation to delete a human task user interface (worker task
template).
To see a list of human task user interfaces (worker task templates) in your
account, use the `ListHumanTaskUis` API. When you delete a worker task
template, it no longer appears when you call `ListHumanTaskUis`.
"""
def delete_human_task_ui(client, input, options \\ []) do
request(client, "DeleteHumanTaskUi", input, options)
end
@doc """
Deletes a model. The `DeleteModel` API deletes only the model entry that
was created in Amazon SageMaker when you called the `CreateModel` API. It
does not delete model artifacts, inference code, or the IAM role that you
specified when creating the model.
"""
def delete_model(client, input, options \\ []) do
request(client, "DeleteModel", input, options)
end
@doc """
Deletes a model package.
A model package is used to create Amazon SageMaker models or list on AWS
Marketplace. Buyers can subscribe to model packages listed on AWS
Marketplace to create models in Amazon SageMaker.
"""
def delete_model_package(client, input, options \\ []) do
request(client, "DeleteModelPackage", input, options)
end
@doc """
Deletes a monitoring schedule. Also stops the schedule if it had not already been
stopped. This does not delete the job execution history of the monitoring
schedule.
"""
def delete_monitoring_schedule(client, input, options \\ []) do
request(client, "DeleteMonitoringSchedule", input, options)
end
@doc """
Deletes an Amazon SageMaker notebook instance. Before you can delete a
notebook instance, you must call the `StopNotebookInstance` API.
<important> When you delete a notebook instance, you lose all of your data.
Amazon SageMaker removes the ML compute instance, and deletes the ML
storage volume and the network interface associated with the notebook
instance.
</important>
"""
def delete_notebook_instance(client, input, options \\ []) do
request(client, "DeleteNotebookInstance", input, options)
end
@doc """
Deletes a notebook instance lifecycle configuration.
"""
def delete_notebook_instance_lifecycle_config(client, input, options \\ []) do
request(client, "DeleteNotebookInstanceLifecycleConfig", input, options)
end
@doc """
Deletes the specified tags from an Amazon SageMaker resource.
To list a resource's tags, use the `ListTags` API.
<note> When you call this API to delete tags from a hyperparameter tuning
job, the deleted tags are not removed from training jobs that the
hyperparameter tuning job launched before you called this API.
</note>
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Deletes the specified trial. All trial components that make up the trial
must be deleted first. Use the `DescribeTrialComponent` API to get the list
of trial components.
"""
def delete_trial(client, input, options \\ []) do
request(client, "DeleteTrial", input, options)
end
@doc """
Deletes the specified trial component. A trial component must be
disassociated from all trials before the trial component can be deleted. To
disassociate a trial component from a trial, call the
`DisassociateTrialComponent` API.
"""
def delete_trial_component(client, input, options \\ []) do
request(client, "DeleteTrialComponent", input, options)
end
@doc """
Deletes a user profile. When a user profile is deleted, the user loses
access to their EFS volume, including data, notebooks, and other artifacts.
"""
def delete_user_profile(client, input, options \\ []) do
request(client, "DeleteUserProfile", input, options)
end
@doc """
Use this operation to delete a workforce.
If you want to create a new workforce in an AWS Region where a workforce
already exists, use this operation to delete the existing workforce and
then use `CreateWorkforce` to create a new workforce.
"""
def delete_workforce(client, input, options \\ []) do
request(client, "DeleteWorkforce", input, options)
end
@doc """
Deletes an existing work team. This operation can't be undone.
"""
def delete_workteam(client, input, options \\ []) do
request(client, "DeleteWorkteam", input, options)
end
@doc """
Returns a description of the specified algorithm that is in your account.
"""
def describe_algorithm(client, input, options \\ []) do
request(client, "DescribeAlgorithm", input, options)
end
@doc """
Describes the app.
"""
def describe_app(client, input, options \\ []) do
request(client, "DescribeApp", input, options)
end
@doc """
Returns information about an Amazon SageMaker job.
"""
def describe_auto_m_l_job(client, input, options \\ []) do
request(client, "DescribeAutoMLJob", input, options)
end
@doc """
Gets details about the specified Git repository.
"""
def describe_code_repository(client, input, options \\ []) do
request(client, "DescribeCodeRepository", input, options)
end
@doc """
Returns information about a model compilation job.
To create a model compilation job, use `CreateCompilationJob`. To get
information about multiple model compilation jobs, use
`ListCompilationJobs`.
"""
def describe_compilation_job(client, input, options \\ []) do
request(client, "DescribeCompilationJob", input, options)
end
@doc """
Returns the description of the domain.
"""
def describe_domain(client, input, options \\ []) do
request(client, "DescribeDomain", input, options)
end
@doc """
Returns the description of an endpoint.
"""
def describe_endpoint(client, input, options \\ []) do
request(client, "DescribeEndpoint", input, options)
end
@doc """
Returns the description of an endpoint configuration created using the
`CreateEndpointConfig` API.
"""
def describe_endpoint_config(client, input, options \\ []) do
request(client, "DescribeEndpointConfig", input, options)
end
@doc """
Provides a list of an experiment's properties.
"""
def describe_experiment(client, input, options \\ []) do
request(client, "DescribeExperiment", input, options)
end
@doc """
Returns information about the specified flow definition.
"""
def describe_flow_definition(client, input, options \\ []) do
request(client, "DescribeFlowDefinition", input, options)
end
@doc """
Returns information about the requested human task user interface (worker
task template).
"""
def describe_human_task_ui(client, input, options \\ []) do
request(client, "DescribeHumanTaskUi", input, options)
end
@doc """
Gets a description of a hyperparameter tuning job.
"""
def describe_hyper_parameter_tuning_job(client, input, options \\ []) do
request(client, "DescribeHyperParameterTuningJob", input, options)
end
@doc """
Gets information about a labeling job.
"""
def describe_labeling_job(client, input, options \\ []) do
request(client, "DescribeLabelingJob", input, options)
end
@doc """
Describes a model that you created using the `CreateModel` API.
"""
def describe_model(client, input, options \\ []) do
request(client, "DescribeModel", input, options)
end
@doc """
Returns a description of the specified model package, which is used to
create Amazon SageMaker models or list them on AWS Marketplace.
To create models in Amazon SageMaker, buyers can subscribe to model
packages listed on AWS Marketplace.
"""
def describe_model_package(client, input, options \\ []) do
request(client, "DescribeModelPackage", input, options)
end
@doc """
Describes the schedule for a monitoring job.
"""
def describe_monitoring_schedule(client, input, options \\ []) do
request(client, "DescribeMonitoringSchedule", input, options)
end
@doc """
Returns information about a notebook instance.
"""
def describe_notebook_instance(client, input, options \\ []) do
request(client, "DescribeNotebookInstance", input, options)
end
@doc """
Returns a description of a notebook instance lifecycle configuration.
For information about notebook instance lifecycle configurations, see [Step
2.1: (Optional) Customize a Notebook
Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html).
"""
def describe_notebook_instance_lifecycle_config(client, input, options \\ []) do
request(client, "DescribeNotebookInstanceLifecycleConfig", input, options)
end
@doc """
Returns a description of a processing job.
"""
def describe_processing_job(client, input, options \\ []) do
request(client, "DescribeProcessingJob", input, options)
end
@doc """
Gets information about a work team provided by a vendor. It returns details
about the subscription with a vendor in the AWS Marketplace.
"""
def describe_subscribed_workteam(client, input, options \\ []) do
request(client, "DescribeSubscribedWorkteam", input, options)
end
@doc """
Returns information about a training job.
"""
def describe_training_job(client, input, options \\ []) do
request(client, "DescribeTrainingJob", input, options)
end
@doc """
Returns information about a transform job.
"""
def describe_transform_job(client, input, options \\ []) do
request(client, "DescribeTransformJob", input, options)
end
@doc """
Provides a list of a trial's properties.
"""
def describe_trial(client, input, options \\ []) do
request(client, "DescribeTrial", input, options)
end
@doc """
Provides a list of a trial component's properties.
"""
def describe_trial_component(client, input, options \\ []) do
request(client, "DescribeTrialComponent", input, options)
end
@doc """
Describes a user profile. For more information, see `CreateUserProfile`.
"""
def describe_user_profile(client, input, options \\ []) do
request(client, "DescribeUserProfile", input, options)
end
@doc """
Lists private workforce information, including workforce name, Amazon
Resource Name (ARN), and, if applicable, allowed IP address ranges
([CIDRs](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)).
Allowable IP address ranges are the IP addresses that workers can use to
access tasks.
<important> This operation applies only to private workforces.
</important>
"""
def describe_workforce(client, input, options \\ []) do
request(client, "DescribeWorkforce", input, options)
end
@doc """
Gets information about a specific work team. You can see information such
as the create date, the last updated date, membership information, and the
work team's Amazon Resource Name (ARN).
"""
def describe_workteam(client, input, options \\ []) do
request(client, "DescribeWorkteam", input, options)
end
@doc """
Disassociates a trial component from a trial. This doesn't affect other
trials the component is associated with. Before you can delete a component,
you must disassociate the component from all trials it is associated with.
To associate a trial component with a trial, call the
`AssociateTrialComponent` API.
To get a list of the trials a component is associated with, use the
`Search` API. Specify `ExperimentTrialComponent` for the `Resource`
parameter. The list appears in the response under
`Results.TrialComponent.Parents`.
"""
def disassociate_trial_component(client, input, options \\ []) do
request(client, "DisassociateTrialComponent", input, options)
end
@doc """
An auto-complete API for the search functionality in the Amazon SageMaker
console. It returns suggestions of possible matches for the property name
to use in `Search` queries. Provides suggestions for `HyperParameters`,
`Tags`, and `Metrics`.
"""
def get_search_suggestions(client, input, options \\ []) do
request(client, "GetSearchSuggestions", input, options)
end
@doc """
Lists the machine learning algorithms that have been created.
"""
def list_algorithms(client, input, options \\ []) do
request(client, "ListAlgorithms", input, options)
end
@doc """
Lists apps.
"""
def list_apps(client, input, options \\ []) do
request(client, "ListApps", input, options)
end
@doc """
Request a list of jobs.
"""
def list_auto_m_l_jobs(client, input, options \\ []) do
request(client, "ListAutoMLJobs", input, options)
end
@doc """
List the Candidates created for the job.
"""
def list_candidates_for_auto_m_l_job(client, input, options \\ []) do
request(client, "ListCandidatesForAutoMLJob", input, options)
end
@doc """
Gets a list of the Git repositories in your account.
"""
def list_code_repositories(client, input, options \\ []) do
request(client, "ListCodeRepositories", input, options)
end
@doc """
Lists model compilation jobs that satisfy various filters.
To create a model compilation job, use `CreateCompilationJob`. To get
information about a particular model compilation job you have created, use
`DescribeCompilationJob`.
"""
def list_compilation_jobs(client, input, options \\ []) do
request(client, "ListCompilationJobs", input, options)
end
@doc """
Lists the domains.
"""
def list_domains(client, input, options \\ []) do
request(client, "ListDomains", input, options)
end
@doc """
Lists endpoint configurations.
"""
def list_endpoint_configs(client, input, options \\ []) do
request(client, "ListEndpointConfigs", input, options)
end
@doc """
Lists endpoints.
"""
def list_endpoints(client, input, options \\ []) do
request(client, "ListEndpoints", input, options)
end
@doc """
Lists all the experiments in your account. The list can be filtered to show
only experiments that were created in a specific time range. The list can
be sorted by experiment name or creation time.
"""
def list_experiments(client, input, options \\ []) do
request(client, "ListExperiments", input, options)
end
@doc """
Returns information about the flow definitions in your account.
"""
def list_flow_definitions(client, input, options \\ []) do
request(client, "ListFlowDefinitions", input, options)
end
@doc """
Returns information about the human task user interfaces in your account.
"""
def list_human_task_uis(client, input, options \\ []) do
request(client, "ListHumanTaskUis", input, options)
end
@doc """
Gets a list of `HyperParameterTuningJobSummary` objects that describe the
hyperparameter tuning jobs launched in your account.
"""
def list_hyper_parameter_tuning_jobs(client, input, options \\ []) do
request(client, "ListHyperParameterTuningJobs", input, options)
end
@doc """
Gets a list of labeling jobs.
"""
def list_labeling_jobs(client, input, options \\ []) do
request(client, "ListLabelingJobs", input, options)
end
@doc """
Gets a list of labeling jobs assigned to a specified work team.
"""
def list_labeling_jobs_for_workteam(client, input, options \\ []) do
request(client, "ListLabelingJobsForWorkteam", input, options)
end
@doc """
Lists the model packages that have been created.
"""
def list_model_packages(client, input, options \\ []) do
request(client, "ListModelPackages", input, options)
end
@doc """
Lists models created with the `CreateModel` API.
"""
def list_models(client, input, options \\ []) do
request(client, "ListModels", input, options)
end
@doc """
Returns a list of all monitoring job executions.
"""
def list_monitoring_executions(client, input, options \\ []) do
request(client, "ListMonitoringExecutions", input, options)
end
@doc """
Returns a list of all monitoring schedules.
"""
def list_monitoring_schedules(client, input, options \\ []) do
request(client, "ListMonitoringSchedules", input, options)
end
@doc """
Lists notebook instance lifecycle configurations created with the
`CreateNotebookInstanceLifecycleConfig` API.
"""
def list_notebook_instance_lifecycle_configs(client, input, options \\ []) do
request(client, "ListNotebookInstanceLifecycleConfigs", input, options)
end
@doc """
Returns a list of the Amazon SageMaker notebook instances in the
requester's account in an AWS Region.
"""
def list_notebook_instances(client, input, options \\ []) do
request(client, "ListNotebookInstances", input, options)
end
@doc """
Lists processing jobs that satisfy various filters.
"""
def list_processing_jobs(client, input, options \\ []) do
request(client, "ListProcessingJobs", input, options)
end
@doc """
Gets a list of the work teams that you are subscribed to in the AWS
Marketplace. The list may be empty if no work team satisfies the filter
specified in the `NameContains` parameter.
"""
def list_subscribed_workteams(client, input, options \\ []) do
request(client, "ListSubscribedWorkteams", input, options)
end
@doc """
Returns the tags for the specified Amazon SageMaker resource.
"""
def list_tags(client, input, options \\ []) do
request(client, "ListTags", input, options)
end
@doc """
Lists training jobs.
"""
def list_training_jobs(client, input, options \\ []) do
request(client, "ListTrainingJobs", input, options)
end
@doc """
Gets a list of `TrainingJobSummary` objects that describe the training jobs
that a hyperparameter tuning job launched.
"""
def list_training_jobs_for_hyper_parameter_tuning_job(client, input, options \\ []) do
request(client, "ListTrainingJobsForHyperParameterTuningJob", input, options)
end
@doc """
Lists transform jobs.
"""
def list_transform_jobs(client, input, options \\ []) do
request(client, "ListTransformJobs", input, options)
end
@doc """
Lists the trial components in your account. You can sort the list by trial
component name or creation time. You can filter the list to show only
components that were created in a specific time range. You can also filter
on one of the following:
<ul> <li> `ExperimentName`
</li> <li> `SourceArn`
</li> <li> `TrialName`
</li> </ul>
"""
def list_trial_components(client, input, options \\ []) do
request(client, "ListTrialComponents", input, options)
end
@doc """
Lists the trials in your account. Specify an experiment name to limit the
list to the trials that are part of that experiment. Specify a trial
component name to limit the list to the trials that are associated with that
trial component. The list can be filtered to show only trials that were
created in a specific time range. The list can be sorted by trial name or
creation time.
"""
def list_trials(client, input, options \\ []) do
request(client, "ListTrials", input, options)
end
@doc """
Lists user profiles.
"""
def list_user_profiles(client, input, options \\ []) do
request(client, "ListUserProfiles", input, options)
end
@doc """
Use this operation to list all private and vendor workforces in an AWS
Region. Note that you can only have one private workforce per AWS Region.
"""
def list_workforces(client, input, options \\ []) do
request(client, "ListWorkforces", input, options)
end
@doc """
Gets a list of work teams that you have defined in a region. The list may
be empty if no work team satisfies the filter specified in the
`NameContains` parameter.
"""
def list_workteams(client, input, options \\ []) do
request(client, "ListWorkteams", input, options)
end
@doc """
Renders the UI template so that you can preview the worker's experience.
"""
def render_ui_template(client, input, options \\ []) do
request(client, "RenderUiTemplate", input, options)
end
@doc """
Finds Amazon SageMaker resources that match a search query. Matching
resources are returned as a list of `SearchRecord` objects in the response.
You can sort the search results by any resource property in ascending or
descending order.
You can query against the following value types: numeric, text, Boolean,
and timestamp.
"""
def search(client, input, options \\ []) do
request(client, "Search", input, options)
end
@doc """
Starts a previously stopped monitoring schedule.
<note> New monitoring schedules are immediately started after creation.
</note>
"""
def start_monitoring_schedule(client, input, options \\ []) do
request(client, "StartMonitoringSchedule", input, options)
end
@doc """
Launches an ML compute instance with the latest version of the libraries
and attaches your ML storage volume. After configuring the notebook
instance, Amazon SageMaker sets the notebook instance status to
`InService`. A notebook instance's status must be `InService` before you
can connect to your Jupyter notebook.
"""
def start_notebook_instance(client, input, options \\ []) do
request(client, "StartNotebookInstance", input, options)
end
@doc """
A method for forcing the termination of a running job.
"""
def stop_auto_m_l_job(client, input, options \\ []) do
request(client, "StopAutoMLJob", input, options)
end
@doc """
Stops a model compilation job.
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal.
This gracefully shuts the job down. If the job hasn't stopped, it sends the
SIGKILL signal.
When it receives a `StopCompilationJob` request, Amazon SageMaker changes
the `CompilationJobSummary$CompilationJobStatus` of the job to `Stopping`.
After Amazon SageMaker stops the job, it sets the
`CompilationJobSummary$CompilationJobStatus` to `Stopped`.
"""
def stop_compilation_job(client, input, options \\ []) do
request(client, "StopCompilationJob", input, options)
end
@doc """
Stops a running hyperparameter tuning job and all running training jobs
that the tuning job launched.
All model artifacts output from the training jobs are stored in Amazon
Simple Storage Service (Amazon S3). All data that the training jobs write
to Amazon CloudWatch Logs are still available in CloudWatch. After the
tuning job moves to the `Stopped` state, it releases all reserved resources
for the tuning job.
"""
def stop_hyper_parameter_tuning_job(client, input, options \\ []) do
request(client, "StopHyperParameterTuningJob", input, options)
end
@doc """
Stops a running labeling job. A job that is stopped cannot be restarted.
Any results obtained before the job is stopped are placed in the Amazon S3
output bucket.
"""
def stop_labeling_job(client, input, options \\ []) do
request(client, "StopLabelingJob", input, options)
end
@doc """
Stops a previously started monitoring schedule.
"""
def stop_monitoring_schedule(client, input, options \\ []) do
request(client, "StopMonitoringSchedule", input, options)
end
@doc """
Terminates the ML compute instance. Before terminating the instance, Amazon
SageMaker disconnects the ML storage volume from it. Amazon SageMaker
preserves the ML storage volume. Amazon SageMaker stops charging you for
the ML compute instance when you call `StopNotebookInstance`.
To access data on the ML storage volume for a notebook instance that has
been terminated, call the `StartNotebookInstance` API.
`StartNotebookInstance` launches another ML compute instance, configures
it, and attaches the preserved ML storage volume so you can continue your
work.
"""
def stop_notebook_instance(client, input, options \\ []) do
request(client, "StopNotebookInstance", input, options)
end
@doc """
Stops a processing job.
"""
def stop_processing_job(client, input, options \\ []) do
request(client, "StopProcessingJob", input, options)
end
@doc """
Stops a training job. To stop a job, Amazon SageMaker sends the algorithm
the `SIGTERM` signal, which delays job termination for 120 seconds.
Algorithms might use this 120-second window to save the model artifacts, so
that the results of the training are not lost.
When it receives a `StopTrainingJob` request, Amazon SageMaker changes the
status of the job to `Stopping`. After Amazon SageMaker stops the job, it
sets the status to `Stopped`.
"""
def stop_training_job(client, input, options \\ []) do
request(client, "StopTrainingJob", input, options)
end
@doc """
Stops a transform job.
When Amazon SageMaker receives a `StopTransformJob` request, the status of
the job changes to `Stopping`. After Amazon SageMaker stops the job, the
status is set to `Stopped`. When you stop a transform job before it is
completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
"""
def stop_transform_job(client, input, options \\ []) do
request(client, "StopTransformJob", input, options)
end
@doc """
Updates the specified Git repository with the specified values.
"""
def update_code_repository(client, input, options \\ []) do
request(client, "UpdateCodeRepository", input, options)
end
@doc """
Updates the default settings for new user profiles in the domain.
"""
def update_domain(client, input, options \\ []) do
request(client, "UpdateDomain", input, options)
end
@doc """
Deploys the new `EndpointConfig` specified in the request, switches to
using the newly created endpoint, and then deletes the resources provisioned for
the endpoint using the previous `EndpointConfig` (there is no availability
loss).
When Amazon SageMaker receives the request, it sets the endpoint status to
`Updating`. After updating the endpoint, it sets the status to `InService`.
To check the status of an endpoint, use the `DescribeEndpoint` API.
<note> You must not delete an `EndpointConfig` in use by an endpoint that
is live or while the `UpdateEndpoint` or `CreateEndpoint` operations are
being performed on the endpoint. To update an endpoint, you must create a
new `EndpointConfig`.
If you delete the `EndpointConfig` of an endpoint that is active or being
created or updated, you may lose visibility into the instance type the
endpoint is using. The endpoint must be deleted in order to stop incurring
charges.
</note>
"""
def update_endpoint(client, input, options \\ []) do
request(client, "UpdateEndpoint", input, options)
end
@doc """
Updates variant weight of one or more variants associated with an existing
endpoint, or capacity of one variant associated with an existing endpoint.
When it receives the request, Amazon SageMaker sets the endpoint status to
`Updating`. After updating the endpoint, it sets the status to `InService`.
To check the status of an endpoint, use the `DescribeEndpoint` API.
"""
def update_endpoint_weights_and_capacities(client, input, options \\ []) do
request(client, "UpdateEndpointWeightsAndCapacities", input, options)
end
@doc """
Adds, updates, or removes the description of an experiment. Updates the
display name of an experiment.
"""
def update_experiment(client, input, options \\ []) do
request(client, "UpdateExperiment", input, options)
end
@doc """
Updates a previously created schedule.
"""
def update_monitoring_schedule(client, input, options \\ []) do
request(client, "UpdateMonitoringSchedule", input, options)
end
@doc """
Updates a notebook instance. NotebookInstance updates include upgrading or
downgrading the ML compute instance used for your notebook instance to
accommodate changes in your workload requirements.
"""
def update_notebook_instance(client, input, options \\ []) do
request(client, "UpdateNotebookInstance", input, options)
end
@doc """
Updates a notebook instance lifecycle configuration created with the
`CreateNotebookInstanceLifecycleConfig` API.
"""
def update_notebook_instance_lifecycle_config(client, input, options \\ []) do
request(client, "UpdateNotebookInstanceLifecycleConfig", input, options)
end
@doc """
Updates the display name of a trial.
"""
def update_trial(client, input, options \\ []) do
request(client, "UpdateTrial", input, options)
end
@doc """
Updates one or more properties of a trial component.
"""
def update_trial_component(client, input, options \\ []) do
request(client, "UpdateTrialComponent", input, options)
end
@doc """
Updates a user profile.
"""
def update_user_profile(client, input, options \\ []) do
request(client, "UpdateUserProfile", input, options)
end
@doc """
Restricts access to tasks assigned to workers in the specified workforce to
those within specific ranges of IP addresses. You specify allowed IP
addresses by creating a list of up to ten
[CIDRs](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html).
By default, a workforce isn't restricted to specific IP addresses. If you
specify a range of IP addresses, workers who attempt to access tasks using
any IP address outside the specified range are denied access and get a `Not
Found` error message on the worker portal. After restricting access with
this operation, you can see the allowed IP values for a private workforce
with the operation.
<important> This operation applies only to private workforces.
</important>
"""
def update_workforce(client, input, options \\ []) do
request(client, "UpdateWorkforce", input, options)
end
@doc """
Updates an existing work team with new member definitions or description.
"""
def update_workteam(client, input, options \\ []) do
request(client, "UpdateWorkteam", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "sagemaker"}
host = build_host("api.sagemaker", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "SageMaker.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
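# Usage sketch (assumptions: `client` is an `%AWS.Client{}` built elsewhere with
# credentials, region, endpoint, proto and port set, matching the fields read by
# build_host/2 and build_url/2 above; the "Models"/"ModelName" response keys
# follow the ListModels API shape):
defmodule AWS.SageMaker.UsageSketch do
def model_names(client) do
case AWS.SageMaker.list_models(client, %{"MaxResults" => 10}) do
{:ok, %{"Models" => models}, _response} ->
{:ok, Enum.map(models, & &1["ModelName"])}
{:ok, nil, _response} ->
{:ok, []}
{:error, _reason} = error ->
error
end
end
end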
|
lib/aws/sage_maker.ex
| 0.862656 | 0.581244 |
sage_maker.ex
|
starcoder
|
defmodule Exfile.Ecto.FileTemplate do
@moduledoc """
A module to help you define an `Ecto.Type` backed by a custom backend.
Example:
```
defmodule MyApp.User.ProfilePicture do
use Exfile.Ecto.File,
backend: "profile_pictures",
cache_backend: "cache"
end
```
```
defmodule MyApp.User do
use Ecto.Schema
schema "users" do
field :profile_picture, MyApp.User.ProfilePicture
end
end
```
This will store any files assigned to the `profile_picture` field of `MyApp.User`
in the `cache` backend.
"""
@doc false
defmacro __using__(opts) do
backend_name = Keyword.get(opts, :backend, "store")
cache_backend_name = Keyword.get(opts, :cache_backend, "cache")
quote do
@moduledoc """
An `Ecto.Type` used to handle files persisted to the
`#{unquote(backend_name)}` backend.
"""
@behaviour Ecto.Type
defp backend(), do: Exfile.Config.get_backend(unquote(backend_name))
defp cache_backend(), do: Exfile.Config.get_backend(unquote(cache_backend_name))
@doc "The Ecto type"
def type, do: :string
@doc """
Casts a recognizable value to an `%Exfile.File{}` and uploads it to the
`#{unquote(cache_backend_name)}` backend.
Accepts five patterns:
* An `%Exfile.File{}` stored in the `#{unquote(cache_backend_name)}` or `#{unquote(backend_name)}` backends
* An `%Exfile.File{}` stored in a different backend
* An `%Exfile.LocalFile{}`
* A `%Plug.Upload{}`
* A string URI representing a file from an arbitrary backend
The string URI can be used to upload a file that is currently stored in
a separate backend. The format is:
```
exfile://[backend name]/[file ID]
```
"""
def cast(%Exfile.File{backend: %{backend_name: name}} = file) when name not in [unquote(backend_name), unquote(cache_backend_name)] do
case Exfile.Backend.upload(cache_backend(), file) do
{:ok, new_file} ->
{:ok, new_file}
{:error, _reason} ->
:error
end
end
def cast(%Exfile.File{} = file), do: {:ok, file}
def cast(%Plug.Upload{path: path, filename: filename}) do
cast(%Exfile.LocalFile{
path: path,
meta: %{
"filename" => filename
}
})
end
def cast(%Exfile.LocalFile{} = local_file) do
case Exfile.Backend.upload(cache_backend(), local_file) do
{:ok, new_file} ->
meta = Map.merge(new_file.meta, local_file.meta)
new_file = %{ new_file | meta: meta }
{:ok, new_file}
{:error, _reason} ->
:error
end
end
def cast(%URI{scheme: "exfile", host: remote_backend_name, path: "/" <> file_id}) do
case Exfile.Config.get_backend(remote_backend_name) do
{:error, _} -> :error
backend ->
cast(Exfile.Backend.get(backend, file_id))
end
end
def cast(uri) when is_binary(uri), do: URI.parse(uri) |> cast()
def cast(_), do: :error
@doc """
Loads a file URI from the database and returns an `%Exfile.File{}` struct
representing that file.
Supports loading a plain ID for backwards compatibility.
"""
def load("exfile://" <> _ = file_uri), do: URI.parse(file_uri) |> load
def load(file_id) when is_binary(file_id) do
load(%URI{
scheme: "exfile",
host: unquote(backend_name),
path: "/" <> file_id
})
end
def load(%URI{scheme: "exfile", host: remote_backend_name, path: "/" <> file_id}) do
case Exfile.Config.get_backend(remote_backend_name) do
{:error, _} -> :error
backend ->
{:ok, Exfile.Backend.get(backend, file_id)}
end
end
@doc """
Dumps an `%Exfile.File{}` struct to the file URI, suitable for storage in
the database.
"""
def dump(%Exfile.File{} = file), do: {:ok, Exfile.File.uri(file)}
def dump(_), do: :error
@doc """
Uploads a file from the `#{unquote(cache_backend_name)}` backend to the
`#{unquote(backend_name)}` backend.
This function should be called after the record has been successfully saved
to the database and all validations are passing.
"""
def upload!(file) do
Exfile.Backend.upload(backend(), file)
end
end
end
end
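# Usage sketch (assumptions: the `MyApp.User.ProfilePicture` module from the
# moduledoc example above exists and a "cache" backend is configured):
defmodule Exfile.Ecto.FileTemplate.UsageSketch do
# Mirrors what Ecto does during cast + dump: the upload lands in the cache
# backend and the value stored in the database is a URI string.
def cast_and_dump(%Plug.Upload{} = upload) do
with {:ok, file} <- MyApp.User.ProfilePicture.cast(upload),
{:ok, uri} <- MyApp.User.ProfilePicture.dump(file) do
# `uri` looks like "exfile://cache/<file id>".
{:ok, uri}
end
end
end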
|
lib/exfile/ecto/file_template.ex
| 0.81615 | 0.550124 |
file_template.ex
|
starcoder
|
defmodule RedisGraph.Node do
@moduledoc """
A Node member of a Graph.
Nodes have an alias which uniquely identifies them in a Graph. Nodes
must have an alias in order for their associated Graph to be committed
to the database.
Nodes have a label which is analogous to a type definition. Nodes can
be queried based on their label, e.g. ``person`` or ``place`` or ``food``.
Nodes may optionally have properties, a map of values associated with
the entity. These properties can be returned by database queries.
Nodes may have aliases. When adding a `RedisGraph.Node` to a
`RedisGraph.Graph`, a random alias may be set on the Node prior
to being added to the Graph if it does not already have one.
Nodes which are created as the result of a ``MATCH`` query in a
`RedisGraph.QueryResult` will also have numeric ids which are
internal to the graph in the database.
"""
alias RedisGraph.Util
@type t() :: %__MODULE__{
id: integer(),
alias: String.t(),
label: String.t(),
properties: %{optional(String.t()) => any()}
}
defstruct [:id, :alias, :label, properties: %{}]
@doc """
Creates a new Node.
## Example
john = Node.new(%{
label: "person",
properties: %{
name: "<NAME>",
age: 33
}
})
"""
@spec new(map()) :: t()
def new(map) do
struct(__MODULE__, map)
end
@doc "Sets the node's alias if it is `nil`."
@spec set_alias_if_nil(t()) :: t()
def set_alias_if_nil(node) do
if is_nil(node.alias) do
%{node | alias: Util.random_string()}
else
node
end
end
@doc "Converts the properties to a query-appropriate string."
@spec properties_to_string(t()) :: String.t()
def properties_to_string(node) do
inner =
Map.keys(node.properties)
|> Enum.map(fn key -> "#{key}:#{Util.quote_string(node.properties[key])}" end)
|> Enum.join(",")
if String.length(inner) > 0 do
"{" <> inner <> "}"
else
""
end
end
@doc "Converts the node to a query-appropriate string."
@spec to_query_string(t()) :: String.t()
def to_query_string(node) do
alias_ =
case is_nil(node.alias) do
true -> ""
false -> node.alias
end
label =
case is_nil(node.label) do
true -> ""
false -> node.label
end
"(" <> alias_ <> ":" <> label <> properties_to_string(node) <> ")"
end
@doc """
Compare two Nodes with respect to equality.
Comparison logic:
* if ids differ then returns ``false``
* if aliases differ then returns ``false``
* if labels differ then returns ``false``
* if properties differ then returns ``false``
* otherwise returns ``true``
"""
@spec compare(t(), t()) :: boolean()
def compare(left, right) do
cond do
left.id != right.id -> false
left.alias != right.alias -> false
left.label != right.label -> false
map_size(left.properties) != map_size(right.properties) -> false
not Map.equal?(left.properties, right.properties) -> false
true -> true
end
end
end
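# Query-fragment sketch using the functions above (the generated alias is
# random; the exact rendering of values depends on Util.quote_string/1):
defmodule RedisGraph.Node.UsageSketch do
alias RedisGraph.Node
def demo do
node =
%{label: "person", properties: %{age: 33}}
|> Node.new()
|> Node.set_alias_if_nil()
# Produces something like "(xyz:person{age:33})".
Node.to_query_string(node)
end
end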
|
lib/redis_graph/node.ex
| 0.915115 | 0.682977 |
node.ex
|
starcoder
|
defmodule Patch.Mock do
alias Patch.Mock
alias Patch.Mock.Code
alias Patch.Mock.Code.Freezer
@typedoc """
What exposures should be made in a module.
- `:public` will only expose the public functions
- `:all` will expose both public and private functions
- A list of exports can be provided, they will be added to the `:public` functions.
"""
@type exposes :: :all | :public | Code.exports()
@typedoc """
The exposes option controls if any private functions should be exposed.
The default is `:public`.
"""
@type exposes_option :: {:exposes, exposes()}
@typedoc """
This history_limit option controls how large of a history a mock should store
It defaults to `:infinity` which will store an unlimited history.
"""
@type history_limit_option :: {:history_limit, non_neg_integer() | :infinity}
@typedoc """
Sum-type of all valid options
"""
@type option :: exposes_option() | history_limit_option()
@doc """
Returns the number of times a matching call has been observed.
The call arguments support any valid patterns.
This function uses the Mock's history to check; if the history is limited or disabled, then calls
that have happened may report back as never having happened.
"""
@spec call_count(call :: Macro.t()) :: Macro.t()
defmacro call_count(call) do
quote do
unquote(call)
|> Patch.Mock.matches()
|> Enum.count()
end
end
@doc """
Checks to see if the call has been observed.
The call arguments support any valid patterns.
This function uses the Mock's history to check; if the history is limited or disabled, then calls
that have happened may report back as never having happened.
"""
@spec called?(call :: Macro.t()) :: Macro.t()
defmacro called?(call) do
{module, function, pattern} = Macro.decompose_call(call)
quote do
unquote(module)
|> Patch.Mock.history()
|> Patch.Mock.History.entries(:desc)
|> Enum.any?(fn
{unquote(function), arguments} ->
Patch.Macro.match?(unquote(pattern), arguments)
_ ->
false
end)
end
end
@doc """
Checks to see if a function with the given name has been called in the given module.
This function uses the Mock's history to check; if the history is limited or disabled, then calls
that have happened may report back as never having happened.
"""
@spec called?(module :: module(), name :: atom()) :: boolean()
def called?(module, name) do
module
|> history()
|> Mock.History.entries(:desc)
|> Enum.any?(&match?({^name, _}, &1))
end
@doc """
Expose private functions in a module.
If the module is not already mocked, calling this function will mock it.
"""
@spec expose(module :: module, exposes :: exposes()) :: :ok | {:error, term()}
def expose(module, exposes) do
with {:ok, _} <- module(module, exposes: exposes) do
Mock.Server.expose(module, exposes)
end
end
@doc """
Gets the call history for a module.
If the module is not already mocked, this function returns an empty new history.
"""
@spec history(module :: module()) :: Mock.History.t()
def history(module) do
Mock.Server.history(module)
end
@doc """
Given a call, finds the latest call that matched.
Returns `{:ok, {function, arguments}}` if a matching call is found, `nil` otherwise.
"""
@spec latest_match(call :: Macro.t()) :: Macro.t()
defmacro latest_match(call) do
{module, function, pattern} = Macro.decompose_call(call)
quote do
unquote(module)
|> Patch.Mock.history()
|> Patch.Mock.History.entries(:desc)
|> Enum.find_value(fn
{unquote(function), arguments} = call ->
if Patch.Macro.match?(unquote(pattern), arguments) do
{:ok, call}
else
false
end
_ ->
false
end)
end
end
@doc """
Decorates the history with whether or not the call in the history matches the provided call.
Provided call arguments support any valid patterns.
Returns the calls descending (newest first) as two-tuples in the form:
`{boolean(), {atom(), [term()]}}`
The first element indicates whether the call matches the provided call.
The second element is a tuple of function name and arguments.
This macro uses the Mock's history to check; if the history is limited or disabled, then calls
that have happened may report back as never having happened.
"""
@spec match_history(call :: Macro.t()) :: Macro.t()
defmacro match_history(call) do
{module, function, pattern} = Macro.decompose_call(call)
quote do
unquote(module)
|> Patch.Mock.history()
|> Patch.Mock.History.entries(:desc)
|> Enum.map(fn
{unquote(function), arguments} = call ->
{Patch.Macro.match?(unquote(pattern), arguments), call}
call ->
{false, call}
end)
end
end
@doc """
Returns all the calls in the history that match the provided call.
Provided call arguments support any valid patterns.
Returns the calls descending (newest first), each as the list of arguments in the call.
This macro uses the Mock's history to check; if the history is limited or disabled, then calls
that have happened may report back as never having happened.
"""
@spec matches(call :: Macro.t()) :: Macro.t()
defmacro matches(call) do
quote do
unquote(call)
|> Patch.Mock.match_history()
|> Enum.filter(&elem(&1, 0))
|> Enum.map(fn {true, {_function, arguments}} -> arguments end)
end
end
@doc """
Mocks the given module.
Mocking a module accepts two options, see the `t:option/0` type in this module for details.
"""
@spec module(module :: module(), options :: [option()]) ::
{:ok, pid()} | {:error, term()}
def module(module, options \\ []) do
:ok = Freezer.put(module)
case Mock.Supervisor.start_child(module, options) do
{:ok, pid} ->
{:ok, pid}
{:error, {:already_started, pid}} ->
{:ok, pid}
{:error, _} = error ->
error
end
end
@doc """
Registers a mock value for a function.
If the module is not already mocked, this function will mock it with no private functions
exposed.
"""
@spec register(module :: module(), name :: atom(), value :: Mock.Value.t()) :: :ok
def register(module, name, value) do
with {:ok, _} <- module(module) do
Mock.Server.register(module, name, value)
end
end
@doc """
Restores a module to pre-patch functionality.
If the module is not already mocked, this function no-ops.
"""
@spec restore(module :: module()) :: :ok
def restore(module) do
Mock.Server.restore(module)
end
@doc """
Restores a function in a module to pre-patch functionality.
If the module or function are not already mocked, this function no-ops.
"""
@spec restore(module :: module(), name :: atom()) :: :ok
def restore(module, name) do
Mock.Server.restore(module, name)
end
end
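# Test-style sketch (assumptions: a target module `Calculator` exists, and
# `Patch.Mock.Value.scalar/1` is this library's scalar value constructor):
defmodule Patch.Mock.UsageSketch do
def demo do
{:ok, _pid} = Patch.Mock.module(Calculator)
:ok = Patch.Mock.register(Calculator, :add, Patch.Mock.Value.scalar(42))
# The mocked function now answers with the registered value, and the call
# is recorded in the history queried by called?/2.
42 = Calculator.add(1, 2)
true = Patch.Mock.called?(Calculator, :add)
end
end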
|
lib/patch/mock.ex
| 0.901579 | 0.505859 |
mock.ex
|
starcoder
|
defmodule Geometry.GeometryCollectionZM do
@moduledoc """
A collection set of 3D geometries with a measurement.
`GeometryCollectionZM` implements the protocols `Enumerable` and `Collectable`.
## Examples
iex> Enum.map(
...> GeometryCollectionZM.new([
...> PointZM.new(11, 12, 13, 14),
...> LineStringZM.new([
...> PointZM.new(21, 22, 23, 24),
...> PointZM.new(31, 32, 33, 34)
...> ])
...> ]),
...> fn
...> %PointZM{} -> :point
...> %LineStringZM{} -> :line_string
...> end
...> ) |> Enum.sort()
[:line_string, :point]
iex> Enum.into([PointZM.new(1, 2, 3, 4)], GeometryCollectionZM.new())
%GeometryCollectionZM{
geometries: MapSet.new([%PointZM{coordinate: [1, 2, 3, 4]}])
}
"""
alias Geometry.{
GeoJson,
GeometryCollectionZM,
WKB,
WKT
}
defstruct geometries: MapSet.new()
@type t :: %GeometryCollectionZM{geometries: MapSet.t(Geometry.t())}
@doc """
Creates an empty `GeometryCollectionZM`.
## Examples
iex> GeometryCollectionZM.new()
%GeometryCollectionZM{geometries: MapSet.new()}
"""
@spec new :: t()
def new, do: %GeometryCollectionZM{}
@doc """
Creates a `GeometryCollectionZM` from the given geometries.
## Examples
iex> GeometryCollectionZM.new([
...> PointZM.new(1, 2, 3, 4),
...> LineStringZM.new([PointZM.new(1, 1, 1, 1), PointZM.new(2, 2, 2, 2)])
...> ])
%GeometryCollectionZM{geometries: MapSet.new([
%PointZM{coordinate: [1, 2, 3, 4]},
%LineStringZM{points: [[1, 1, 1, 1], [2, 2, 2, 2]]}
])}
"""
@spec new([Geometry.t()]) :: t()
def new(geometries), do: %GeometryCollectionZM{geometries: MapSet.new(geometries)}
@doc """
Returns `true` if the given `GeometryCollectionZM` is empty.
## Examples
iex> GeometryCollectionZM.empty?(GeometryCollectionZM.new())
true
iex> GeometryCollectionZM.empty?(GeometryCollectionZM.new([PointZM.new(1, 2, 3, 4)]))
false
"""
@spec empty?(t()) :: boolean
def empty?(%GeometryCollectionZM{geometries: geometries}), do: Enum.empty?(geometries)
@doc """
Returns the WKT representation for a `GeometryCollectionZM`. With option
`:srid` an EWKT representation with the SRID is returned.
## Examples
iex> GeometryCollectionZM.to_wkt(GeometryCollectionZM.new())
"GeometryCollection ZM EMPTY"
iex> GeometryCollectionZM.to_wkt(
...> GeometryCollectionZM.new([
...> PointZM.new(1.1, 1.2, 1.3, 1.4),
...> PointZM.new(2.1, 2.2, 2.3, 2.4)
...> ])
...> )
"GeometryCollection ZM (Point ZM (1.1 1.2 1.3 1.4), Point ZM (2.1 2.2 2.3 2.4))"
iex> GeometryCollectionZM.to_wkt(
...> GeometryCollectionZM.new([PointZM.new(1.1, 2.2, 3.3, 4.4)]),
...> srid: 4711)
"SRID=4711;GeometryCollection ZM (Point ZM (1.1 2.2 3.3 4.4))"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%GeometryCollectionZM{geometries: geometries}, opts \\ []) do
WKT.to_ewkt(
<<
"GeometryCollection ZM ",
geometries |> MapSet.to_list() |> to_wkt_geometries()::binary()
>>,
opts
)
end
@doc """
Returns an `:ok` tuple with the `GeometryCollectionZM` from the given WKT
string. Otherwise returns an `:error` tuple.
If the geometry contains an SRID, the ID is added to the tuple.
## Examples
iex> GeometryCollectionZM.from_wkt(
...> "GeometryCollection ZM (Point ZM (1.1 2.2 3.3 4.4))")
{
:ok,
%GeometryCollectionZM{
geometries: MapSet.new([%PointZM{coordinate: [1.1, 2.2, 3.3, 4.4]}])
}
}
iex> GeometryCollectionZM.from_wkt(
...> "SRID=123;GeometryCollection ZM (Point ZM (1.1 2.2 3.3 4.4))")
{:ok, {
%GeometryCollectionZM{
geometries: MapSet.new([%PointZM{coordinate: [1.1, 2.2, 3.3, 4.4]}])
},
123
}}
iex> GeometryCollectionZM.from_wkt("GeometryCollection ZM EMPTY")
{:ok, %GeometryCollectionZM{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, GeometryCollectionZM)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, GeometryCollectionZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `GeometryCollectionZM`.
## Examples
iex> GeometryCollectionZM.to_geo_json(
...> GeometryCollectionZM.new([PointZM.new(1.1, 2.2, 3.3, 4.4)]))
%{
"type" => "GeometryCollection",
"geometries" => [
%{
"type" => "Point",
"coordinates" => [1.1, 2.2, 3.3, 4.4]
}
]
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%GeometryCollectionZM{geometries: geometries}) do
%{
"type" => "GeometryCollection",
"geometries" =>
Enum.map(geometries, fn geometry ->
Geometry.to_geo_json(geometry)
end)
}
end
@doc """
Returns an `:ok` tuple with the `GeometryCollectionZM` from the given GeoJSON
term. Otherwise returns an `:error` tuple.
## Examples
iex> ~s({
...> "type": "GeometryCollection",
...> "geometries": [
...> {"type": "Point", "coordinates": [1.1, 2.2, 3.3, 4.4]}
...> ]
...> })
iex> |> Jason.decode!()
iex> |> GeometryCollectionZM.from_geo_json()
{
:ok,
%GeometryCollectionZM{
geometries: MapSet.new([%PointZM{coordinate: [1.1, 2.2, 3.3, 4.4]}])
}
}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json) do
GeoJson.to_geometry_collection(json, GeometryCollectionZM, type: :zm)
end
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_geometry_collection(json, GeometryCollectionZM, type: :zm) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKB representation for a `GeometryCollectionZM`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:ndr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZM.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid()]
def to_wkb(%GeometryCollectionZM{geometries: geometries}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_geometries(geometries, endian, mode)::binary()
>>
end
@doc """
Returns an `:ok` tuple with the `GeometryCollectionZM` from the given WKB
string. Otherwise returns an `:error` tuple.
If the geometry contains an SRID, the ID is added to the tuple.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZM.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, GeometryCollectionZM)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, GeometryCollectionZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the number of elements in `GeometryCollectionZM`.
## Examples
iex> GeometryCollectionZM.size(
...> GeometryCollectionZM.new([
...> PointZM.new(11, 12, 13, 14),
...> LineStringZM.new([
...> PointZM.new(21, 22, 23, 24),
...> PointZM.new(31, 32, 33, 34)
...> ])
...> ])
...> )
2
"""
@spec size(t()) :: non_neg_integer()
def size(%GeometryCollectionZM{geometries: geometries}), do: MapSet.size(geometries)
@doc """
Checks if `GeometryCollectionZM` contains `geometry`.
## Examples
iex> GeometryCollectionZM.member?(
...> GeometryCollectionZM.new([
...> PointZM.new(11, 12, 13, 14),
...> LineStringZM.new([
...> PointZM.new(21, 22, 23, 24),
...> PointZM.new(31, 32, 33, 34)
...> ])
...> ]),
...> PointZM.new(11, 12, 13, 14)
...> )
true
iex> GeometryCollectionZM.member?(
...> GeometryCollectionZM.new([
...> PointZM.new(11, 12, 13, 14),
...> LineStringZM.new([
...> PointZM.new(21, 22, 23, 24),
...> PointZM.new(31, 32, 33, 34)
...> ])
...> ]),
...> PointZM.new(1, 2, 3, 4)
...> )
false
"""
@spec member?(t(), Geometry.t()) :: boolean()
def member?(%GeometryCollectionZM{geometries: geometries}, geometry),
do: MapSet.member?(geometries, geometry)
@doc """
Converts `GeometryCollectionZM` to a list.
## Examples
iex> GeometryCollectionZM.to_list(
...> GeometryCollectionZM.new([
...> PointZM.new(11, 12, 13, 14)
...> ])
...> )
[%PointZM{coordinate: [11, 12, 13, 14]}]
"""
@spec to_list(t()) :: [Geometry.t()]
def to_list(%GeometryCollectionZM{geometries: geometries}), do: MapSet.to_list(geometries)
@compile {:inline, to_wkt_geometries: 1}
defp to_wkt_geometries([]), do: "EMPTY"
defp to_wkt_geometries([geometry | geometries]) do
<<"(",
Enum.reduce(geometries, Geometry.to_wkt(geometry), fn %module{} = geometry, acc ->
<<acc::binary(), ", ", module.to_wkt(geometry)::binary()>>
end)::binary(), ")">>
end
@compile {:inline, to_wkb_geometries: 3}
defp to_wkb_geometries(geometries, endian, mode) do
Enum.reduce(geometries, WKB.length(geometries, endian, mode), fn %module{} = geometry, acc ->
<<acc::binary(), module.to_wkb(geometry, endian: endian, mode: mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "C0000007"
{:ndr, false} -> "070000C0"
{:xdr, true} -> "E0000007"
{:ndr, true} -> "070000E0"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0xC0000007::big-integer-size(32)>>
{:ndr, false} -> <<0xC0000007::little-integer-size(32)>>
{:xdr, true} -> <<0xE0000007::big-integer-size(32)>>
{:ndr, true} -> <<0xE0000007::little-integer-size(32)>>
end
end
defimpl Enumerable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def count(geometry_collection) do
{:ok, GeometryCollectionZM.size(geometry_collection)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def member?(geometry_collection, val) do
{:ok, GeometryCollectionZM.member?(geometry_collection, val)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def slice(geometry_collection) do
size = GeometryCollectionZM.size(geometry_collection)
{:ok, size,
&Enumerable.List.slice(GeometryCollectionZM.to_list(geometry_collection), &1, &2, size)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def reduce(geometry_collection, acc, fun) do
Enumerable.List.reduce(GeometryCollectionZM.to_list(geometry_collection), acc, fun)
end
end
defimpl Collectable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def into(%GeometryCollectionZM{geometries: geometries}) do
fun = fn
list, {:cont, x} ->
[{x, []} | list]
list, :done ->
%GeometryCollectionZM{
geometries: %{geometries | map: Map.merge(geometries.map, Map.new(list))}
}
_list, :halt ->
:ok
end
{[], fun}
end
end
end
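# WKT round-trip sketch reusing values from the doctests above:
defmodule Geometry.GeometryCollectionZM.UsageSketch do
alias Geometry.GeometryCollectionZM
def demo do
{:ok, collection} =
GeometryCollectionZM.from_wkt("GeometryCollection ZM (Point ZM (1.1 2.2 3.3 4.4))")
GeometryCollectionZM.to_wkt(collection, srid: 4711)
# => "SRID=4711;GeometryCollection ZM (Point ZM (1.1 2.2 3.3 4.4))"
end
end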
|
lib/geometry/geometry_collection_zm.ex
| 0.960842 | 0.650176 |
geometry_collection_zm.ex
|
starcoder
|
defmodule LinkedList do
@opaque t :: tuple()
@doc """
Construct a new LinkedList
"""
@spec new() :: t
def new(), do: {}
@doc """
Push an item onto a LinkedList
"""
@spec push(t, any()) :: t
def push(list, elem), do: {elem, list}
@doc """
Counts the number of elements in a LinkedList
"""
@spec count(t) :: non_neg_integer()
def count(list), do: list |> do_count(0)
@doc """
Determine if a LinkedList is empty
"""
@spec empty?(t) :: boolean()
def empty?({}), do: true
def empty?(_), do: false
@doc """
Get the value of the head of the LinkedList
"""
@spec peek(t) :: {:ok, any()} | {:error, :empty_list}
def peek({head, _}), do: {:ok, head}
def peek(_), do: {:error, :empty_list}
@doc """
Get tail of a LinkedList
"""
@spec tail(t) :: {:ok, t} | {:error, :empty_list}
def tail({_, tail}), do: {:ok, tail}
def tail(_), do: {:error, :empty_list}
@doc """
Remove the head from a LinkedList
"""
@spec pop(t) :: {:ok, any(), t} | {:error, :empty_list}
def pop({head, tail}), do: {:ok, head, tail}
def pop(_), do: {:error, :empty_list}
@doc """
Construct a LinkedList from a stdlib List
"""
@spec from_list(list()) :: t
def from_list(list), do: list |> do_from_list(new()) |> reverse()
@doc """
Construct a stdlib List from a LinkedList
"""
@spec to_list(t) :: list()
def to_list(list), do: list |> do_to_list([]) |> Enum.reverse()
@doc """
Reverse a LinkedList
"""
@spec reverse(t) :: t
def reverse(list) do
list |> do_reverse(new())
end
defp do_count({}, acc), do: acc
defp do_count({_, tail}, acc), do: do_count(tail, acc + 1)
defp do_from_list([], acc), do: acc
defp do_from_list([head | tail], acc), do: do_from_list(tail, push(acc, head))
defp do_to_list({}, acc), do: acc
defp do_to_list({head, tail}, acc), do: do_to_list(tail, [head | acc])
defp do_reverse({}, acc), do: acc
defp do_reverse({head, tail}, acc), do: do_reverse(tail, push(acc, head))
end
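# Quick usage sketch; pure functions, nothing assumed beyond this module:
defmodule LinkedList.UsageSketch do
def demo do
list = LinkedList.from_list([1, 2, 3])
3 = LinkedList.count(list)
{:ok, 1} = LinkedList.peek(list)
# Reversing and converting back yields the mirrored stdlib list.
[3, 2, 1] = list |> LinkedList.reverse() |> LinkedList.to_list()
:ok
end
end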
|
exercism/elixir/simple-linked-list/lib/linked_list.ex
| 0.844361 | 0.448185 |
linked_list.ex
|
starcoder
|
defmodule ZenMonitor.Local.Tables do
@moduledoc """
`ZenMonitor.Local.Tables` owns tables that are shared between multiple `ZenMonitor.Local`
components.
See `nodes/0` and `references/0` for more information.
"""
use GenServer
@node_table Module.concat(__MODULE__, "Nodes")
@reference_table Module.concat(__MODULE__, "References")
## Client
def start_link(_opts \\ []) do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@doc """
Nodes holds cached information about remote node compatibility
This information is stored in one of the following structures:
For compatible nodes
{ remote_node, :compatible }
^---key---^ ^--value--^
For incompatible nodes
{ remote_node, {:incompatible, enforce_until, attempts} }
^---key---^ ^---------------value-----------------^
`enforce_until` is the time (as reported by System.monotonic_time(:milliseconds)) after which
this cache entry should no longer be enforced.
`attempts` is the number of consecutive connect attempts that have failed; this value is useful
for calculating geometric backoff values.
"""
@spec nodes() :: :ets.tab()
def nodes do
@node_table
end
@doc """
References holds the set of authoritative monitor references
These references are stored in this structure:
{ {subscriber_pid, monitor_reference}, {remote_node, remote_pid} }
^-------------key-----------------^ ^----------value--------^
There is a compound key of {subscriber_pid, monitor_reference} this allows for lookup of a given
reference (if the subscriber is known, by convention it will be the calling process, self()) or
the retrieval of all active monitors for a subscriber.
"""
@spec references() :: :ets.tab()
def references do
@reference_table
end
## Server
def init(_opts) do
@node_table = :ets.new(@node_table, [:public, :named_table, :set, write_concurrency: true])
@reference_table =
:ets.new(@reference_table, [:public, :named_table, :ordered_set, write_concurrency: true])
{:ok, nil}
end
end
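# Read-side sketch (assumes the Tables process has been started so the named
# ETS tables exist):
defmodule ZenMonitor.Local.Tables.UsageSketch do
# Looks up the cached compatibility entry for `remote_node`, decoding the
# tuple shapes documented on `nodes/0`.
def compatibility(remote_node) do
case :ets.lookup(ZenMonitor.Local.Tables.nodes(), remote_node) do
[{^remote_node, :compatible}] -> :compatible
[{^remote_node, {:incompatible, enforce_until, attempts}}] -> {:incompatible, enforce_until, attempts}
[] -> :unknown
end
end
end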
|
lib/zen_monitor/local/tables.ex
| 0.881774 | 0.600745 |
tables.ex
|
starcoder
|
defmodule Neo4j.Sips.Utils do
@moduledoc "Common utilities"
@doc """
Generate a random string.
"""
def random_id, do: :rand.uniform() |> Float.to_string() |> String.slice(2..10)
@doc """
Given a list of queries i.e. `[{"cypher statement ..."}, %{parameters...}]`, this
method will return a JSON that may look like this:
````
{
"statements" : [ {
"statement" : "CREATE (n {props}) RETURN n",
"parameters" : {
"props" : {
"name" : "My Node"
}
}
} ]
}
````
"""
def neo4j_statements(queries, options \\ nil) when is_list(queries) do
make_neo4j_statements(queries, [], options)
end
@doc """
Use a collection for finding and extracting elements with a given name.
"""
def get_element(c, name) do
Enum.map(c, &(Map.get(&1, name))) |> List.first
end
# some of the methods here are a customized variant from a similar project:
# - https://github.com/raw1z/ex_neo4j
def format_statements(queries) when is_list(queries) do
do_format_statements(queries, [])
end
def do_format_statements([], acc), do: to_json(%{statements: Enum.reverse(acc)})
def do_format_statements([{query, params}|tail], acc) do
statement = format_statement(query, params)
do_format_statements(tail, [statement|acc])
end
def format_statement(query, params) do
statement = %{statement: query}
if map_size(params) > 0 do
Map.merge(statement, %{parameters: params})
else
statement
end
end
# private stuff
defp make_neo4j_statements([], acc, _options) do
to_json(%{statements: Enum.reverse(acc)})
end
defp make_neo4j_statements([query|tail], acc, options) when is_binary(query) do
statement = neo4j_statement(query, %{}, options)
make_neo4j_statements(tail, [statement|acc], options)
end
defp make_neo4j_statements([{query, params}|tail], acc, options) do
statement = neo4j_statement(query, params, options)
make_neo4j_statements(tail, [statement|acc], options)
end
defp neo4j_statement(query, params, options) do
q = String.trim(query)
if String.length(q) > 0 do
statement = %{statement: q}
statement = if map_size(params) > 0, do: Map.merge(statement, %{parameters: params}), else: statement
if options, do: Map.merge(statement, options), else: statement
end
end
defp to_json(value, options \\ []) do
Poison.encode!(value, options)
|> IO.iodata_to_binary
end
end
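# Sketch: building the statements JSON from the moduledoc via
# format_statements/1 (key order in the encoded JSON may vary):
defmodule Neo4j.Sips.Utils.UsageSketch do
def demo do
Neo4j.Sips.Utils.format_statements([
{"CREATE (n {props}) RETURN n", %{props: %{name: "My Node"}}}
])
# => roughly ~s({"statements":[{"statement":"CREATE (n {props}) RETURN n",
#    "parameters":{"props":{"name":"My Node"}}}]})
end
end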
|
lib/neo4j_sips_models/utils.ex
| 0.772531 | 0.499268 |
utils.ex
|
starcoder
|
defmodule DemoRGBLCD do
@moduledoc """
Sample functions to demonstrate and test GrovePi.RGBLCD module
"""
# References
# C++ library: https://github.com/Seeed-Studio/Grove_LCD_RGB_Backlight
alias GrovePi.RGBLCD
@doc """
Shows autoscroll function
"""
def autoscroll() do
{:ok, config} = RGBLCD.initialize()
print_autoscroll(config)
end
defp print_autoscroll(config) do
RGBLCD.set_cursor(0, 0)
IO.inspect(config)
print_nums()
RGBLCD.set_cursor(1, 16)
{:ok, new_config} = RGBLCD.autoscroll(config)
IO.inspect(new_config)
print_nums()
RGBLCD.scroll_left(10)
RGBLCD.clear_display()
{:ok, new_config} = RGBLCD.autoscroll_off(new_config)
print_autoscroll(new_config)
end
@doc """
Toggles cursor blinking on and off every 3000ms
"""
def blink() do
{:ok, config} = RGBLCD.initialize()
RGBLCD.set_text("hello world!")
toggle_blink(config)
end
defp toggle_blink(config) do
{:ok, new_config} = RGBLCD.cursor_blink_on(config)
Process.sleep(3000)
{:ok, new_config} = RGBLCD.cursor_blink_off(new_config)
Process.sleep(3000)
toggle_blink(new_config)
end
@doc """
Toggles the cursor on and off every 1500ms
"""
def cursor() do
{:ok, config} = RGBLCD.initialize()
RGBLCD.set_text("hello world!")
toggle_cursor(config)
end
defp toggle_cursor(config) do
{:ok, new_config} = RGBLCD.cursor_on(config)
Process.sleep(1500)
{:ok, new_config} = RGBLCD.cursor_off(new_config)
Process.sleep(1500)
toggle_cursor(new_config)
end
@doc """
Demonstrates setting the RGB color
"""
def colors() do
{:ok, _config} = RGBLCD.initialize()
toggle_colors()
end
defp toggle_colors() do
RGBLCD.set_rgb(255, 0, 0)
Process.sleep(1500)
RGBLCD.set_rgb(0, 255, 0)
Process.sleep(1500)
RGBLCD.set_rgb(0, 0, 255)
Process.sleep(1500)
RGBLCD.set_rgb(:rand.uniform(255), :rand.uniform(255), :rand.uniform(255))
Process.sleep(1500)
toggle_colors()
end
@doc """
Toggles the display on and off every 1500ms
"""
def display() do
{:ok, config} = RGBLCD.initialize()
RGBLCD.set_text("hello world!")
toggle_display(config)
end
defp toggle_display(config) do
{:ok, new_config} = RGBLCD.display_on(config)
Process.sleep(1500)
{:ok, new_config} = RGBLCD.display_off(new_config)
Process.sleep(1500)
toggle_display(new_config)
end
@doc """
Prints 0 to 9 with 500ms delay between numbers
"""
def print_nums do
for num <- 0..9 do
num
|> Integer.to_string
|> RGBLCD.write_text
IO.puts(num)
Process.sleep(500)
end
end
@doc """
Demonstrates text direction both ways
"""
def text_direction() do
{:ok, config} = RGBLCD.initialize()
do_text_direction(config)
end
defp do_text_direction(config) do
IO.inspect(config)
print_nums()
{:ok, new_config} = RGBLCD.text_right_to_left(config)
IO.inspect(new_config)
print_nums()
{:ok, new_config} = RGBLCD.text_left_to_right(new_config)
do_text_direction(new_config)
end
@doc """
Demonstrates moving the cursor to the second line
"""
def set_cursor() do
{:ok, config} = RGBLCD.initialize()
{:ok, _new_config} = RGBLCD.cursor_on(config)
do_set_cursor()
end
defp do_set_cursor() do
RGBLCD.set_cursor(0, 5)
Process.sleep(1000)
print_nums()
RGBLCD.set_cursor(1, 3)
Process.sleep(1000)
print_nums()
RGBLCD.clear_display()
do_set_cursor()
end
end
|
examples/demo_rgblcd/lib/demo_rgblcd.ex
| 0.701611 | 0.432962 |
demo_rgblcd.ex
|
starcoder
|
defmodule NaiveBayes do
@moduledoc """
An implementation of Naive Bayes
"""
defstruct vocab: %Vocab{}, data: %Data{}, smoothing: 1, binarized: false, assume_uniform: false
@doc """
Initializes a new NaiveBayes agent
Returns `{:ok, pid}`.
## Examples
iex> {:ok, nbayes} = NaiveBayes.new(binarized: false, assume_uniform: true, smoothing: 2)
{:ok, #PID<0.137.0>}
"""
def new(opts \\ []) do
binarized = opts[:binarized] || false
assume_uniform = opts[:assume_uniform] || false
smoothing = opts[:smoothing] || 1
{:ok, pid} = Agent.start_link fn ->
%NaiveBayes{smoothing: smoothing, binarized: binarized, assume_uniform: assume_uniform}
end
{:ok, pid}
end
@doc """
Trains the naive bayes instance given a list of tokens and categories
Returns `:ok` or `:error`
## Examples
iex> {:ok, nbayes} = NaiveBayes.new
{:ok, #PID<0.137.0>}
iex> nbayes |> NaiveBayes.train( ["a", "b", "c"], "classA" )
:ok
"""
def train(pid, tokens, categories) do
categories = List.flatten [categories]
case Enum.count(tokens) > 0 && Enum.count(categories) > 0 do
true ->
Agent.get_and_update(pid, fn classifier ->
tokens = if classifier.binarized, do: Enum.uniq(tokens), else: tokens
classifier = Enum.reduce(categories, classifier, fn(category, classifier) ->
classifier = put_in(classifier.data, Data.increment_examples(classifier.data, category))
Enum.reduce(tokens, classifier, fn(token, classifier) ->
classifier = put_in(classifier.data, Data.add_token_to_category(classifier.data, category, token))
put_in(classifier.vocab, Vocab.seen_token(classifier.vocab, token))
end)
end)
{:ok, classifier}
end)
:ok
false ->
:error
end
end
@doc """
Returns a list of probabilities of classes given a list of tokens.
## Examples
iex> results = nbayes |> NaiveBayes.classify( ["a", "b", "c"] )
%{"HAM" => 0.4832633319857435, "SPAM" => 0.5167366680142564}
"""
def classify(pid, tokens) do
classifier = classifier_instance(pid)
tokens = if classifier.binarized, do: Enum.uniq(tokens), else: tokens
calculate_probabilities(classifier, tokens)
end
@doc """
Allows removal of low frequency words that increase processing time and may overfit
Returns `:ok`
## Examples
iex> nbayes |> NaiveBayes.purge_less_than(5)
:ok
"""
def purge_less_than(pid, x) do
Agent.get_and_update(pid, fn classifier ->
{classifier, remove_list} = Enum.reduce(classifier.vocab.tokens, {classifier, []}, fn ({token, _}, {classifier, remove_list}) ->
case Data.purge_less_than(classifier.data, token, x) do
false -> {classifier, remove_list}
data -> {put_in(classifier.data, data), remove_list ++ [token]}
end
end)
classifier = Enum.reduce(remove_list, classifier, fn (token, classifier) ->
put_in(classifier.vocab, Vocab.remove_token(classifier.vocab, token))
end)
{:ok, classifier}
end, 3600*24*30*1000) # don't timeout
:ok
end
@doc """
Increase smoothing constant to dampen the effect of the rare tokens
Returns `:ok`
## Examples
iex> nbayes |> NaiveBayes.set_smoothing(2)
:ok
"""
def set_smoothing(pid, x) do
Agent.get_and_update pid, fn classifier ->
{:ok, put_in(classifier.smoothing, x)}
end
:ok
end
@doc """
Set the assume_uniform constant.
Returns `:ok`
## Examples
iex> nbayes |> NaiveBayes.assume_uniform(true)
:ok
"""
def assume_uniform(pid, bool) do
Agent.get_and_update pid, fn classifier ->
{:ok, put_in(classifier.assume_uniform, bool)}
end
:ok
end
defp calculate_probabilities(classifier, tokens) do
v_size = Enum.count(classifier.vocab.tokens)
total_example_count = Data.total_examples(classifier.data)
prob_numerator = Enum.reduce(classifier.data.categories, %{}, fn ({cat_name, cat_data}, probs) ->
cat_prob = case classifier.assume_uniform do
true -> :math.log(1 / Enum.count(classifier.data.categories))
false -> :math.log(Data.example_count(cat_data) / total_example_count)
end
denominator = (cat_data[:total_tokens] + classifier.smoothing * v_size)
log_probs = Enum.reduce(tokens, 0, fn (token, log_probs) ->
numerator = (cat_data[:tokens][token] || 0) + classifier.smoothing
log_probs + :math.log( numerator / denominator )
end)
put_in(probs[cat_name], log_probs + cat_prob)
end)
normalize(prob_numerator)
end
defp normalize(prob_numerator) do
normalizer = Enum.reduce(prob_numerator, 0, fn ({_, numerator}, normalizer) ->
normalizer + numerator
end)
{intermed, renormalizer} = Enum.reduce(prob_numerator, {%{}, 0}, fn ({cat, numerator}, {intermed, renormalizer}) ->
r = normalizer / numerator
intermed = put_in(intermed, [cat], r)
renormalizer = renormalizer + r
{intermed, renormalizer}
end)
Enum.reduce(intermed, %{}, fn ({cat, value}, final_probs) ->
put_in(final_probs, [cat], value / renormalizer)
end)
end
defp classifier_instance(pid) do
Agent.get pid, fn c -> c end
end
end
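# End-to-end sketch based on the docs above (PIDs and exact probabilities
# vary from run to run):
defmodule NaiveBayes.UsageSketch do
def demo do
{:ok, nbayes} = NaiveBayes.new(smoothing: 2)
:ok = NaiveBayes.train(nbayes, ["a", "b", "c"], "classA")
:ok = NaiveBayes.train(nbayes, ["c", "d", "e"], "classB")
# Probabilities over both classes sum to 1.0; "a" pulls toward classA.
NaiveBayes.classify(nbayes, ["a", "c"])
end
end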
|
lib/naive_bayes.ex
| 0.873012 | 0.672294 |
naive_bayes.ex
|
starcoder
|
defmodule Toolshed.Unix do
@moduledoc """
Helpers for when your fingers are too used to typing Unix
commands.
Helpers include:
* `cat/1` - print out a file
* `grep/2` - print out lines of a file that match a regular expression
* `tree/1` - print out a directory tree
* `uptime/0` - print the update of the Erlang VM
"""
@doc """
Reads and prints out the contents of a file
"""
@spec cat(Path.t()) :: :"do not show this result in output"
def cat(path) do
path
|> File.read!()
|> IO.write()
IEx.dont_display_result()
end
@doc """
Run a regular expression on a file and print the matching lines.
iex> grep ~r/video/, "/etc/mime.types"
"""
@spec grep(Regex.t(), Path.t()) :: :"do not show this result in output"
def grep(regex, path) do
File.stream!(path)
|> Stream.filter(&Regex.match?(regex, &1))
|> Stream.each(&IO.write/1)
|> Stream.run()
IEx.dont_display_result()
end
@doc """
Print out directories and files in tree form.
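For example (the path is illustrative):
iex> tree "/tmp"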
"""
@spec tree(Path.t()) :: :"do not show this result in output"
def tree(path \\ ".") do
IO.puts(path)
case file_info(path, path) do
{:directory, _} ->
do_tree("", path, files(path))
_ ->
:ok
end
IEx.dont_display_result()
end
@doc """
Print out the current uptime.
"""
@spec uptime() :: :"do not show this result in output"
def uptime() do
:c.uptime()
IEx.dont_display_result()
end
defp do_tree(_prefix, _dir, []), do: :ok
defp do_tree(prefix, dir, [{:directory, filename} | rest]) do
puts_tree_branch(prefix, filename, rest)
path = Path.join(dir, filename)
do_tree([prefix, tree_trunk(rest)], path, files(path))
do_tree(prefix, dir, rest)
end
defp do_tree(prefix, dir, [{_type, filename} | rest]) do
puts_tree_branch(prefix, filename, rest)
do_tree(prefix, dir, rest)
end
defp puts_tree_branch(prefix, filename, rest) do
IO.puts([prefix, tree_branch(rest), filename])
end
defp tree_branch([]), do: "└── "
defp tree_branch(_), do: "├── "
defp tree_trunk([]), do: "    "
defp tree_trunk(_), do: "│   "
defp files(dir) do
File.ls!(dir)
|> Enum.map(&file_info(Path.join(dir, &1), &1))
end
defp file_info(path, name) do
stat = File.lstat!(path)
{stat.type, name}
end
end
|
lib/toolshed/unix.ex
| 0.737158 | 0.474692 |
unix.ex
|
starcoder
|
defmodule AWS.DirectConnect do
@moduledoc """
AWS Direct Connect links your internal network to an AWS Direct Connect
location over a standard Ethernet fiber-optic cable. One end of the cable
is connected to your router, the other to an AWS Direct Connect router.
With this connection in place, you can create virtual interfaces directly
to the AWS cloud (for example, to Amazon EC2 and Amazon S3) and to Amazon
VPC, bypassing Internet service providers in your network path. A
connection provides access to all AWS Regions except the China (Beijing)
and China (Ningxia) Regions. AWS resources in the China Regions can only be
accessed through locations associated with those Regions.
"""
@doc """
Accepts a proposal request to attach a virtual private gateway or transit
gateway to a Direct Connect gateway.
"""
def accept_direct_connect_gateway_association_proposal(client, input, options \\ []) do
request(client, "AcceptDirectConnectGatewayAssociationProposal", input, options)
end
@doc """
Deprecated. Use `AllocateHostedConnection` instead.
Creates a hosted connection on an interconnect.
Allocates a VLAN number and a specified amount of bandwidth for use by a
hosted connection on the specified interconnect.
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def allocate_connection_on_interconnect(client, input, options \\ []) do
request(client, "AllocateConnectionOnInterconnect", input, options)
end
@doc """
Creates a hosted connection on the specified interconnect or a link
aggregation group (LAG) of interconnects.
Allocates a VLAN number and a specified amount of capacity (bandwidth) for
use by a hosted connection on the specified interconnect or LAG of
interconnects. AWS polices the hosted connection for the specified capacity
and the AWS Direct Connect Partner must also police the hosted connection
for the specified capacity.
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def allocate_hosted_connection(client, input, options \\ []) do
request(client, "AllocateHostedConnection", input, options)
end
@doc """
Provisions a private virtual interface to be owned by the specified AWS
account.
Virtual interfaces created using this action must be confirmed by the owner
using `ConfirmPrivateVirtualInterface`. Until then, the virtual interface
is in the `Confirming` state and is not available to handle traffic.
"""
def allocate_private_virtual_interface(client, input, options \\ []) do
request(client, "AllocatePrivateVirtualInterface", input, options)
end
@doc """
Provisions a public virtual interface to be owned by the specified AWS
account.
The owner of a connection calls this function to provision a public virtual
interface to be owned by the specified AWS account.
Virtual interfaces created using this function must be confirmed by the
owner using `ConfirmPublicVirtualInterface`. Until this step has been
completed, the virtual interface is in the `confirming` state and is not
available to handle traffic.
When creating an IPv6 public virtual interface, omit the Amazon address and
customer address. IPv6 addresses are automatically assigned from the Amazon
pool of IPv6 addresses; you cannot specify custom IPv6 addresses.
"""
def allocate_public_virtual_interface(client, input, options \\ []) do
request(client, "AllocatePublicVirtualInterface", input, options)
end
@doc """
Provisions a transit virtual interface to be owned by the specified AWS
account. Use this type of interface to connect a transit gateway to your
Direct Connect gateway.
The owner of a connection provisions a transit virtual interface to be
owned by the specified AWS account.
After you create a transit virtual interface, it must be confirmed by the
owner using `ConfirmTransitVirtualInterface`. Until this step has been
completed, the transit virtual interface is in the `requested` state and is
not available to handle traffic.
"""
def allocate_transit_virtual_interface(client, input, options \\ []) do
request(client, "AllocateTransitVirtualInterface", input, options)
end
@doc """
Associates an existing connection with a link aggregation group (LAG). The
connection is interrupted and re-established as a member of the LAG
(connectivity to AWS is interrupted). The connection must be hosted on the
same AWS Direct Connect endpoint as the LAG, and its bandwidth must match
the bandwidth for the LAG. You can re-associate a connection that's
currently associated with a different LAG; however, if removing the
connection would cause the original LAG to fall below its setting for
minimum number of operational connections, the request fails.
Any virtual interfaces that are directly associated with the connection are
automatically re-associated with the LAG. If the connection was originally
associated with a different LAG, the virtual interfaces remain associated
with the original LAG.
For interconnects, any hosted connections are automatically re-associated
with the LAG. If the interconnect was originally associated with a
different LAG, the hosted connections remain associated with the original
LAG.
"""
def associate_connection_with_lag(client, input, options \\ []) do
request(client, "AssociateConnectionWithLag", input, options)
end
@doc """
Associates a hosted connection and its virtual interfaces with a link
aggregation group (LAG) or interconnect. If the target interconnect or LAG
has an existing hosted connection with a conflicting VLAN number or IP
address, the operation fails. This action temporarily interrupts the hosted
connection's connectivity to AWS as it is being migrated.
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def associate_hosted_connection(client, input, options \\ []) do
request(client, "AssociateHostedConnection", input, options)
end
@doc """
Associates a virtual interface with a specified link aggregation group
(LAG) or connection. Connectivity to AWS is temporarily interrupted as the
virtual interface is being migrated. If the target connection or LAG has an
associated virtual interface with a conflicting VLAN number or a
conflicting IP address, the operation fails.
Virtual interfaces associated with a hosted connection cannot be associated
with a LAG; hosted connections must be migrated along with their virtual
interfaces using `AssociateHostedConnection`.
To reassociate a virtual interface to a new connection or LAG, the
requester must own either the virtual interface itself or the connection to
which the virtual interface is currently associated. Additionally, the
requester must own the connection or LAG for the association.
"""
def associate_virtual_interface(client, input, options \\ []) do
request(client, "AssociateVirtualInterface", input, options)
end
@doc """
Confirms the creation of the specified hosted connection on an
interconnect.
Upon creation, the hosted connection is initially in the `Ordering` state,
and remains in this state until the owner confirms creation of the hosted
connection.
"""
def confirm_connection(client, input, options \\ []) do
request(client, "ConfirmConnection", input, options)
end
@doc """
Accepts ownership of a private virtual interface created by another AWS
account.
After the virtual interface owner makes this call, the virtual interface is
created and attached to the specified virtual private gateway or Direct
Connect gateway, and is made available to handle traffic.
"""
def confirm_private_virtual_interface(client, input, options \\ []) do
request(client, "ConfirmPrivateVirtualInterface", input, options)
end
@doc """
Accepts ownership of a public virtual interface created by another AWS
account.
After the virtual interface owner makes this call, the specified virtual
interface is created and made available to handle traffic.
"""
def confirm_public_virtual_interface(client, input, options \\ []) do
request(client, "ConfirmPublicVirtualInterface", input, options)
end
@doc """
Accepts ownership of a transit virtual interface created by another AWS
account.
After the owner of the transit virtual interface makes this call, the
specified transit virtual interface is created and made available to handle
traffic.
"""
def confirm_transit_virtual_interface(client, input, options \\ []) do
request(client, "ConfirmTransitVirtualInterface", input, options)
end
@doc """
Creates a BGP peer on the specified virtual interface.
You must create a BGP peer for the corresponding address family (IPv4/IPv6)
in order to access AWS resources that also use that address family.
If logical redundancy is not supported by the connection, interconnect, or
LAG, the BGP peer cannot be in the same address family as an existing BGP
peer on the virtual interface.
When creating an IPv6 BGP peer, omit the Amazon address and customer
address. IPv6 addresses are automatically assigned from the Amazon pool of
IPv6 addresses; you cannot specify custom IPv6 addresses.
For a public virtual interface, the Autonomous System Number (ASN) must be
private or already whitelisted for the virtual interface.
"""
def create_bgp_peer(client, input, options \\ []) do
request(client, "CreateBGPPeer", input, options)
end
@doc """
Creates a connection between a customer network and a specific AWS Direct
Connect location.
A connection links your internal network to an AWS Direct Connect location
over a standard Ethernet fiber-optic cable. One end of the cable is
connected to your router, the other to an AWS Direct Connect router.
To find the locations for your Region, use `DescribeLocations`.
You can automatically add the new connection to a link aggregation group
(LAG) by specifying a LAG ID in the request. This ensures that the new
connection is allocated on the same AWS Direct Connect endpoint that hosts
the specified LAG. If there are no available ports on the endpoint, the
request fails and no connection is created.
"""
def create_connection(client, input, options \\ []) do
request(client, "CreateConnection", input, options)
end
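# Illustrative sketch (not part of the original module): one way a caller
# might invoke create_connection/3. The input field names below follow the
# DirectConnect CreateConnection request shape and are assumptions for
# demonstration purposes only.
@doc false
def example_create_connection(client) do
input = %{
"location" => "EqDC2",
"bandwidth" => "1Gbps",
"connectionName" => "example-connection"
}
create_connection(client, input)
end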
@doc """
Creates a Direct Connect gateway, which is an intermediate object that
enables you to connect a set of virtual interfaces and virtual private
gateways. A Direct Connect gateway is global and visible in any AWS Region
after it is created. The virtual interfaces and virtual private gateways
that are connected through a Direct Connect gateway can be in different AWS
Regions. This enables you to connect to a VPC in any Region, regardless of
the Region in which the virtual interfaces are located, and pass traffic
between them.
"""
def create_direct_connect_gateway(client, input, options \\ []) do
request(client, "CreateDirectConnectGateway", input, options)
end
@doc """
Creates an association between a Direct Connect gateway and a virtual
private gateway. The virtual private gateway must be attached to a VPC and
must not be associated with another Direct Connect gateway.
"""
def create_direct_connect_gateway_association(client, input, options \\ []) do
request(client, "CreateDirectConnectGatewayAssociation", input, options)
end
@doc """
Creates a proposal to associate the specified virtual private gateway or
transit gateway with the specified Direct Connect gateway.
You can associate a Direct Connect gateway and virtual private gateway or
transit gateway that is owned by any AWS account.
"""
def create_direct_connect_gateway_association_proposal(client, input, options \\ []) do
request(client, "CreateDirectConnectGatewayAssociationProposal", input, options)
end
@doc """
Creates an interconnect between an AWS Direct Connect Partner's network and
a specific AWS Direct Connect location.
An interconnect is a connection that is capable of hosting other
connections. The AWS Direct Connect partner can use an interconnect to
provide AWS Direct Connect hosted connections to customers through their
own network services. Like a standard connection, an interconnect links the
partner's network to an AWS Direct Connect location over a standard
Ethernet fiber-optic cable. One end is connected to the partner's router,
the other to an AWS Direct Connect router.
You can automatically add the new interconnect to a link aggregation group
(LAG) by specifying a LAG ID in the request. This ensures that the new
interconnect is allocated on the same AWS Direct Connect endpoint that
hosts the specified LAG. If there are no available ports on the endpoint,
the request fails and no interconnect is created.
For each end customer, the AWS Direct Connect Partner provisions a
connection on their interconnect by calling `AllocateHostedConnection`. The
end customer can then connect to AWS resources by creating a virtual
interface on their connection, using the VLAN assigned to them by the AWS
Direct Connect Partner.
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def create_interconnect(client, input, options \\ []) do
request(client, "CreateInterconnect", input, options)
end
@doc """
Creates a link aggregation group (LAG) with the specified number of bundled
physical dedicated connections between the customer network and a specific
AWS Direct Connect location. A LAG is a logical interface that uses the
Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces,
enabling you to treat them as a single interface.
All connections in a LAG must use the same bandwidth (either 1Gbps or
10Gbps) and must terminate at the same AWS Direct Connect endpoint.
You can have up to 10 dedicated connections per LAG. Regardless of this
limit, if you request more connections for the LAG than AWS Direct Connect
can allocate on a single endpoint, no LAG is created.
You can specify an existing physical dedicated connection or interconnect
to include in the LAG (which counts towards the total number of
connections). Doing so interrupts the current physical dedicated
connection and re-establishes it as a member of the LAG. The LAG will be
created on the same AWS Direct Connect endpoint to which the dedicated
connection terminates. Any virtual interfaces associated with the dedicated
connection are automatically disassociated and re-associated with the LAG.
The connection ID does not change.
If the AWS account used to create a LAG is a registered AWS Direct Connect
Partner, the LAG is automatically enabled to host sub-connections. For a
LAG owned by a partner, any associated virtual interfaces cannot be
directly configured.
"""
def create_lag(client, input, options \\ []) do
request(client, "CreateLag", input, options)
end
@doc """
Creates a private virtual interface. A virtual interface is the VLAN that
transports AWS Direct Connect traffic. A private virtual interface can be
connected to either a Direct Connect gateway or a Virtual Private Gateway
(VGW). Connecting the private virtual interface to a Direct Connect gateway
enables the possibility for connecting to multiple VPCs, including VPCs in
different AWS Regions. Connecting the private virtual interface to a VGW
only provides access to a single VPC within the same Region.
Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an
update to the underlying physical connection if it wasn't updated to
support jumbo frames. Updating the connection disrupts network connectivity
for all virtual interfaces associated with the connection for up to 30
seconds. To check whether your connection supports jumbo frames, call
`DescribeConnections`. To check whether your virtual interface supports
jumbo frames, call `DescribeVirtualInterfaces`.
"""
def create_private_virtual_interface(client, input, options \\ []) do
request(client, "CreatePrivateVirtualInterface", input, options)
end
@doc """
Creates a public virtual interface. A virtual interface is the VLAN that
transports AWS Direct Connect traffic. A public virtual interface supports
sending traffic to public services of AWS such as Amazon S3.
When creating an IPv6 public virtual interface (`addressFamily` is `ipv6`),
leave the `customer` and `amazon` address fields blank to use auto-assigned
IPv6 space. Custom IPv6 addresses are not supported.
"""
def create_public_virtual_interface(client, input, options \\ []) do
request(client, "CreatePublicVirtualInterface", input, options)
end
@doc """
Creates a transit virtual interface. A transit virtual interface should be
used to access one or more transit gateways associated with Direct Connect
gateways. A transit virtual interface enables the connection of multiple
VPCs attached to a transit gateway to a Direct Connect gateway.
<important> If you associate your transit gateway with one or more Direct
Connect gateways, the Autonomous System Number (ASN) used by the transit
gateway and the Direct Connect gateway must be different. For example, if
you use the default ASN 64512 for both your transit gateway and Direct
Connect gateway, the association request fails.
</important> Setting the MTU of a virtual interface to 8500 (jumbo frames)
can cause an update to the underlying physical connection if it wasn't
updated to support jumbo frames. Updating the connection disrupts network
connectivity for all virtual interfaces associated with the connection for
up to 30 seconds. To check whether your connection supports jumbo frames,
call `DescribeConnections`. To check whether your virtual interface
supports jumbo frames, call `DescribeVirtualInterfaces`.
"""
def create_transit_virtual_interface(client, input, options \\ []) do
request(client, "CreateTransitVirtualInterface", input, options)
end
@doc """
Deletes the specified BGP peer on the specified virtual interface with the
specified customer address and ASN.
You cannot delete the last BGP peer from a virtual interface.
"""
def delete_bgp_peer(client, input, options \\ []) do
request(client, "DeleteBGPPeer", input, options)
end
@doc """
Deletes the specified connection.
Deleting a connection only stops the AWS Direct Connect port hour and data
transfer charges. If you are partnering with any third parties to connect
with the AWS Direct Connect location, you must cancel your service with
them separately.
"""
def delete_connection(client, input, options \\ []) do
request(client, "DeleteConnection", input, options)
end
@doc """
Deletes the specified Direct Connect gateway. You must first delete all
virtual interfaces that are attached to the Direct Connect gateway and
disassociate all virtual private gateways associated with the Direct
Connect gateway.
"""
def delete_direct_connect_gateway(client, input, options \\ []) do
request(client, "DeleteDirectConnectGateway", input, options)
end
@doc """
Deletes the association between the specified Direct Connect gateway and
virtual private gateway.
We recommend that you specify the `associationID` to delete the
association. Alternatively, if you own the virtual gateway and Direct Connect
gateway association, you can specify the `virtualGatewayId` and
`directConnectGatewayId` to delete an association.
"""
def delete_direct_connect_gateway_association(client, input, options \\ []) do
request(client, "DeleteDirectConnectGatewayAssociation", input, options)
end
@doc """
Deletes the association proposal request between the specified Direct
Connect gateway and virtual private gateway or transit gateway.
"""
def delete_direct_connect_gateway_association_proposal(client, input, options \\ []) do
request(client, "DeleteDirectConnectGatewayAssociationProposal", input, options)
end
@doc """
Deletes the specified interconnect.
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def delete_interconnect(client, input, options \\ []) do
request(client, "DeleteInterconnect", input, options)
end
@doc """
Deletes the specified link aggregation group (LAG). You cannot delete a LAG
if it has active virtual interfaces or hosted connections.
"""
def delete_lag(client, input, options \\ []) do
request(client, "DeleteLag", input, options)
end
@doc """
Deletes a virtual interface.
"""
def delete_virtual_interface(client, input, options \\ []) do
request(client, "DeleteVirtualInterface", input, options)
end
@doc """
Deprecated. Use `DescribeLoa` instead.
Gets the LOA-CFA for a connection.
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a
document that your APN partner or service provider uses when establishing
your cross connect to AWS at the colocation facility. For more information,
see [Requesting Cross Connects at AWS Direct Connect
Locations](https://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
in the *AWS Direct Connect User Guide*.
"""
def describe_connection_loa(client, input, options \\ []) do
request(client, "DescribeConnectionLoa", input, options)
end
@doc """
Displays the specified connection or all connections in this Region.
"""
def describe_connections(client, input, options \\ []) do
request(client, "DescribeConnections", input, options)
end
@doc """
Deprecated. Use `DescribeHostedConnections` instead.
Lists the connections that have been provisioned on the specified
interconnect.
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def describe_connections_on_interconnect(client, input, options \\ []) do
request(client, "DescribeConnectionsOnInterconnect", input, options)
end
@doc """
Describes one or more association proposals for connection between a
virtual private gateway or transit gateway and a Direct Connect gateway.
"""
def describe_direct_connect_gateway_association_proposals(client, input, options \\ []) do
request(client, "DescribeDirectConnectGatewayAssociationProposals", input, options)
end
@doc """
Lists the associations between your Direct Connect gateways and virtual
private gateways. You must specify a Direct Connect gateway, a virtual
private gateway, or both. If you specify a Direct Connect gateway, the
response contains all virtual private gateways associated with the Direct
Connect gateway. If you specify a virtual private gateway, the response
contains all Direct Connect gateways associated with the virtual private
gateway. If you specify both, the response contains the association between
the Direct Connect gateway and the virtual private gateway.
"""
def describe_direct_connect_gateway_associations(client, input, options \\ []) do
request(client, "DescribeDirectConnectGatewayAssociations", input, options)
end
@doc """
Lists the attachments between your Direct Connect gateways and virtual
interfaces. You must specify a Direct Connect gateway, a virtual interface,
or both. If you specify a Direct Connect gateway, the response contains all
virtual interfaces attached to the Direct Connect gateway. If you specify a
virtual interface, the response contains all Direct Connect gateways
attached to the virtual interface. If you specify both, the response
contains the attachment between the Direct Connect gateway and the virtual
interface.
"""
def describe_direct_connect_gateway_attachments(client, input, options \\ []) do
request(client, "DescribeDirectConnectGatewayAttachments", input, options)
end
@doc """
Lists all your Direct Connect gateways or only the specified Direct Connect
gateway. Deleted Direct Connect gateways are not returned.
"""
def describe_direct_connect_gateways(client, input, options \\ []) do
request(client, "DescribeDirectConnectGateways", input, options)
end
@doc """
Lists the hosted connections that have been provisioned on the specified
interconnect or link aggregation group (LAG).
<note> Intended for use by AWS Direct Connect Partners only.
</note>
"""
def describe_hosted_connections(client, input, options \\ []) do
request(client, "DescribeHostedConnections", input, options)
end
@doc """
Deprecated. Use `DescribeLoa` instead.
Gets the LOA-CFA for the specified interconnect.
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a
document that is used when establishing your cross connect to AWS at the
colocation facility. For more information, see [Requesting Cross Connects
at AWS Direct Connect
Locations](https://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
in the *AWS Direct Connect User Guide*.
"""
def describe_interconnect_loa(client, input, options \\ []) do
request(client, "DescribeInterconnectLoa", input, options)
end
@doc """
Lists the interconnects owned by the AWS account or only the specified
interconnect.
"""
def describe_interconnects(client, input, options \\ []) do
request(client, "DescribeInterconnects", input, options)
end
@doc """
Describes all your link aggregation groups (LAG) or the specified LAG.
"""
def describe_lags(client, input, options \\ []) do
request(client, "DescribeLags", input, options)
end
@doc """
Gets the LOA-CFA for a connection, interconnect, or link aggregation group
(LAG).
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a
document that is used when establishing your cross connect to AWS at the
colocation facility. For more information, see [Requesting Cross Connects
at AWS Direct Connect
Locations](https://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
in the *AWS Direct Connect User Guide*.
"""
def describe_loa(client, input, options \\ []) do
request(client, "DescribeLoa", input, options)
end
@doc """
Lists the AWS Direct Connect locations in the current AWS Region. These are
the locations that can be selected when calling `CreateConnection` or
`CreateInterconnect`.
"""
def describe_locations(client, input, options \\ []) do
request(client, "DescribeLocations", input, options)
end
@doc """
Describes the tags associated with the specified AWS Direct Connect
resources.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Lists the virtual private gateways owned by the AWS account.
You can create one or more AWS Direct Connect private virtual interfaces
linked to a virtual private gateway.
"""
def describe_virtual_gateways(client, input, options \\ []) do
request(client, "DescribeVirtualGateways", input, options)
end
@doc """
Displays all virtual interfaces for an AWS account. Virtual interfaces
deleted fewer than 15 minutes before you make the request are also
returned. If you specify a connection ID, only the virtual interfaces
associated with the connection are returned. If you specify a virtual
interface ID, then only a single virtual interface is returned.
A virtual interface (VLAN) transmits the traffic between the AWS Direct
Connect location and the customer network.
"""
def describe_virtual_interfaces(client, input, options \\ []) do
request(client, "DescribeVirtualInterfaces", input, options)
end
@doc """
Disassociates a connection from a link aggregation group (LAG). The
connection is interrupted and re-established as a standalone connection
(the connection is not deleted; to delete the connection, use the
`DeleteConnection` request). If the LAG has associated virtual interfaces
or hosted connections, they remain associated with the LAG. A disassociated
connection owned by an AWS Direct Connect Partner is automatically
converted to an interconnect.
If disassociating the connection would cause the LAG to fall below its
setting for minimum number of operational connections, the request fails,
except when it's the last member of the LAG. If all connections are
disassociated, the LAG continues to exist as an empty LAG with no physical
connections.
"""
def disassociate_connection_from_lag(client, input, options \\ []) do
request(client, "DisassociateConnectionFromLag", input, options)
end
@doc """
Lists the virtual interface failover test history.
"""
def list_virtual_interface_test_history(client, input, options \\ []) do
request(client, "ListVirtualInterfaceTestHistory", input, options)
end
@doc """
Starts the virtual interface failover test that verifies your configuration
meets your resiliency requirements by placing the BGP peering session in
the DOWN state. You can then send traffic to verify that there are no
outages.
You can run the test on public, private, transit, and hosted virtual
interfaces.
You can use
[ListVirtualInterfaceTestHistory](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_ListVirtualInterfaceTestHistory.html)
to view the virtual interface test history.
If you need to stop the test before the test interval completes, use
[StopBgpFailoverTest](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_StopBgpFailoverTest.html).
"""
def start_bgp_failover_test(client, input, options \\ []) do
request(client, "StartBgpFailoverTest", input, options)
end
@doc """
Stops the virtual interface failover test.
"""
def stop_bgp_failover_test(client, input, options \\ []) do
request(client, "StopBgpFailoverTest", input, options)
end
@doc """
Adds the specified tags to the specified AWS Direct Connect resource. Each
resource can have a maximum of 50 tags.
Each tag consists of a key and an optional value. If a tag with the same
key is already associated with the resource, this action updates its value.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes one or more tags from the specified AWS Direct Connect resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates the specified attributes of the Direct Connect gateway association.
Add or remove prefixes from the association.
"""
def update_direct_connect_gateway_association(client, input, options \\ []) do
request(client, "UpdateDirectConnectGatewayAssociation", input, options)
end
@doc """
Updates the attributes of the specified link aggregation group (LAG).
You can update the following attributes:
<ul> <li> The name of the LAG.
</li> <li> The value for the minimum number of connections that must be
operational for the LAG itself to be operational.
</li> </ul> When you create a LAG, the default value for the minimum number
of operational connections is zero (0). If you update this value and the
number of operational connections falls below the specified value, the LAG
automatically goes down to avoid over-utilization of the remaining
connections. Adjust this value with care, as it could force the LAG down if
it is set higher than the current number of operational connections.
"""
def update_lag(client, input, options \\ []) do
request(client, "UpdateLag", input, options)
end
@doc """
Updates the specified attributes of the specified virtual private
interface.
Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an
update to the underlying physical connection if it wasn't updated to
support jumbo frames. Updating the connection disrupts network connectivity
for all virtual interfaces associated with the connection for up to 30
seconds. To check whether your connection supports jumbo frames, call
`DescribeConnections`. To check whether your virtual interface supports
jumbo frames, call `DescribeVirtualInterfaces`.
"""
def update_virtual_interface_attributes(client, input, options \\ []) do
request(client, "UpdateVirtualInterfaceAttributes", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
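# Signs the request with SigV4 and posts it using the AWS JSON 1.1 protocol;
# the "X-Amz-Target" header routes the call to the named action on the
# DirectConnect service (historically named "OvertureService").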
defp request(client, action, input, options) do
client = %{client | service: "directconnect"}
host = build_host("directconnect", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "OvertureService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/direct_connect.ex
| 0.895091 | 0.53127 |
direct_connect.ex
|
starcoder
|
defmodule Vex.Validators.Format do
@moduledoc """
Ensure a value matches a regular expression.
## Options
* `:with`: The regular expression.
* `:message`: Optional. A custom error message. May be in EEx format
and use the fields described in "Custom Error Messages," below.
The regular expression can be provided in place of the keyword list if no other options
are needed.
## Examples
iex> Vex.Validators.Format.validate("foo", ~r/^f/)
:ok
iex> Vex.Validators.Format.validate("foo", ~r/o{3,}/)
{:error, "must have the correct format"}
iex> Vex.Validators.Format.validate("foo", [with: ~r/^f/])
:ok
iex> Vex.Validators.Format.validate("bar", [with: ~r/^f/, message: "must start with an f"])
{:error, "must start with an f"}
iex> Vex.Validators.Format.validate("", [with: ~r/^f/, allow_blank: true])
:ok
iex> Vex.Validators.Format.validate(nil, [with: ~r/^f/, allow_nil: true])
:ok
## Custom Error Messages
Custom error messages (in EEx format), provided as :message, can use the following values:
iex> Vex.Validators.Format.__validator__(:message_fields)
[value: "The bad value", pattern: "The regex that didn't match"]
An example:
iex> Vex.Validators.Format.validate("bar", [with: ~r/"^f"/, message: "<%= value %> doesn't start with an f"])
{:error, "bar doesn't start with an f"}
"""
use Vex.Validator
@message_fields [value: "The bad value", pattern: "The regex that didn't match"]
def validate(value, options) when is_list(options) do
unless_skipping(value, options) do
pattern = Keyword.get(options, :with)
result(
Regex.match?(pattern, to_string(value)),
message(options, "must have the correct format", value: value, pattern: pattern)
)
end
end
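# Note: when `format` is not a regex, the clause below falls through and
# returns `nil`, since the `if` has no `else` branch.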
def validate(value, format) do
if Regex.regex?(format), do: validate(value, with: format)
end
defp result(true, _), do: :ok
defp result(false, message), do: {:error, message}
end
|
lib/vex/validators/format.ex
| 0.901762 | 0.591487 |
format.ex
|
starcoder
|
defmodule TailPipe do
@moduledoc """
An operator macro for piping into the final argument of a function.
Via the `~>/2` macro, this module overloads the operator as the "tail pipe".
This lets you pipe the output of the preceding expression into the final
(i.e. tail) argument of the next function.
### Examples
# Import the operator into your module
import TailPipe
# Calling `use` will work too if you want
use TailPipe
# Basic usage
iex> "hello world" ~> String.split()
["hello", "world"]
# We can chain too
"hello world"
~> String.split()
~> Enum.concat(["oh"])
|> Enum.join(" ")
# "oh hello world"
# More useful: dynamically creating a struct
defmodule Traveler do
defstruct [:id, :name, :location]
def new(kwl) do
kwl
|> Map.new()
|> Map.put(:location, "Unknown")
~> struct(__MODULE__)
end
end
iex> Traveler.new(id: 1, name: "Hal")
%Traveler{id: 1, location: "Unknown", name: "Hal"}
### Why?
Why not!
But really, this is mostly an experiment. Elixir provides both a set of reserved
operators that can be overloaded and a macro system to do so. In other functional
languages, something similar exists in the form of the "backwards pipe" operator.
The tail pipe is similar, but you call it in the left-to-right order as the pipe
operator.
Also, it does feel useful for the small handful of cases where the final
argument in a function is often the result of a chain (i.e. pipeline) of operations,
like in the struct example above.
"""
defmacro __using__(_) do
quote do
import TailPipe
end
end
defmacro lhs ~> rhs do
case rhs do
{_, _, args} ->
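# Macro.pipe/3 inserts `lhs` into `rhs` at the given argument position;
# using length(args) appends it after the existing arguments, i.e. the
# tail pipe.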
Macro.pipe(lhs, rhs, length(args))
_ ->
raise ArgumentError,
message:
"Cannot backwards pipe #{Macro.to_string(lhs)} into #{Macro.to_string(rhs)}. " <>
"Can only pipe into local calls foo(), remote calls Foo.bar() or " <>
"anonymous functions calls foo.()"
end
end
end
|
lib/back_pipe.ex
| 0.824002 | 0.413418 |
back_pipe.ex
|
starcoder
|
defmodule Nostrum.Voice do
@moduledoc """
Interface for playing audio through Discord's voice channels.
# Using Discord Voice Channels
To play sound in Discord with Nostrum, you'll need `ffmpeg` to be installed.
If you don't have the `ffmpeg` executable in your path, its absolute path may
be configured through the config key `:nostrum, :ffmpeg`.
A bot may be connected to at most one voice channel per guild. For this reason,
most of the functions in this module take a guild id, and the resulting action
will be performed in the given guild's voice channel that the bot is connected to.
The primary Discord gateway responsible for all text based communication relies on
one websocket connection per shard, where small bots typically only have one shard.
The Discord voice gateways work by establishing a websocket connection per guild/channel.
After some handshaking on this connection, audio data can be sent over UDP/RTP. Behind
the scenes the voice websocket connections are implemented nearly the same way the main
shard websocket connections are, and require no developer intervention.
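A minimal session might look like this (illustrative; `guild_id` and
`channel_id` are placeholders for real ids):
Nostrum.Voice.join_channel(guild_id, channel_id)
Nostrum.Voice.play(guild_id, "/path/to/audio.mp3", :url)
Nostrum.Voice.leave_channel(guild_id)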
"""
alias Nostrum.Api
alias Nostrum.Struct.{Channel, Guild, VoiceState}
alias Nostrum.Voice.Audio
alias Nostrum.Voice.Session
alias Nostrum.Voice.Supervisor, as: VoiceSupervisor
alias Porcelain.Process, as: Proc
require Logger
use GenServer
@doc false
def start_link(_args) do
GenServer.start_link(__MODULE__, %{}, name: VoiceStateMap)
end
@doc false
def init(args) do
{:ok, args}
end
@doc false
def update_voice(guild_id, args \\ []) do
GenServer.call(VoiceStateMap, {:update, guild_id, args})
end
@doc false
def get_voice(guild_id) do
GenServer.call(VoiceStateMap, {:get, guild_id})
end
@doc false
def remove_voice(guild_id) do
GenServer.call(VoiceStateMap, {:remove, guild_id})
end
@doc """
Joins or moves the bot to a voice channel.
This function is equivalent to `Nostrum.Api.update_voice_state/4`.
"""
@spec join_channel(Guild.id(), Channel.id(), boolean, boolean) :: no_return | :ok
def join_channel(guild_id, channel_id, self_mute \\ false, self_deaf \\ false) do
Api.update_voice_state(guild_id, channel_id, self_mute, self_deaf)
end
@doc """
Leaves the voice channel of the given guild id.
This function is equivalent to calling `Nostrum.Api.update_voice_state(guild_id, nil)`.
"""
@spec leave_channel(Guild.id()) :: no_return | :ok
def leave_channel(guild_id) do
Api.update_voice_state(guild_id, nil)
end
@doc """
Plays sound in the voice channel the bot is in.
The bot must be connected to a voice channel in the guild specified.
## Parameters
- `guild_id` - ID of guild whose voice channel the sound will be played in.
- `input` - Audio to be played. Type of `input` determined by `type` parameter.
- `type` - Type of input (defaults to `:url`).
- `:url` Input will be [any url that `ffmpeg` can read](https://www.ffmpeg.org/ffmpeg-protocols.html).
- `:pipe` Input will be data that is piped to stdin of `ffmpeg`.
- `:ytdl` Input will be url for `youtube-dl`, which gets automatically piped to `ffmpeg`.
Returns `{:error, reason}` if unable to play or a sound is playing, else `:ok`.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.play(123456789, "~/music/FavoriteSong.mp3", :url)
```
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> raw_data = File.read!("~/music/sound_effect.wav")
iex> Nostrum.Voice.play(123456789, raw_data, :pipe)
```
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.play(123456789, "https://www.youtube.com/watch?v=b4RJ-QGOtw4", :ytdl)
```
"""
@spec play(Guild.id(), String.t() | binary() | iodata(), :url | :pipe | :ytdl) ::
:ok | {:error, String.t()}
def play(guild_id, input, type \\ :url) do
voice = get_voice(guild_id)
cond do
not VoiceState.ready_for_rtp?(voice) ->
{:error, "Must be connected to voice channel to play audio."}
VoiceState.playing?(voice) ->
{:error, "Audio already playing in voice channel."}
true ->
unless is_nil(voice.ffmpeg_proc), do: Proc.stop(voice.ffmpeg_proc)
set_speaking(voice, true)
ffmpeg_proc = Audio.spawn_ffmpeg(input, type)
{:ok, encoder_pid} = LibOpus.start(ffmpeg_proc.out)
voice = update_voice(guild_id, encoder_pid: encoder_pid, ffmpeg_proc: ffmpeg_proc)
{:ok, pid} = Task.start(fn -> Audio.init_player(voice) end)
update_voice(guild_id, player_pid: pid)
:ok
end
end
@doc """
Stops the current sound being played in a voice channel.
The bot must be connected to a voice channel in the guild specified.
## Parameters
- `guild_id` - ID of guild whose voice channel the sound will be stopped in.
Returns `{:error, reason}` if unable to stop or no sound is playing, else `:ok`.
If a sound has finished playing, this function does not need to be called to start
playing another sound.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.play(123456789, "http://brandthill.com/files/weird_dubstep_noises.mp3")
iex> Nostrum.Voice.stop(123456789)
```
"""
@spec stop(Guild.id()) :: :ok | {:error, String.t()}
def stop(guild_id) do
voice = get_voice(guild_id)
cond do
not VoiceState.ready_for_rtp?(voice) ->
{:error, "Must be connected to voice channel to stop audio."}
not VoiceState.playing?(voice) ->
{:error, "Audio must be playing to stop."}
true ->
set_speaking(voice, false)
Process.exit(voice.player_pid, :stop)
Proc.stop(voice.ffmpeg_proc)
:ok
end
end
@doc """
Pauses the current sound being played in a voice channel.
The bot must be connected to a voice channel in the guild specified.
## Parameters
- `guild_id` - ID of guild whose voice channel the sound will be paused in.
Returns `{:error, reason}` if unable to pause or no sound is playing, else `:ok`.
This function is similar to `stop/1`, except that the sound may be
resumed after being paused.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.play(123456789, "~/files/twelve_hour_loop_of_waterfall_sounds.mp3")
iex> Nostrum.Voice.pause(123456789)
```
"""
@spec pause(Guild.id()) :: :ok | {:error, String.t()}
def pause(guild_id) do
voice = get_voice(guild_id)
cond do
not VoiceState.ready_for_rtp?(voice) ->
{:error, "Must be connected to voice channel to pause audio."}
not VoiceState.playing?(voice) ->
{:error, "Audio must be playing to pause."}
true ->
set_speaking(voice, false)
Process.exit(voice.player_pid, :pause)
:ok
end
end
@doc """
Resumes playing the current paused sound in a voice channel.
The bot must be connected to a voice channel in the guild specified.
## Parameters
- `guild_id` - ID of guild whose voice channel the sound will be resumed in.
Returns `{:error, reason}` if unable to resume or no sound has been paused, otherwise returns `:ok`.
This function is used to resume a sound that had previously been paused.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.play(123456789, "~/stuff/Toto - Africa (Bass Boosted)")
iex> Nostrum.Voice.pause(123456789)
iex> Nostrum.Voice.resume(123456789)
```
"""
@spec resume(Guild.id()) :: :ok | {:error, String.t()}
def resume(guild_id) do
voice = get_voice(guild_id)
cond do
not VoiceState.ready_for_rtp?(voice) ->
{:error, "Must be connected to voice channel to resume audio."}
VoiceState.playing?(voice) ->
{:error, "Audio already playing in voice channel."}
is_nil(voice.ffmpeg_proc) ->
{:error, "Audio must be paused to resume."}
true ->
set_speaking(voice, true)
{:ok, pid} = Task.start(fn -> Audio.player_loop(voice) end)
update_voice(guild_id, player_pid: pid)
:ok
end
end
@doc """
Checks if the bot is playing sound in a voice channel.
## Parameters
- `guild_id` - ID of guild to check if audio being played.
Returns `true` if sound is currently being played in a voice channel, otherwise `false`.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.play(123456789, "https://a-real-site.biz/RickRoll.m4a")
iex> Nostrum.Voice.playing?(123456789)
true
iex> Nostrum.Voice.pause(123456789)
iex> Nostrum.Voice.playing?(123456789)
false
```
"""
@spec playing?(Guild.id()) :: boolean
def playing?(guild_id) do
get_voice(guild_id) |> VoiceState.playing?()
end
@doc """
Checks if the connection is up and ready to play audio.
## Parameters
- `guild_id` - ID of guild to check if voice connection is up.
Returns `true` if the bot is connected to a voice channel, otherwise `false`.
This function does not check if audio is already playing. For that, use `playing?/1`.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.ready?(123456789)
true
iex> Nostrum.Voice.leave_channel(123456789)
iex> Nostrum.Voice.ready?(123456789)
false
```
"""
@spec ready?(Guild.id()) :: boolean
def ready?(guild_id) do
get_voice(guild_id) |> VoiceState.ready_for_rtp?()
end
@doc """
Gets the id of the voice channel that the bot is connected to.
## Parameters
- `guild_id` - ID of guild that the resultant channel belongs to.
Returns the `channel_id` for the channel the bot is connected to, otherwise `nil`.
## Examples
```Elixir
iex> Nostrum.Voice.join_channel(123456789, 420691337)
iex> Nostrum.Voice.get_channel_id(123456789)
420691337
iex> Nostrum.Voice.leave_channel(123456789)
iex> Nostrum.Voice.get_channel_id(123456789)
nil
```
"""
@spec get_channel_id(Guild.id()) :: Channel.id()
def get_channel_id(guild_id) do
voice = get_voice(guild_id)
if voice, do: voice.channel_id, else: nil
end
@doc false
def set_speaking(%VoiceState{} = voice, speaking) do
Session.set_speaking(voice.session_pid, speaking)
end
@doc false
def set_speaking(guild_id, speaking) do
get_voice(guild_id) |> set_speaking(speaking)
end
@doc false
def handle_call({:update, guild_id, args}, _from, state) do
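# Merges the given args into the guild's stored VoiceState (creating a
# fresh one if absent) and starts a voice session once the state is ready.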
voice =
state
|> Map.get(guild_id, VoiceState.new(guild_id: guild_id))
|> Map.merge(Enum.into(args, %{}))
state = Map.put(state, guild_id, voice)
start_if_ready(voice)
{:reply, voice, state}
end
@doc false
def handle_call({:get, guild_id}, _from, state) do
{:reply, Map.get(state, guild_id), state}
end
@doc false
def handle_call({:remove, guild_id}, _from, state) do
state[guild_id] |> VoiceState.cleanup()
VoiceSupervisor.end_session(guild_id)
{:reply, true, Map.delete(state, guild_id)}
end
@doc false
def start_if_ready(%VoiceState{} = voice) do
if VoiceState.ready_for_ws?(voice) do
VoiceSupervisor.create_session(voice)
end
end
end
|
lib/nostrum/voice.ex
| 0.901972 | 0.662182 |
voice.ex
|
starcoder
|
defmodule Mix.Tasks.Profile.Cprof do
use Mix.Task
@shortdoc "Profiles the given file or expression with cprof"
@moduledoc """
Profiles the given file or expression using Erlang's `cprof` tool.
`cprof` can be useful when you want to discover the bottlenecks related
to function calls.
Before running the code, it invokes the `app.start` task which compiles
and loads your project. Then the target expression is profiled, together
with all matching function calls, by setting breakpoints containing
counters. These can only be set on BEAM code so BIFs cannot be call
count traced.
To profile the code, you can use syntax similar to the `mix run` task:
mix profile.cprof -e Hello.world
mix profile.cprof -e "[1, 2, 3] |> Enum.reverse |> Enum.map(&Integer.to_string/1)"
mix profile.cprof my_script.exs arg1 arg2 arg3
## Command line options
* `--matching` - only profile calls matching the given `Module.function/arity` pattern
* `--limit` - filters out any results with a call count less than the limit
* `--module` - filters out any results not pertaining to the given module
* `--config`, `-c` - loads the given configuration file
* `--eval`, `-e` - evaluate the given code
* `--require`, `-r` - requires pattern before running the command
* `--parallel`, `-p` - makes all requires parallel
* `--no-compile` - does not compile even if files require compilation
* `--no-deps-check` - does not check dependencies
* `--no-archives-check` - does not check archives
* `--no-halt` - does not halt the system after running the command
* `--no-start` - does not start applications after compilation
* `--no-elixir-version-check` - does not check the Elixir version from mix.exs
## Profile output
Example output:
CNT
Total 15
Enum 6 <--
Enum."-map/2-lists^map/1-0-"/2 4
Enum.reverse/1 1
Enum.map/2 1
:elixir_compiler 4 <--
anonymous fn/1 in :elixir_compiler.__FILE__/1 3
anonymous fn/0 in :elixir_compiler.__FILE__/1 1
String.Chars.Integer 3 <--
String.Chars.Integer.to_string/1 3
:erlang 2 <--
:erlang.trace_pattern/3 2
Profile done over 20229 matching functions
The default output contains data gathered from all matching functions. The left
column lists each module and its functions, while the call counts are presented
on the right. Each module's total count is broken down by function below it.
The `<--` symbol is meant to help visualize where a new module's call counts
begin.
The first row (Total) is the sum of all function calls. In the last row the number of
matching functions that were considered for profiling is presented.
When `--matching` option is specified, call count tracing will be started only for
the functions matching the given pattern:
String.Chars.Integer 3 <--
String.Chars.Integer.to_string/1 3
Profile done over 1 matching functions
The pattern can be a module name, such as `String` to count all calls to that module,
a call without arity, such as `String.split`, to count all calls to that function
regardless of arity, or a call with arity, such as `String.split/2`, to count all
calls to that exact module, function and arity.
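For example (illustrative invocation), to count only calls to `String.split/2`:
mix profile.cprof --matching "String.split/2" -e 'String.split("hello world", " ")'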
## Caveats
You should be aware the profiler is stopped as soon as the code has finished
running. This may need special attention in two cases: when running
asynchronous code, function calls that happen after the profiler has stopped
will not be counted; when running synchronous code, long-running computations
profiled without a proper MFA trace pattern or filter may lead to a result set
that is difficult to comprehend.
Other caveats are the impossibility to call count trace BIFs, since breakpoints can
only be set on BEAM code; functions calls performed by `:cprof` are not traced; the
maximum size of a call counter is equal to the host machine's word size
(for example, 2147483647 in a 32-bit host).
"""
@switches [parallel: :boolean, require: :keep, eval: :keep, config: :keep, matching: :string,
halt: :boolean, compile: :boolean, deps_check: :boolean, limit: :integer,
module: :string, start: :boolean, archives_check: :boolean, warmup: :boolean,
elixir_version_check: :boolean, parallel_require: :keep]
def run(args) do
{opts, head} = OptionParser.parse_head!(args,
aliases: [r: :require, p: :parallel, e: :eval, c: :config],
strict: @switches)
Mix.Tasks.Run.run(["--no-mix-exs" | args], opts, head,
&profile_code(&1, opts),
&profile_code(File.read!(&1), opts))
end
defp profile_code(code_string, opts) do
content =
quote do
unquote(__MODULE__).profile(fn ->
unquote(Code.string_to_quoted!(code_string))
end, unquote(opts))
end
# Use compile_quoted since it leaves less noise than eval_quoted
Code.compile_quoted(content)
end
@doc false
def profile(fun, opts) do
fun
|> profile_and_analyse(opts)
|> print_output
:cprof.stop()
end
defp profile_and_analyse(fun, opts) do
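# Optionally run the code once before profiling so one-time costs such as
# module loading do not inflate the counts.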
if Keyword.get(opts, :warmup, true) do
IO.puts "Warmup..."
fun.()
end
num_matched_functions = case Keyword.get(opts, :matching) do
nil ->
:cprof.start()
matching ->
case Mix.Utils.parse_mfa(matching) do
{:ok, args} -> apply(:cprof, :start, args)
:error -> Mix.raise "Invalid matching pattern: #{matching}"
end
end
apply(fun, [])
:cprof.pause()
limit = Keyword.get(opts, :limit)
module = Keyword.get(opts, :module)
analysis_result = case {limit, module} do
{nil, nil} ->
:cprof.analyse()
{limit, nil} ->
:cprof.analyse(limit)
{limit, module} ->
module = string_to_existing_module(module)
if limit do
:cprof.analyse(module, limit)
else
:cprof.analyse(module)
end
end
{num_matched_functions, analysis_result}
end
defp string_to_existing_module(":" <> module), do: String.to_existing_atom(module)
defp string_to_existing_module(module), do: Module.concat([module])
defp print_output({num_matched_functions, {all_call_count, mod_analysis_list}}) do
print_total_row(all_call_count)
Enum.each(mod_analysis_list, &print_analysis_result/1)
print_number_of_matched_functions(num_matched_functions)
end
defp print_output({num_matched_functions, {_mod, _call_count, _mod_fun_list} = mod_analysis}) do
print_analysis_result(mod_analysis)
print_number_of_matched_functions(num_matched_functions)
end
defp print_number_of_matched_functions(num_matched_functions) do
IO.puts "Profile done over #{num_matched_functions} matching functions"
end
defp print_total_row(all_call_count) do
IO.puts ""
print_row(["s", "s", "s"], ["", "CNT", ""])
print_row(["s", "B", "s"], ["Total", all_call_count, ""])
end
defp print_analysis_result({module, total_module_count, module_fun_list}) do
module
|> Atom.to_string
|> module_name_for_printing()
|> print_module(total_module_count, "", "<--")
Enum.each(module_fun_list, &print_function(&1, " "))
end
defp print_module(module, count, prefix, suffix) do
print_row(["s", "B", "s"], ["#{prefix}#{module}", count, suffix])
end
defp module_name_for_printing("Elixir." <> rest = _module_name), do: rest
defp module_name_for_printing(module_name), do: ":" <> module_name
defp print_function({fun, count}, prefix, suffix \\ "") do
print_row(["s", "B", "s"], ["#{prefix}#{function_text(fun)}", count, suffix])
end
defp function_text({module, function, arity}) do
Exception.format_mfa(module, function, arity)
end
defp function_text(other), do: inspect(other)
@columns [-60, 12, 5]
defp print_row(formats, data) do
Stream.zip(@columns, formats)
|> Stream.map(fn({width, format}) -> "~#{width}#{format}" end)
|> Enum.join
|> :io.format(data)
IO.puts ""
end
end
|
lib/mix/lib/mix/tasks/profile.cprof.ex
| 0.887771 | 0.53443 |
profile.cprof.ex
|
starcoder
|
defmodule SafeURL.DNSResolver do
@moduledoc """
In some cases you might want to use a custom strategy
for DNS resolution. You can do so by passing your own
implementation of `SafeURL.DNSResolver` in the global
or local config.
By default, the `DNS` package is used for resolution,
but you can replace it with a wrapper that uses
different configuration or a completely different
implementation altogether.
## Use-cases
* Using a specific DNS server
* Avoiding network access in specific environments
* Mocking DNS resolution in tests
## Usage
Start by creating a module that implements the
`DNSResolver` behaviour. Currently, this means adding
only one `c:resolve/1` callback that takes a host and
returns a list of resolved IPs.
As an example, suppose you wanted to use
[Cloudflare's DNS](https://1.1.1.1/dns/), you can do
that by wrapping `DNS` with your own settings in a new
module:
defmodule CloudflareDNS do
@behaviour SafeURL.DNSResolver
@impl true
def resolve(domain) do
DNS.resolve(domain, :a, {"1.1.1.1", 53}, :udp)
end
end
To use it, simply pass it in the global config:
config :safeurl, dns_module: CloudflareDNS
You can also directly set the `:dns_module` in method options:
SafeURL.allowed?("https://example.com", dns_module: CloudflareDNS)
## Testing
This is especially useful in tests where you want to
ensure your HTTP Client wrapper with `SafeURL` is
working as expected.
You can override the `:dns_module` config to ensure
a specific IP is resolved for a domain or no network
requests are made:
defmodule TestDNSResolver do
@behaviour SafeURL.DNSResolver
@impl true
def resolve("google.com"), do: {:ok, [{192, 168, 1, 10}]}
def resolve("github.com"), do: {:ok, [{192, 168, 1, 20}]}
def resolve(_domain), do: {:ok, [{192, 168, 1, 99}]}
end
"""
@type resolution :: :inet.ip() | [:inet.ip()]
@callback resolve(host :: binary()) :: {:ok, resolution()} | {:error, atom()}
end
|
lib/safeurl/dns_resolver.ex
| 0.846101 | 0.512693 |
dns_resolver.ex
|
starcoder
|
defmodule Hypex.Bitstring do
@moduledoc """
This module provides a Hypex register implementation using a Bitstring under
the hood.
Using this implementation provides several guarantees about memory, in that the
memory cost stays constant and falls well below that of other registers.
Unfortunately this efficiency comes at the cost of some throughput, although
this module should be easily sufficient for all but the most write-intensive
use cases.
"""
# define behaviour
@behaviour Hypex.Register
@doc """
Creates a new bitstring with a size of `(2 ^ width) * width` with all bits initialized to 0.
"""
@spec init(number) :: bitstring
def init(width) do
m = :erlang.bsl(1, width) * width
<< 0 :: size(m) >>
end
@doc """
Takes a list of bits and converts them to a bitstring.
We can just delegate to the native Erlang implementation as it provides the
functionality we need built in.
"""
@spec from_list([ bit :: number ]) :: bitstring
defdelegate from_list(bit_list), to: :erlang, as: :list_to_bitstring
@doc """
Takes a bitstring and converts it to a list of bits.
We can just delegate to the native Erlang implementation as it provides the
functionality we need built in.
"""
@spec to_list(bitstring) :: [ bit :: number ]
defdelegate to_list(registers), to: :erlang, as: :bitstring_to_list
@doc """
Returns a bit from the list of registers.
"""
@spec get_value(bitstring, idx :: number, width :: number) :: result :: number
def get_value(registers, idx, width) do
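# e.g. get_value(<<1::4, 2::4, 3::4>>, 1, 4) skips the first 4 bits and
# reads the next 4, returning 2.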
head_length = idx * width
<< _head :: bitstring-size(head_length), value :: size(width), _tail :: bitstring >> = registers
value
end
@doc """
Sets a bit inside the list of registers.
"""
@spec set_value(bitstring, idx :: number, width :: number, value :: number) :: bitstring
def set_value(registers, idx, width, value) do
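# e.g. set_value(<<1::4, 2::4, 3::4>>, 1, 4, 7) rewrites the middle nibble,
# yielding <<1::4, 7::4, 3::4>>.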
head_length = idx * width
<< head :: bitstring-size(head_length), _former :: size(width), tail :: bitstring >> = registers
<< head :: bitstring, value :: size(width), tail :: bitstring >>
end
@doc """
Reduces the list of registers into a provided accumulator.
Internally we pass everything to the binary reduction function in the utils
module, as there's already a native implementation for accumulation.
"""
@spec reduce(bitstring, width :: number, accumulator :: any, (number, any -> any)) :: accumulator :: any
defdelegate reduce(registers, width, acc, fun), to: Hypex.Util, as: :binary_reduce
end
|
lib/hypex/bitstring.ex
| 0.827201 | 0.772745 |
bitstring.ex
|
starcoder
|
defmodule AWS.DMS do
@moduledoc """
AWS Database Migration Service
AWS Database Migration Service (AWS DMS) can migrate your data to and from
the most widely used commercial and open-source databases such as Oracle,
PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora,
MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports
homogeneous migrations such as Oracle to Oracle, as well as heterogeneous
migrations between different database platforms, such as Oracle to MySQL or
SQL Server to PostgreSQL.
"""
@doc """
Adds metadata tags to a DMS resource, including replication instance,
endpoint, security group, and migration task. These tags can also be used
with cost allocation reporting to track cost associated with DMS resources,
or used in a Condition statement in an IAM policy for DMS.
"""
def add_tags_to_resource(client, input, options \\ []) do
request(client, "AddTagsToResource", input, options)
end
@doc """
Creates an endpoint using the provided settings.
"""
def create_endpoint(client, input, options \\ []) do
request(client, "CreateEndpoint", input, options)
end
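# Illustrative sketch (not part of the original module): the input map below
# uses field names from the DMS CreateEndpoint API and is an assumption for
# demonstration purposes.
@doc false
def example_create_endpoint(client) do
input = %{
"EndpointIdentifier" => "example-endpoint",
"EndpointType" => "source",
"EngineName" => "mysql"
}
create_endpoint(client, input)
end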
@doc """
Creates the replication instance using the specified parameters.
"""
def create_replication_instance(client, input, options \\ []) do
request(client, "CreateReplicationInstance", input, options)
end
@doc """
Creates a replication subnet group given a list of the subnet IDs in a VPC.
"""
def create_replication_subnet_group(client, input, options \\ []) do
request(client, "CreateReplicationSubnetGroup", input, options)
end
@doc """
Creates a replication task using the specified parameters.
"""
def create_replication_task(client, input, options \\ []) do
request(client, "CreateReplicationTask", input, options)
end
@doc """
Deletes the specified certificate.
"""
def delete_certificate(client, input, options \\ []) do
request(client, "DeleteCertificate", input, options)
end
@doc """
Deletes the specified endpoint.
<note> All tasks associated with the endpoint must be deleted before you
can delete the endpoint.
</note> <p/>
"""
def delete_endpoint(client, input, options \\ []) do
request(client, "DeleteEndpoint", input, options)
end
@doc """
Deletes the specified replication instance.
<note> You must delete any migration tasks that are associated with the
replication instance before you can delete it.
</note> <p/>
"""
def delete_replication_instance(client, input, options \\ []) do
request(client, "DeleteReplicationInstance", input, options)
end
@doc """
Deletes a subnet group.
"""
def delete_replication_subnet_group(client, input, options \\ []) do
request(client, "DeleteReplicationSubnetGroup", input, options)
end
@doc """
Deletes the specified replication task.
"""
def delete_replication_task(client, input, options \\ []) do
request(client, "DeleteReplicationTask", input, options)
end
@doc """
Lists all of the AWS DMS attributes for a customer account. The attributes
include AWS DMS quotas for the account, such as the number of replication
instances allowed. The description for a quota includes the quota name,
current usage toward that quota, and the quota's maximum value.
This command does not take any parameters.
"""
def describe_account_attributes(client, input, options \\ []) do
request(client, "DescribeAccountAttributes", input, options)
end
@doc """
Provides a description of the certificate.
"""
def describe_certificates(client, input, options \\ []) do
request(client, "DescribeCertificates", input, options)
end
@doc """
Describes the status of the connections that have been made between the
replication instance and an endpoint. Connections are created when you test
an endpoint.
"""
def describe_connections(client, input, options \\ []) do
request(client, "DescribeConnections", input, options)
end
@doc """
Returns information about the type of endpoints available.
"""
def describe_endpoint_types(client, input, options \\ []) do
request(client, "DescribeEndpointTypes", input, options)
end
@doc """
Returns information about the endpoints for your account in the current
region.
"""
def describe_endpoints(client, input, options \\ []) do
request(client, "DescribeEndpoints", input, options)
end
@doc """
Returns information about the replication instance types that can be
created in the specified region.
"""
def describe_orderable_replication_instances(client, input, options \\ []) do
request(client, "DescribeOrderableReplicationInstances", input, options)
end
@doc """
Returns the status of the RefreshSchemas operation.
"""
def describe_refresh_schemas_status(client, input, options \\ []) do
request(client, "DescribeRefreshSchemasStatus", input, options)
end
@doc """
Returns information about replication instances for your account in the
current region.
"""
def describe_replication_instances(client, input, options \\ []) do
request(client, "DescribeReplicationInstances", input, options)
end
@doc """
Returns information about the replication subnet groups.
"""
def describe_replication_subnet_groups(client, input, options \\ []) do
request(client, "DescribeReplicationSubnetGroups", input, options)
end
@doc """
Returns information about replication tasks for your account in the current
region.
"""
def describe_replication_tasks(client, input, options \\ []) do
request(client, "DescribeReplicationTasks", input, options)
end
@doc """
Returns information about the schema for the specified endpoint.
"""
def describe_schemas(client, input, options \\ []) do
request(client, "DescribeSchemas", input, options)
end
@doc """
Returns table statistics on the database migration task, including table
name, rows inserted, rows updated, and rows deleted.
"""
def describe_table_statistics(client, input, options \\ []) do
request(client, "DescribeTableStatistics", input, options)
end
@doc """
Uploads the specified certificate.
"""
def import_certificate(client, input, options \\ []) do
request(client, "ImportCertificate", input, options)
end
@doc """
Lists all tags for an AWS DMS resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Modifies the specified endpoint.
"""
def modify_endpoint(client, input, options \\ []) do
request(client, "ModifyEndpoint", input, options)
end
@doc """
Modifies the replication instance to apply new settings. You can change one
or more parameters by specifying these parameters and the new values in the
request.
Some settings are applied during the maintenance window.
"""
def modify_replication_instance(client, input, options \\ []) do
request(client, "ModifyReplicationInstance", input, options)
end
@doc """
Modifies the settings for the specified replication subnet group.
"""
def modify_replication_subnet_group(client, input, options \\ []) do
request(client, "ModifyReplicationSubnetGroup", input, options)
end
@doc """
Modifies the specified replication task.
You can't modify the task endpoints. The task must be stopped before you
can modify it.
"""
def modify_replication_task(client, input, options \\ []) do
request(client, "ModifyReplicationTask", input, options)
end
@doc """
Populates the schema for the specified endpoint. This is an asynchronous
operation and can take several minutes. You can check the status of this
operation by calling the DescribeRefreshSchemasStatus operation.
"""
def refresh_schemas(client, input, options \\ []) do
request(client, "RefreshSchemas", input, options)
end
@doc """
Removes metadata tags from a DMS resource.
"""
def remove_tags_from_resource(client, input, options \\ []) do
request(client, "RemoveTagsFromResource", input, options)
end
@doc """
Starts the replication task.
"""
def start_replication_task(client, input, options \\ []) do
request(client, "StartReplicationTask", input, options)
end
@doc """
Stops the replication task.
"""
def stop_replication_task(client, input, options \\ []) do
request(client, "StopReplicationTask", input, options)
end
@doc """
Tests the connection between the replication instance and the endpoint.
"""
def test_connection(client, input, options \\ []) do
request(client, "TestConnection", input, options)
end
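# Shared request helper: signs the JSON payload with AWS Signature V4, posts
# it with an "AmazonDMSv20160101.<action>" target header, and decodes the
# response body on success or the error "__type"/"message" pair on failure.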
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "dms"}
host = get_host("dms", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonDMSv20160101.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/dms.ex
| 0.828176 | 0.50592 |
dms.ex
|
starcoder
|
defmodule ExRabbitMQ.RPC.Client do
@moduledoc """
*A behavior module for implementing a RabbitMQ RPC client with a `GenServer`.*
It uses the `ExRabbitMQ.Consumer`, which in turn uses the `AMQP` library to configure and consume messages from a
queue that are actually the response messages of the requests. This queue is set as the `reply_to` header in the AMQP
message so that the RPC server knows where to reply with a response message. Additionally, all request messages are
"tagged" with a `correlation_id` which the RPC server also includes in the response message, so that the RPC client
can be track and relate it.
A typical implementation of this behavior is to call the `c:setup_client/3` on `GenServer.init/1` and then call the
`c:request/4` for sending request messages. When the response message is received the `c:handle_response/3` will be
invoked.
Before starting an `ExRabbitMQ.RPC.Client`, make sure the
`ExRabbitMQ.Connection.Supervisor` is already running in your supervision tree.
**Example**
```elixir
defmodule MyClient do
@moduledoc false
use GenServer
use ExRabbitMQ.RPC.Client
def start_link(args) do
GenServer.start_link(__MODULE__, args, [])
end
def init(_) do
{:ok, state} = setup_client(:connection, %{})
{:ok, state}
end
def request_something(client, queue, value) do
GenServer.cast(client, {:request_something, queue, value})
end
def handle_cast({:request_something, queue, value}, state) do
payload = Poison.encode!(value)
{:ok, _correlation_id} = request(payload, "", queue)
{:noreply, state}
end
def handle_response({:ok, payload}, correlation_id, state) do
# Do some processing here...
{:noreply, state}
end
end
```
"""
@type state :: term
@type response :: {:ok, payload :: String.t()} | {:error, reason :: term}
@type connection :: atom | %ExRabbitMQ.Connection.Config{}
@type result :: {:ok, state} | {:error, reason :: term, state}
@type request_result :: {:ok, correlation_id :: String.t()} | {:error, reason :: term}
@type response_result ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Opens a RabbitMQ connection & channel and configures the queue for receiving responses.
This function calls the function `ExRabbitMQ.Consumer.xrmq_init/3` for creating a new RabbitMQ connection &
channel and configure the exchange & queue for consuming incoming response messages. This queue will be set in the
`reply_to` header of the AMQP message and will be used by the RPC server to reply back with a response message.
This function is usually called on `GenServer.init/1` callback.
### Parameters
The parameter `connection_config` specifies the configuration of the RabbitMQ connection. If set to an atom,
the configuration will be loaded from the application's config.exs under the app key :exrabbitmq,
eg. if the value is set to `:default_connection`, then the config.exs should have configuration like the following:
```elixir
config :exrabbitmq, :default_connection,
username: "guest",
password: "<PASSWORD>",
host: "localhost",
port: 5672
```
The parameter `connection_config` can also be set to the struct `ExRabbitMQ.Connection.Config`, which allows
you to configure the connection programmatically without config.exs.
The parameter `state` is the state of the `GenServer` process.
The optional parameter `opts` provides additional options for setting up the RabbitMQ client.
The available options are:
* `:queue` - specifies a custom Queue configuration. If set to an atom, the configuration will be loaded from the
application's config.exs under the app key :exrabbitmq,
eg. if the value is set to `:default_queue`, then the config.exs should have configuration like the following:
```elixir
config :exrabbitmq, :default_queue,
queue: "test_queue",
queue_opts: [durable: true],
consume_opts: [no_ack: true]
```
If not set, a temporary queue is declared on RabbitMQ just for receiving messages; it will be deleted when the channel
goes down. The configuration of the queue will be:
```elixir
%QueueConfig{
queue: "rpc.gen-" <> UUID.uuid4(),
queue_opts: [exclusive: true, auto_delete: true],
consume_opts: [no_ack: false]
}
```
* `:queue_prefix` - allows specifying the prefix of the generated queue name, which by default is `rpc.gen-`.
If the `:queue` option is set, this setting will be ignored.
The return of the function can be `{:ok, state}` when the consumer has been successfully registered or on error the
tuple `{:error, reason, state}`.
*For more information about the connection & queue configuration, please check the documentation of the function
`ExRabbitMQ.Consumer.xrmq_init/3`.*
"""
@callback setup_client(connection_config :: connection, state, opts :: keyword) :: result
@doc """
Publishes a request message with `payload` to the specified exchange and queue.
This function publishes a message on a queue that an RPC server is consuming; the response
message is received through the `c:handle_response/3` callback. This function **must** be called from the `ExRabbitMQ.RPC.Client`
process, as it needs the process dictionary, which contains the connection & channel information.
### Parameters
The parameter `payload` is the payload of the request message to be sent to the RPC server.
The parameter `exchange` is the RabbitMQ exchange to use for routing this message.
The parameter `queue` is the RabbitMQ queue to deliver this message. This queue must be the queue that an RPC server
is consuming.
The parameter `opts` is a keyword list with the publishing options. The publish options are the same as in
`AMQP.Basic.publish/5` but with a few changes:
* `:correlation_id` - if not specified, will be set to an auto-generated one (using `UUID.uuid4/0`),
* `:reply_to` - cannot be overridden and will always be set to the queue name configured
  with `c:setup_client/3`,
* `:timestamp` - if not specified, will be set to the current time,
* `:expiration` - if not specified, will be set to 5000ms. For no expiration, it needs to be set to a value that
is less than or equal to zero.
The return value can be:
* `{:ok, correlation_id}` - the request has been published successfully. The `correlation_id` is an id for this
request, which the RPC server will include in the response message so that this process can
relate the response when it receives it,
* `{:error, reason}` - the request has failed to be published with the returned `reason`.
"""
@callback request(
payload :: binary,
exchange :: String.t(),
routing_key :: String.t(),
opts :: keyword
) :: request_result
@doc """
Invoked when a message has been received from RabbitMQ which is a response message from the RPC server for a request
we previously did.
### Parameters
The parameter `response` has the result of the request and can take the following values:
* `{:ok, payload}` - the RPC server has replied with a response message for our request.
* `{:error, reason}` - when there was an error with the response of the request. If the `reason` has the value
`:expired`, then the `:expiration` value in the request message has been exceeded, meaning
that the RPC server didn't respond within this time.
The parameter `correlation_id` is the id of the request that this response is related to. This value was set
previously with the call of the `c:request/4` function and the RPC server returned it back with the response message.
The parameter `state` is the state of the `GenServer` process.
This callback should return a value, as in `GenServer.handle_info/2`.
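## Example

An illustrative handler (the `:last_reply` state field is a made-up example):

    def handle_response({:ok, payload}, _correlation_id, state) do
      {:noreply, %{state | last_reply: payload}}
    end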
"""
@callback handle_response(response :: response, correlation_id :: String.t(), state) ::
response_result
defmacro __using__(_) do
quote location: :keep do
@behaviour ExRabbitMQ.RPC.Client
alias AMQP.Basic
alias ExRabbitMQ.RPC.Client.{ExpirationHandler, Options, RequestTracking}
use ExRabbitMQ.Consumer, GenServer
@doc false
def setup_client(connection_config, state, opts \\ []) do
queue_config = Options.get_queue_config(opts)
xrmq_init(connection_config, queue_config, state)
end
@doc false
def request(payload, exchange, routing_key, opts \\ []) do
expiration = Options.get_expiration(opts)
correlation_id = Options.get_correlation_id(opts)
from = Options.get_call_from(opts)
with {:ok, channel} <- get_channel(),
{:ok, reply_to} <- get_reply_to_queue(),
opts <- Options.get_publish_options(opts, correlation_id, reply_to, expiration),
:ok <- Basic.publish(channel, exchange, routing_key, payload, opts),
:ok <- ExpirationHandler.set(correlation_id, expiration),
:ok <- RequestTracking.set(correlation_id, from) do
{:ok, correlation_id}
else
{:error, reason} -> {:error, reason}
error -> {:error, error}
end
end
@doc false
def request_sync(client, payload, exchange, routing_key, opts \\ []) do
expiration = Options.get_expiration(opts)
message = {payload, exchange, routing_key, opts}
GenServer.call(client, {:rpc_request_sync, message}, expiration + 1000)
end
@doc false
# Receive a message when the reply_to queue has been registered for consuming.
def handle_info({:basic_consume_ok, _}, state) do
{:noreply, state}
end
@doc false
# Receive the message sent by `ExRabbitMQ.RPC.Client.ExpirationHandler.set/2`, which informs the process that
# the request message has expired on RabbitMQ.
def handle_info({:expired, correlation_id}, state) do
do_handle_response({:error, :expired}, correlation_id, state)
end
@doc false
def handle_call({:rpc_request_sync, message}, from, state) do
{payload, exchange, routing_key, opts} = message
opts = Options.set_call_from(opts, from)
with {:ok, _correlation_id} <- request(payload, exchange, routing_key, opts) do
{:noreply, state}
else
error -> {:reply, error, state}
end
end
@doc false
# Receive the response message and calls the `c:handle_response/3` for further processing.
# If the response is related to a synchronous request, then reply to that process instead.
def xrmq_basic_deliver(payload, %{correlation_id: correlation_id}, state) do
do_handle_response({:ok, payload}, correlation_id, state)
end
# Gets the channel information from the process dictionary.
defp get_channel do
case xrmq_get_channel_info() do
{channel, _} when channel != nil -> {:ok, channel}
_ -> {:error, :no_channel}
end
end
# Returns the queue name for receiving the replies.
defp get_reply_to_queue do
case xrmq_get_queue_config() do
%{queue: queue} when queue != nil and queue != "" -> {:ok, queue}
_ -> {:error, :no_queue}
end
end
defp do_handle_response(result, correlation_id, state) do
with :ok <- ExpirationHandler.cancel(correlation_id),
{:ok, from} <- RequestTracking.get_delete(correlation_id) do
GenServer.reply(from, result)
{:noreply, state}
else
{:error, :no_request_tracking} ->
handle_response(result, correlation_id, state)
_ ->
{:noreply, state}
end
end
end
end
end
|
lib/ex_rabbitmq/rpc/client.ex
| 0.920513 | 0.793706 |
client.ex
|
starcoder
|
defmodule Essence.Document do
defstruct type: "", uri: "", text: "", nested_tokens: [], meta: %{}
@moduledoc """
This module defines the struct type `Essence.Document`, as well as a
variety of convenience methods for access the document's text, paragraphs,
sentences and tokens.
"""
@doc """
Read the `text` represented by a `String` and create an `Essence.Document`.
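## Example (illustrative)

    doc = Essence.Document.from_text("Hello world. Bye world.")
    # doc.nested_tokens is a list of paragraphs, each of which is a list
    # of tokenized sentences.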
"""
@spec from_text(text :: String.t) :: %Essence.Document{}
def from_text(text) when is_bitstring(text) do
paragraphs = Essence.Chunker.paragraphs(text)
sentences = paragraphs
|> Enum.map( fn(x) -> Essence.Chunker.sentences(x) end )
tokens = sentences
|> Enum.map(fn(x) -> x |> Enum.map(fn(y) -> Essence.Tokenizer.tokenize(y) end) end)
%Essence.Document{
type: :plain_text,
uri: "",
text: text,
nested_tokens: tokens
}
end
@doc """
Retrieve the tokenized paragraphs from the given `Essence.Document`.
"""
@spec paragraphs(document :: %Essence.Document{}) :: List.t
def paragraphs(%Essence.Document{nested_tokens: tokens}) do
tokens
end
@doc """
Retrieve the `n`-th tokenized paragraph from the given `Essence.Document`
"""
@spec paragraph(document :: %Essence.Document{}, n :: integer) :: List.t
def paragraph(%Essence.Document{nested_tokens: tokens}, n) do
tokens |> Enum.at(n)
end
@doc """
Retrieve the tokenized sentences from the given `Essence.Document`.
"""
@spec sentences(document :: %Essence.Document{}) :: List.t
def sentences(%Essence.Document{nested_tokens: tokens}) do
tokens |> List.foldl([], fn(x, acc) -> acc ++ x end)
end
@doc """
Retrieve the `n`-th tokenized sentence from the given `Essence.Document`
"""
@spec sentence(document :: %Essence.Document{}, n :: integer) :: List.t
def sentence(doc = %Essence.Document{}, n) do
doc
|> sentences
|> Enum.at(n)
end
@doc """
Retrieve the list of all tokens contained in the given `Essence.Document`
"""
@spec enumerate_tokens(document :: %Essence.Document{}) :: List.t
def enumerate_tokens(%Essence.Document{nested_tokens: tokens}) do
tokens
|> List.flatten()
|> Enum.map(fn word -> String.downcase word end)
end
@doc """
Retrieve the list of all words in the given `Essence.Document`, ignoring all tokens that are punctuation.
"""
@spec words(document :: %Essence.Document{}) :: List.t
def words(doc = %Essence.Document{}) do
doc
|> enumerate_tokens
|> Enum.filter(&Essence.Token.is_word?/1)
end
@doc """
Find all occurrences of `token` in the given `Essence.Document`. Returns a
list of `{token, index}` tuples.
"""
@spec find_token(doc :: %Essence.Document{}, token :: String.t) :: List.t
def find_token(doc = %Essence.Document{}, token) do
doc
|> Essence.Document.enumerate_tokens
|> Enum.with_index
|> Enum.filter( fn({tok, _idx}) -> String.upcase(tok) == String.upcase(token) end )
end
@doc """
For each occurrence of `token` in the given `Essence.Document`, `doc`,
returns a list containing the token as well as `n` (default=5) tokens to the left and
right of the occurrence.
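For example (illustrative):

    doc = Essence.Document.from_text("to be or not to be")
    Essence.Document.context_of(doc, "not")
    # => one entry with the tokens surrounding the single occurrence of "not"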
"""
@spec context_of(doc :: %Essence.Document{}, token :: String.t, n :: number) :: List.t
def context_of(doc = %Essence.Document{}, token, n \\ 5) do
indices = doc |> find_token(token)
tokens = doc |> enumerate_tokens
indices |> Enum.map( fn({tok, idx}) -> context_left(tokens, idx-1, n) ++ [tok] ++ context_right(tokens, idx+1, n) end)
end
@doc """
Pretty prints all occurrences of `token` in the given `Essence.Document`,
`doc`. Prints `n` (default=20) characters of context.
"""
@spec concordance(doc :: %Essence.Document{}, token :: String.t, n :: number) :: :ok
def concordance(doc = %Essence.Document{}, token, n \\ 20) do
doc
|> context_of(token, round(n / 5)+2)
|> Enum.each(&center(&1, n))
end
defp context_left(token_list, idx, len) do
token_list |> Enum.slice( (max(0, idx-len))..(idx) )
end
defp context_right(token_list, idx, len) do
token_list |> Enum.slice( (idx)..(min(Enum.count(token_list), idx+len)) )
end
defp center(token_list, len) do
mid = round(Enum.count(token_list) / 2) -1
l = token_list
|> Enum.slice(0..mid-1)
|> Enum.join(" ")
lx = l
|> String.slice(-(min(len, String.length(l)))..-1)
|> String.pad_leading(len, " ")
mx = Enum.at(token_list, mid)
r = token_list
|> Enum.slice(mid+1..Enum.count(token_list))
|> Enum.join(" ")
rx = r
|> String.slice(0..min(len, String.length(r)))
|> String.pad_trailing(len, " ")
IO.puts("#{lx} #{mx} #{rx}")
end
@doc """
Returns a list of all the 1-contexts (1 token to the left, 1 token to the right) of the
given `token` in the given `document`, excluding the token itself.
"""
@spec one_contexts_of(doc :: %Essence.Document{}, token :: String.t) :: List.t
def one_contexts_of(doc = %Essence.Document{}, token) do
indices = doc |> find_token(token)
tokens = doc |> enumerate_tokens
indices |> Enum.map( fn({_tok, idx}) -> context_left(tokens, idx-1, 0) ++ context_right(tokens, idx+1, 0) end)
end
end
|
lib/essence/document.ex
| 0.842426 | 0.632063 |
document.ex
|
starcoder
|
defprotocol Alambic.BlockingCollection do
@moduledoc """
Interface to a blocking collection.
A blocking collection is a collection of items where:
- multiple processes can push data into the collecion
- multiple processes can consume items from the collection
- a blocking collection may be "completed" meaning it will not
accept any more items.
- getting data blocks until some data is available or the collection
is "complete" (will not receive any more data)
- putting data may block until some room is available in the collection
for more data (blocking collections can either accept an unlimited
number of items or limit the number of items they can hold)
`BlockingCollection` also implements the `Enumerable` protocol and using
functions from the `Enum` and `Stream` module is the preferred way of
consuming a blocking collection. Enumerating a blocking collection will
consume its items, so if multiple processes are enumerating a blocking
collection at the same time, they will only see a subset of the items
added to the collection.
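## Example

A sketch of typical producer/consumer usage, given some `collection` value
whose module implements this protocol (illustrative, not from the original
source):

    spawn(fn ->
      Enum.each(1..3, &Alambic.BlockingCollection.add(collection, &1))
      Alambic.BlockingCollection.complete(collection)
    end)

    # Enumerating consumes items and stops when the collection completes.
    Enum.to_list(collection)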
"""
@vsn 1
@doc """
Add an item to the collection. May block until some room is available
in the collection.
Return `:ok` if adding was successful, `:error` if some internal error
occured or the collection does not accept items any more.
"""
@spec add(t, term) :: :ok | :error
def add(bc, item)
@doc """
Try to add an item in the collection. Will return `true` if the item
was added, `false` if the collection cannot accept items at the moment.
"""
@spec try_add(t, term) :: true | false
def try_add(bc, item)
@doc """
Get an item from the collection. If no item is available, will block
until an item is available or the collection has been completed.
Return:
- `{:ok, item}` when an item is available
- `:completed` when the collection has been completed
- `:error` if some error occurred
"""
@spec take(t) :: :error | :completed | {:ok, term}
def take(bc)
@doc """
Try to get an item from the collection. Do not block.
Return:
- `{true, item}` if some item was found
- `{false, reason}` if no item could be returned.
`reason` may be:
- `:completed` if the collection is completed
- `:error` if an error occurred
- `:empty` if the collection is currently empty
"""
@spec try_take(t) :: {true, term} | {false, :completed | :error | :empty}
def try_take(bc)
@doc """
Put the collection in the completed state, where it will not accept any more
items but will serve those currently inside the collection.
"""
@spec complete(t) :: :ok | :error
def complete(bc)
@doc "Return the number of items in the collection."
@spec count(t) :: integer
def count(bc)
end
defmodule Alambic.BlockingCollection.Enumerable do
@moduledoc """
Mixin for `Enumerable` implementation in blocking collections.
"""
alias Alambic.BlockingCollection
defmacro __using__(_) do
quote location: :keep do
def member?(_coll, _value), do: {:error, __MODULE__}
def count(coll), do: {:ok, BlockingCollection.count(coll)}
def reduce(_coll, {:halt, acc}, _fun) do
{:halted, acc}
end
def reduce(collection, {:suspend, acc}, fun) do
{:suspended, acc, &reduce(collection, &1, fun)}
end
def reduce(collection, {:cont, acc}, fun) do
case BlockingCollection.take(collection) do
{:ok, item} -> reduce(collection, fun.(item, acc), fun)
:completed -> {:done, acc}
:error -> {:halted, acc}
end
end
def slice(_coll), do: {:error, __MODULE__}
defoverridable [count: 1, member?: 2]
end
end
end
defmodule Alambic.BlockingCollection.Collectable do
@moduledoc """
Mixin for `Collectable` implementation in blocking collections.
"""
alias Alambic.BlockingCollection
defmacro __using__(_) do
quote location: :keep do
def into(collection) do
{collection, fn
c, {:cont, item} ->
:ok = BlockingCollection.add(c, item)
c
c, :done -> c
_, :halt -> :ok
end}
end
end
end
end
|
lib/alambic/blocking_collection.ex
| 0.892989 | 0.690683 |
blocking_collection.ex
|
starcoder
|
defmodule Excv.Imgcodecs do
require Logger
@moduledoc """
Imgcodecs correspond to "opencv2/imgcodecs.hpp".
"""
@typep im_read_result ::
{{pos_integer(), pos_integer(), pos_integer()}, {atom(), pos_integer()}, binary()}
@on_load :load_nif
@doc false
def load_nif do
nif_file = '#{Application.app_dir(:excv, "priv/libexcv")}'
case :erlang.load_nif(nif_file, 0) do
:ok -> :ok
{:error, {:reload, _}} -> :ok
{:error, reason} -> Logger.error("Failed to load NIF: #{inspect(reason)}")
end
end
@doc """
Saves an image to a specified file.
The function `imwrite` saves the image to the specified file. The image format is chosen based on the filename extension.
In general, only 8-bit single-channel or 3-channel (with 'RGB' channel order) images can be saved using this function,
with these exceptions:
* 16-bit unsigned (`CV_16U`) images can be saved in the case of PNG, JPEG 2000, and TIFF formats
* 32-bit float (`CV_32F`) images can be saved in PFM, TIFF, OpenEXR, and Radiance HDR formats;
3-channel (`CV_32FC3`) TIFF images will be saved using the LogLuv high dynamic range encoding (4 bytes per pixel)
* PNG images with an alpha channel can be saved using this function.
To do this, create 8-bit (or 16-bit) 4-channel image RGBA, where the alpha channel goes last.
Fully transparent pixels should have alpha set to 0,
fully opaque pixels should have alpha set to 255/65535.
* Multiple images (list of `Nx.t()`) can be saved in TIFF format.
If the image format is not supported, the image will be converted to 8-bit unsigned (`CV_8U`) and saved that way.
Note that the order of colors in a pixel is different from OpenCV, that is, 'RGB' instead of 'BGR',
and the order of arguments is also different, that is, `img`, `file`, and `options` instead of `file`, `img` and `options`.
If the format, depth or channel order is different, use `Excv.Nx.convertTo/4` and `Excv.Imgproc.cvtColor/4` to convert it
before saving.
Or, use the universal `File` functions to save the image to XML or YAML format.
## Parameters
* `img`(`Nx.Tensor` or list of `Nx.Tensor`): image or images to be saved.
* `file`(String): Path of the file.
* `options`(Keyword list): Format-specific parameters. (To be implemented)
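## Example

An illustrative call (file name chosen arbitrarily):

    img = Nx.iota({2, 2, 3}, type: {:u, 8})
    :ok = Excv.Imgcodecs.imwrite(img, "out.png")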
"""
@spec imwrite(Nx.Tensor.t() | list(), Path.t(), Keyword.t()) ::
:ok | :error | {:error, String.t()}
def imwrite(img, file, options \\ [])
def imwrite(img, file, options) when is_struct(img, Nx.Tensor) do
im_write_sub(img, Nx.type(img), Path.absname(file), options)
end
def imwrite(imgs, file, options) when is_list(imgs) do
imgs = Enum.map(imgs, &if(is_number(&1), do: Nx.tensor([&1]), else: &1))
# Validate up front so a clear error is raised before calling into the NIF.
Enum.each(imgs, fn img ->
  unless is_struct(img, Nx.Tensor) do
    raise(FunctionClauseError,
      message: "no function clause matching in Excv.Imgcodecs.imwrite/3"
    )
  end
end)
im_write_sub_list(imgs, Enum.map(imgs, &Nx.type(&1)), Path.absname(file), options)
end
defp im_write_sub(img, type, path, options) do
{y, x, d} = Nx.shape(img)
im_write_nif(
{{x, y}, Nx.to_binary(img), {type, d}},
path,
options
)
end
defp im_write_sub_list(imgs, types, path, options) do
shapes = Enum.map(imgs, fn img -> Nx.shape(img) end)
sizes = Enum.map(shapes, fn {y, x, _} -> {x, y} end)
data = Enum.map(imgs, fn img -> img.data.state end)
ds = Enum.map(shapes, fn {_, _, d} -> d end)
im_write_nif(
Enum.zip([sizes, data, Enum.zip(types, ds)]),
path,
options
)
end
@doc false
@spec im_write_nif(list() | tuple(), binary(), list()) :: :ok | :error
def im_write_nif(_size_data_type, _path, _options) do
:erlang.nif_error(:nif_not_loaded)
end
@doc """
Loads an image from a file.
The function `imread` loads an image from the specified file and returns a tuple of `:ok` and it.
If the image cannot be read (because of missing file, improper permissions, unsupported or invalid format),
the function returns a tuple of `:error` and reason if available.
Currently, the following file formats are supported:
* Windows bitmaps - `*.bmp`, `*.dib` (always supported)
* JPEG files - `*.jpeg`, `*.jpg`, `*.jpe` (see the *Note* section)
* JPEG 2000 files - `*.jp2` (see the Note section)
* Portable Network Graphics - `*.png` (see the *Note* section)
* WebP - `*.webp` (see the *Note* section)
* Portable image format - `*.pbm`, `*.pgm`, `*.ppm` `*.pxm`, `*.pnm` (always supported)
* Sun rasters - `*.sr`, `*.ras` (always supported)
* TIFF files - `*.tiff`, `*.tif` (see the *Note* section)
* OpenEXR Image files - `*.exr` (see the *Note* section)
* Radiance HDR - `*.hdr`, `*.pic` (always supported)
* Raster and Vector geospatial data supported by GDAL (see the *Note* section)
## Note
* The function determines the type of an image by the content, not by the file extension.
* In the case of color images, the decoded images will have the channels stored in **R G B** order instead of **B G R** order in OpenCV.
* When using `grayscale: true`, the codec's internal grayscale conversion will be used, if available. Results may differ from the output of `Excv.Imgproc.cvtColor/4`.
* On Microsoft Windows\* OS and macOS\*, the codecs shipped with an OpenCV image (`libjpeg`, `libpng`, `libtiff`, and `libjasper`) are used by default.
So, Excv can always read JPEGs, PNGs, and TIFFs.
On macOS, there is also an option to use native macOS image readers.
But beware that currently these native image loaders give images with different pixel values because of the color management embedded into macOS.
* On Linux\*, BSD flavors and other Unix-like open-source operating systems, Excv looks for codecs supplied with an OS image.
Install the relevant packages (do not forget the development files, for example, "libjpeg-dev", in Debian\* and Ubuntu\*)
to get the codec support or turn on the `OPENCV_BUILD_3RDPARTY_LIBS` flag in CMake when building OpenCV.
* In the case you set `WITH_GDAL` flag to true in CMake and `load_GDAL: true` to load the image,
then the `GDAL` driver will be used in order to decode the image, supporting the following formats: `Raster`, `Vector`.
* If EXIF information is embedded in the image file, the EXIF orientation will be taken into account and thus the image will be rotated
accordingly except if the flags `ignore_orientation: true` or `unchanged: true` are passed.
* By default, the number of pixels must be less than 2^30. The limit can be set using the system variable `OPENCV_IO_MAX_IMAGE_PIXELS`
## Parameters
* `file`: Path of the file to be loaded.
* `options`: (To be implemented)
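## Example

An illustrative call (file name chosen arbitrarily):

    {:ok, img} = Excv.Imgcodecs.imread("photo.png")
    Nx.shape(img)
    # => {height, width, channels}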
"""
@spec imread(Path.t(), Keyword.t()) ::
{:ok, Nx.Tensor.t() | list()} | {:error, String.t()}
def imread(file, options \\ []) do
if File.exists?(file) do
case im_read_nif(Path.absname(file), options) do
{:ok, result} -> {:ok, parse_result_im_read(result)}
:error -> {:error, :no_reason}
{:error, reason} -> {:error, reason}
end
else
{:error, "File #{file} does not exist."}
end
end
@doc false
@spec im_read_nif(Path.t(), Keyword.t()) ::
{:ok, im_read_result() | list(im_read_result())} | :error | {:error, String.t()}
def im_read_nif(_path, _options) do
:erlang.nif_error(:nif_not_loaded)
end
defp parse_result_im_read({shape, type, data}) do
Nx.from_binary(data, type) |> Nx.reshape(shape)
end
end
|
lib/excv/imgcodecs.ex
| 0.884433 | 0.593786 |
imgcodecs.ex
|
starcoder
|
import Kernel, except: [round: 1]
defmodule Float do
@moduledoc """
Functions for working with floating point numbers.
"""
@doc """
Parses a binary into a float.
If successful, returns a tuple in the form of `{float, remainder_of_binary}`;
when the binary cannot be coerced into a valid float, the atom `:error` is
returned.
If the size of float exceeds the maximum size of `1.7976931348623157e+308`,
the `ArgumentError` exception is raised.
If you want to convert a string-formatted float directly to a float,
`String.to_float/1` can be used instead.
## Examples
iex> Float.parse("34")
{34.0, ""}
iex> Float.parse("34.25")
{34.25, ""}
iex> Float.parse("56.5xyz")
{56.5, "xyz"}
iex> Float.parse("pi")
:error
"""
@spec parse(binary) :: {float, binary} | :error
def parse("-" <> binary) do
case parse_unsigned(binary) do
:error -> :error
{number, remainder} -> {-number, remainder}
end
end
def parse("+" <> binary) do
parse_unsigned(binary)
end
def parse(binary) do
parse_unsigned(binary)
end
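# parse_unsigned/4 scans digits plus at most one dot and one exponent marker,
# accumulating accepted characters; add_dot/2 guarantees a ".0" is present so
# :erlang.binary_to_float/1 accepts the accumulated binary.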
defp parse_unsigned(<<digit, rest::binary>>) when digit in ?0..?9, do:
parse_unsigned(rest, false, false, <<digit>>)
defp parse_unsigned(binary) when is_binary(binary), do:
:error
defp parse_unsigned(<<digit, rest::binary>>, dot?, e?, acc) when digit in ?0..?9, do:
parse_unsigned(rest, dot?, e?, <<acc::binary, digit>>)
defp parse_unsigned(<<?., digit, rest::binary>>, false, false, acc) when digit in ?0..?9, do:
parse_unsigned(rest, true, false, <<acc::binary, ?., digit>>)
defp parse_unsigned(<<exp_marker, digit, rest::binary>>, dot?, false, acc) when exp_marker in 'eE' and digit in ?0..?9, do:
parse_unsigned(rest, true, true, <<add_dot(acc, dot?)::binary, ?e, digit>>)
defp parse_unsigned(<<exp_marker, sign, digit, rest::binary>>, dot?, false, acc) when exp_marker in 'eE' and sign in '-+' and digit in ?0..?9, do:
parse_unsigned(rest, true, true, <<add_dot(acc, dot?)::binary, ?e, sign, digit>>)
defp parse_unsigned(rest, dot?, _e?, acc), do:
{:erlang.binary_to_float(add_dot(acc, dot?)), rest}
defp add_dot(acc, true), do: acc
defp add_dot(acc, false), do: acc <> ".0"
@doc """
Rounds a float to the largest integer less than or equal to `num`.
`floor/2` also accepts a precision to round a floating point value down
to an arbitrary number of fractional digits (between 0 and 15).
This function always returns a float. `Kernel.trunc/1` may be used instead to
truncate the result to an integer afterwards.
## Examples
iex> Float.floor(34.25)
34.0
iex> Float.floor(-56.5)
-57.0
iex> Float.floor(34.259, 2)
34.25
"""
@spec floor(float, 0..15) :: float
def floor(number, precision \\ 0) when is_float(number) and precision in 0..15 do
power = power_of_10(precision)
number = number * power
truncated = trunc(number)
variance = if number - truncated < 0, do: -1.0, else: 0.0
(truncated + variance) / power
end
@doc """
Rounds a float to the smallest integer greater than or equal to `num`.
`ceil/2` also accepts a precision to round a floating point value up
to an arbitrary number of fractional digits (between 0 and 15).
This function always returns a float. `Kernel.trunc/1` may be used instead to
truncate the result to an integer afterwards.
## Examples
iex> Float.ceil(34.25)
35.0
iex> Float.ceil(-56.5)
-56.0
iex> Float.ceil(34.251, 2)
34.26
"""
@spec ceil(float, 0..15) :: float
def ceil(number, precision \\ 0) when is_float(number) and precision in 0..15 do
power = power_of_10(precision)
number = number * power
truncated = trunc(number)
variance = if number - truncated > 0, do: 1.0, else: 0.0
(truncated + variance) / power
end
@doc """
Rounds a floating point value to an arbitrary number of fractional digits
(between 0 and 15).
This function only accepts floats and always returns a float. Use
`Kernel.round/1` if you want a function that accepts both floats and integers
and always returns an integer.
## Examples
iex> Float.round(5.5674, 3)
5.567
iex> Float.round(5.5675, 3)
5.568
iex> Float.round(-5.5674, 3)
-5.567
iex> Float.round(-5.5675, 3)
-5.568
iex> Float.round(-5.5675)
-6.0
"""
@spec round(float, 0..15) :: float
def round(number, precision \\ 0) when is_float(number) and precision in 0..15 do
power = power_of_10(precision)
Kernel.round(number * power) / power
end
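# Generates power_of_10/1 clauses for 10^0..10^15 at compile time.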
Enum.reduce 0..15, 1, fn x, acc ->
defp power_of_10(unquote(x)), do: unquote(acc)
acc * 10
end
@doc """
Returns a charlist which corresponds to the text representation
of the given float.
It uses the shortest representation according to algorithm described
in "Printing Floating-Point Numbers Quickly and Accurately" in
Proceedings of the SIGPLAN '96 Conference on Programming Language
Design and Implementation.
## Examples
iex> Float.to_charlist(7.0)
'7.0'
"""
@spec to_charlist(float) :: charlist
def to_charlist(float) when is_float(float) do
:io_lib_format.fwrite_g(float)
end
@doc """
Returns a binary which corresponds to the text representation
of the given float.
It uses the shortest representation according to algorithm described
in "Printing Floating-Point Numbers Quickly and Accurately" in
Proceedings of the SIGPLAN '96 Conference on Programming Language
Design and Implementation.
## Examples
iex> Float.to_string(7.0)
"7.0"
"""
@spec to_string(float) :: String.t
def to_string(float) when is_float(float) do
IO.iodata_to_binary(:io_lib_format.fwrite_g(float))
end
# TODO: Deprecate by v1.5
@doc false
def to_char_list(float), do: Float.to_charlist(float)
@doc false
def to_char_list(float, options) do
IO.warn "Float.to_char_list/2 is deprecated, use :erlang.float_to_list/2 instead"
:erlang.float_to_list(float, expand_compact(options))
end
@doc false
def to_string(float, options) do
IO.warn "Float.to_string/2 is deprecated, use :erlang.float_to_binary/2 instead"
:erlang.float_to_binary(float, expand_compact(options))
end
defp expand_compact([{:compact, false} | t]), do: expand_compact(t)
defp expand_compact([{:compact, true} | t]), do: [:compact | expand_compact(t)]
defp expand_compact([h | t]), do: [h | expand_compact(t)]
defp expand_compact([]), do: []
end
|
lib/elixir/lib/float.ex
| 0.90796 | 0.664268 |
float.ex
|
starcoder
|
defmodule Nadia.Graph do
@moduledoc """
Provides access to Telegra.ph API.
## Reference
http://telegra.ph/api
"""
alias Nadia.Graph.Model.{Account, Error}
import Nadia.Graph.API
@doc """
Use this method to create a new Telegraph account. Most users only need one account, but this can be useful for channel administrators who would like to keep individual author names and profile links for each of their channels. On success, returns an Account object with the regular fields and an additional access_token field.
Args:
* `short_name` - account name, helps users with several accounts remember which they are currently using. Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name. 1-32 characters
* `author_name` - default author name used when creating new articles. 0-128 characters
* `options` - orddict of options
Options:
* `:author_url` - default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channel. 0-512 characters
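## Example (illustrative)

    {:ok, account} = Nadia.Graph.create_account("sandbox", "Anonymous")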
"""
@spec create_account(binary, binary, [{atom, any}]) :: {:ok, Account.t()} | {:error, Error.t()}
def create_account(short_name, author_name, options \\ []) do
request("createAccount", [short_name: short_name, author_name: author_name] ++ options)
end
@doc """
Use this method to update information about a Telegraph account. Pass only the parameters that you want to edit. On success, returns an Account object with the default fields.
* `access_token` - access token of the Telegraph account
* `short_name` - new account name. 1-32 characters
* `author_name` - new default author name used when creating new articles. 0-128 characters
* `options` - orddict of options
Options:
* `:author_url` - new default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channel. 0-512 characters
"""
@spec edit_account_info(binary, binary, binary, [{atom, any}]) ::
{:ok, Account.t()} | {:error, Error.t()}
def edit_account_info(access_token, short_name, author_name, options \\ []) do
request(
"editAccountInfo",
[access_token: access_token, short_name: short_name, author_name: author_name] ++ options
)
end
@doc """
Use this method to get information about a Telegraph account. Returns an Account object on success.
* `access_token` - access token of the Telegraph account
* `fields` - list of account fields to return. Available fields: short_name, author_name, author_url, auth_url, page_count
"""
@spec get_account_info(binary, [binary]) :: {:ok, Account.t()} | {:error, Error.t()}
def get_account_info(access_token, fields \\ ["short_name", "author_name", "author_url"]) do
request("getAccountInfo", access_token: access_token, fields: fields)
end
@doc """
Use this method to revoke access_token and generate a new one, for example, if the user would like to reset all connected sessions, or you have reasons to believe the token was compromised. On success, returns an Account object with new access_token and auth_url fields.
* `access_token` - access token of the Telegraph account
"""
@spec revoke_access_token(binary) :: {:ok, Account.t()} | {:error, Error.t()}
def revoke_access_token(access_token) do
request("revokeAccessToken", access_token: access_token)
end
@doc """
Use this method to get a list of pages belonging to a Telegraph account. Returns a PageList object, sorted by most recently created pages first.
* `access_token` - access token of the Telegraph account
* `offset` - sequential number of the first page to be returned
* `limit` - limits the number of pages to be retrieved. 0-200
"""
@spec get_page_list(binary, integer, integer) :: {:ok, [[PageList.t()]]} | {:error, Error.t()}
def get_page_list(access_token, offset \\ 0, limit \\ 50) do
request("getPageList", access_token: access_token, offset: offset, limit: limit)
end
@doc """
Use this method to create a new Telegraph page. On success, returns a Page object.
Args:
* `access_token` - (String) Access token of the Telegraph account.
* `title` - (String, 1-256 characters) Page title.
* `content` - (Array of Node, up to 64 KB) Content of the page.
* `options` - orddict of options
Options:
* `:author_name` - (String, 0-128 characters) Author name, displayed below the article's title.
* `:author_url` - (String, 0-512 characters) Profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channel.
* `:return_content` - (Boolean, default = false) If true, a content field will be returned in the Page object (see: Content format).
"""
@spec create_page(binary, binary, binary, [{atom, any}]) ::
{:ok, Page.t()} | {:error, Error.t()}
def create_page(access_token, title, content, options \\ []) do
request("createPage", [access_token: access_token, title: title, content: content] ++ options)
end
@doc """
Use this method to edit an existing Telegraph page. On success, returns a Page object.
* `access_token` - (String) Access token of the Telegraph account.
* `path` - (String) Path to the page.
* `title` - (String, 1-256 characters) Page title.
* `content` - (Array of Node, up to 64 KB) Content of the page.
* `options` - orddict of options
Options:
* `:author_name` - (String, 0-128 characters) Author name, displayed below the article's title.
* `:author_url` - (String, 0-512 characters) Profile link, opened when users click on the author's * `:name below` - the title. Can be any link, not necessarily to a Telegram profile or channel.
* `:return_content` - (Boolean, default = false) If true, a content field will be returned in the Page object.
"""
@spec edit_page(binary, binary, binary, binary, [{atom, any}]) ::
{:ok, Page.t()} | {:error, Error.t()}
def edit_page(access_token, path, title, content, options \\ []) do
request(
"editPage/" <> path,
[access_token: access_token, title: title, content: content] ++ options
)
end
@doc """
Use this method to get a Telegraph page. Returns a Page object on success.
* `path` - path to the Telegraph page (in the format Title-12-31, i.e. everything that comes after http://telegra.ph/)
* `return_content` - if true, content field will be returned in Page object
"""
@spec get_page(binary, [atom]) :: {:ok, Page.t()} | {:error, Error.t()}
def get_page(path, return_content \\ true) do
request("getPage/" <> path, return_content: return_content)
end
@doc """
Use this method to get the number of views for a Telegraph article. Returns a PageViews object on success. By default, the total number of page views will be returned.
* `path` - path to the Telegraph page (in the format Title-12-31, where 12 is the month and 31 the day the article was first published)
* `filter_fields` - orddict of fields
Filter fields:
* `:year` - if passed, the number of page views for the requested year will be returned.
* `:month` - if passed, the number of page views for the requested month will be returned
* `:day` - if passed, the number of page views for the requested day will be returned.
* `:hour` - if passed, the number of page views for the requested hour will be returned.
"""
@spec get_views(binary, [{atom, any}]) :: {:ok, PageViews.t()} | {:error, Error.t()}
def get_views(path, filter_fields) do
request("getViews/" <> path, filter_fields)
end
end
|
lib/nadia/graph.ex
| 0.894375 | 0.421076 |
graph.ex
|
starcoder
|
defmodule Extract.Kafka.Subscribe do
use Definition, schema: Extract.Kafka.Subscribe.V1
@type t :: %__MODULE__{
version: integer,
endpoints: keyword,
topic: String.t()
}
defstruct version: 1, endpoints: nil, topic: nil
def on_new(data) do
data
|> Map.update(:endpoints, [], &transform_endpoints/1)
|> Ok.ok()
end
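# Accepts endpoints as [["localhost", 9092], ...] (e.g. decoded from JSON) and
# converts them into the [localhost: 9092] keyword form expected by Elsa.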
defp transform_endpoints(list) when is_list(list) do
Enum.map(list, &transform_endpoint/1)
end
defp transform_endpoints(other), do: other
defp transform_endpoint([host, port]), do: {String.to_atom(host), port}
defp transform_endpoint(other), do: other
defimpl Jason.Encoder, for: __MODULE__ do
def encode(value, opts) do
Map.from_struct(value)
|> Map.update!(:endpoints, fn list ->
Enum.map(list, fn {host, port} -> [host, port] end)
end)
|> Jason.Encode.map(opts)
end
end
defimpl Extract.Step, for: __MODULE__ do
import Extract.Context
@dialyzer [:no_return, :no_fail_call]
def execute(%{endpoints: endpoints, topic: topic}, context) do
ensure_topic(endpoints, topic)
connection = :"kafka_subscribe_#{topic}"
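# Stream.resource/3: start an Elsa consumer group on demand, emit message
# batches as the handler process forwards them, and stop the supervisor
# when the stream is done.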
source = fn _opts ->
Stream.resource(
initialize(endpoints, topic, connection),
&receive_messages/1,
&shutdown/1
)
end
context
|> register_after_function(&acknowledge_messages(connection, &1))
|> set_source(source)
|> Ok.ok()
end
defp acknowledge_messages(connection, messages) do
%{
"topic" => topic,
"partition" => partition,
"generation_id" => generation_id,
"offset" => offset
} =
messages
|> Enum.map(&get_in(&1, [Access.key(:meta), "kafka"]))
|> Enum.max_by(&Map.get(&1, "offset"))
:ok = Elsa.Group.Acknowledger.ack(connection, topic, partition, generation_id, offset)
end
defp receive_messages(acc) do
receive do
{:kafka_subscribe, messages} ->
extract_messages =
Enum.map(messages, fn %{value: payload} = elsa_message ->
meta =
elsa_message
|> Map.from_struct()
|> Map.drop([:value, :timestamp, :headers, :key])
|> Enum.reduce(%{}, fn {k, v}, acc ->
Map.put(acc, to_string(k), v)
end)
Extract.Message.new(data: payload, meta: %{"kafka" => meta})
end)
{extract_messages, acc}
end
end
defp ensure_topic(endpoints, topic) do
unless Elsa.topic?(endpoints, topic) do
Elsa.create_topic(endpoints, topic)
end
end
defp initialize(endpoints, topic, connection) do
fn ->
{:ok, elsa} =
Elsa.Supervisor.start_link(
connection: connection,
endpoints: endpoints,
group_consumer: [
group: "kafka_subscribe_#{topic}",
topics: [topic],
handler: Extract.Kafka.Subscribe.Handler,
handler_init_args: %{pid: self()},
config: [
begin_offset: :earliest,
offset_reset_policy: :reset_to_earliest,
prefetch_count: 0,
prefetch_bytes: 1_000_000
]
]
)
%{elsa_supervisor: elsa}
end
end
defp shutdown(%{elsa_supervisor: elsa} = acc) do
Process.exit(elsa, :normal)
acc
end
end
end
defmodule Extract.Kafka.Subscribe.Handler do
use Elsa.Consumer.MessageHandler
def handle_messages(messages, state) do
send(state.pid, {:kafka_subscribe, messages})
{:no_ack, state}
end
end
defmodule Extract.Kafka.Subscribe.V1 do
use Definition.Schema
@impl true
def s do
schema(%Extract.Kafka.Subscribe{
version: version(1),
endpoints: spec(is_list() and not_nil?()),
topic: required_string()
})
end
end
|
apps/extract_kafka/lib/extract/kafka/subscribe.ex
| 0.569374 | 0.40116 |
subscribe.ex
|
starcoder
|
defmodule Ratatouille.Renderer.Canvas do
@moduledoc """
A canvas represents a terminal window, a subdivision of it for rendering, and a
sparse mapping of positions to cells.
A `%Canvas{}` struct can be rendered to different output formats. This includes
the primary use-case of rendering to the termbox-managed window, but also
rendering to strings, which is useful for testing.
"""
alias ExTermbox.{Cell, Position}
alias Ratatouille.Renderer.{Box, Utils}
alias __MODULE__, as: Canvas
@type t :: %Canvas{render_box: Box.t(), outer_box: Box.t(), cells: map()}
@enforce_keys [:render_box, :outer_box]
defstruct render_box: nil,
outer_box: nil,
cells: %{}
@doc """
Creates an empty canvas with the given dimensions.
## Examples
iex> Canvas.from_dimensions(10, 20)
%Canvas{
outer_box: %Ratatouille.Renderer.Box{
top_left: %ExTermbox.Position{x: 0, y: 0},
bottom_right: %ExTermbox.Position{x: 9, y: 19}
},
render_box: %Ratatouille.Renderer.Box{
top_left: %ExTermbox.Position{x: 0, y: 0},
bottom_right: %ExTermbox.Position{x: 9, y: 19}
},
cells: %{}
}
"""
@spec from_dimensions(non_neg_integer(), non_neg_integer()) :: Canvas.t()
def from_dimensions(x, y) do
%Canvas{
render_box: Box.from_dimensions(x, y),
outer_box: Box.from_dimensions(x, y)
}
end
@spec put_box(Canvas.t(), Box.t()) :: Canvas.t()
def put_box(%Canvas{} = canvas, render_box) do
%Canvas{canvas | render_box: render_box}
end
@whitespace Utils.atoi(" ")
def fill_background(%Canvas{render_box: box, cells: cells} = canvas) do
positions = Box.positions(box)
filled_cells =
for pos <- positions,
do: {pos, %Cell{ch: @whitespace, position: pos}},
into: %{}
%Canvas{canvas | cells: Map.merge(cells, filled_cells)}
end
@doc """
Copies the canvas to a new one with the render box padded on each side (top,
left, bottom, right) by `size`. Pass a negative size to remove padding.
"""
@spec padded(Canvas.t(), integer()) :: Canvas.t()
def padded(%Canvas{render_box: box} = canvas, size) do
%Canvas{canvas | render_box: Box.padded(box, size)}
end
@doc """
Copies the canvas to a new one with the render box consumed by the given `dx`
and `dy`.
The render box is used to indicate the empty, renderable space on the canvas,
so this might be called with a `dy` of 1 after rendering a line of text. The
box is consumed left-to-right and top-to-bottom.
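For example (illustrative):

    canvas
    |> Canvas.consume_rows(1)      # a full line of text was rendered
    |> Canvas.consume_columns(4)   # four more cells consumed after that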
"""
@spec consume(Canvas.t(), integer(), integer()) :: Canvas.t()
def consume(%Canvas{render_box: box} = canvas, dx, dy) do
%Canvas{canvas | render_box: Box.consume(box, dx, dy)}
end
@doc """
Creates a new canvas with `n` rows (from the top) consumed.
"""
@spec consume_rows(Canvas.t(), integer()) :: Canvas.t()
def consume_rows(canvas, n), do: consume(canvas, 0, n)
@doc """
Creates a new canvas with `n` columns (from the left) consumed.
"""
@spec consume_columns(Canvas.t(), integer()) :: Canvas.t()
def consume_columns(canvas, n), do: consume(canvas, n, 0)
@spec translate(Canvas.t(), integer(), integer()) :: Canvas.t()
def translate(%Canvas{render_box: box} = canvas, dx, dy) do
%Canvas{canvas | render_box: Box.translate(box, dx, dy)}
end
@spec render_to_strings(Canvas.t()) :: list(String.t())
def render_to_strings(%Canvas{cells: cells_map}) do
positions = Map.keys(cells_map)
ys = for %Position{y: y} <- positions, do: y
xs = for %Position{x: x} <- positions, do: x
y_max = Enum.max(ys, fn -> 0 end)
x_max = Enum.max(xs, fn -> 0 end)
cells =
for y <- 0..y_max, x <- 0..x_max do
pos = %Position{x: x, y: y}
cells_map[pos] || %Cell{position: pos, ch: ' '}
end
cells
|> Enum.chunk_by(&row_idx/1)
|> Enum.map(fn columns ->
columns
|> Enum.map(&cell_to_string/1)
|> Enum.join()
end)
end
@spec render_to_string(Canvas.t()) :: String.t()
def render_to_string(%Canvas{} = canvas),
do: canvas |> render_to_strings() |> Enum.join("\n")
@spec render_to_termbox(module(), Canvas.t()) :: :ok
def render_to_termbox(bindings, %Canvas{cells: cells}) do
# TODO: only attempt to render cells in the canvas box
for {_pos, cell} <- cells do
:ok = bindings.put_cell(cell)
end
:ok
end
defp cell_to_string(%Cell{ch: ch}), do: to_string([ch])
defp row_idx(%Cell{position: %Position{y: y}}), do: y
end
|
lib/ratatouille/renderer/canvas.ex
| 0.907322 | 0.832373 |
canvas.ex
|
starcoder
|
defmodule NebulexEctoRepoAdapter.Local.MatchSpecification do
@moduledoc """
The Match Specifications module contains various functions which convert Ecto queries to
ETS Match Specifications (:ets.match_spec()) in order to execute the given queries.
See: https://github.com/evadne/etso/blob/develop/lib/etso/ets/match_specification.ex
For Nebulex.Adapters.Local adapter the stored entry tuple is
{:entry, key, value, version, expire_at}. So the match spec could be something like:
spec = [{{:entry, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]}]
"""
alias NebulexEctoRepoAdapter.Local.TableStructure
def build(query, params) do
{_, schema} = query.from.source
field_names = TableStructure.field_names(schema)
match_head = build_head(field_names)
match_conditions = build_conditions(field_names, params, query.wheres)
match_body = [build_body(field_names, query.select.fields)]
{match_head, match_conditions, match_body}
end
defp build_head(field_names) do
{:entry, :"$1", List.to_tuple(Enum.map(1..length(field_names), fn x -> :"$#{x + 1}" end)), :_,
:_}
end
defp build_conditions(field_names, params, query_wheres) do
Enum.reduce(query_wheres, [], fn %Ecto.Query.BooleanExpr{expr: expression}, acc ->
[build_condition(field_names, params, expression) | acc]
end)
end
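# Map Elixir operator atoms to their Erlang match-spec guard equivalents
# (:and -> :andalso, :or -> :orelse, :!= -> :"/=", :<= -> :"=<").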
defmacrop guard_operator(:and), do: :andalso
defmacrop guard_operator(:or), do: :orelse
defmacrop guard_operator(:!=), do: :"/="
defmacrop guard_operator(:<=), do: :"=<"
defmacrop guard_operator(operator), do: operator
for operator <- ~w(== != < > <= >= and or)a do
defp build_condition(field_names, params, {unquote(operator), [], [lhs, rhs]}) do
lhs_condition = build_condition(field_names, params, lhs)
rhs_condition = build_condition(field_names, params, rhs)
{guard_operator(unquote(operator)), lhs_condition, rhs_condition}
end
end
for operator <- ~w(not)a do
defp build_condition(field_names, params, {unquote(operator), [], [clause]}) do
condition = build_condition(field_names, params, clause)
{guard_operator(unquote(operator)), condition}
end
end
defp build_condition(field_names, params, {:in, [], [field, value]}) do
field_name = resolve_field_name(field)
field_index = get_field_index(field_names, field_name)
resolve_field_values(params, value)
|> Enum.map(&{:==, :"$#{field_index}", &1})
|> Enum.reduce(&{:orelse, &1, &2})
end
defp build_condition(field_names, _, {{:., [], [{:&, [], [0]}, field_name]}, [], []}) do
:"$#{get_field_index(field_names, field_name)}"
end
defp build_condition(_, params, {:^, [], [index]}) do
Enum.at(params, index)
end
defp build_condition(_, _, value) when not is_tuple(value) do
value
end
defp build_body(field_names, query_select_fields) do
for select_field <- query_select_fields do
field_name = resolve_field_name(select_field)
field_index = get_field_index(field_names, field_name)
:"$#{field_index}"
end
end
defp resolve_field_name(field) do
{{:., _, [{:&, [], [0]}, field_name]}, [], []} = field
field_name
end
defp resolve_field_values(params, {:^, [], indices}) do
for index <- indices do
Enum.at(params, index)
end
end
defp get_field_index(field_names, field_name) do
2 + Enum.find_index(field_names, fn x -> x == field_name end)
end
end
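# Usage sketch (hypothetical schema and table names; build/2 is normally invoked
# by the adapter with a planned query, so select.fields is already populated):
#
#   import Ecto.Query
#
#   query = from(e in MyApp.CacheEntry, where: e.value > ^5, select: {e.key, e.value})
#   spec = NebulexEctoRepoAdapter.Local.MatchSpecification.build(query, [5])
#   :ets.select(:my_cache_table, [spec])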
|
lib/nebulex_ecto_repo_adapter/local/match_specification.ex
| 0.826607 | 0.594139 |
match_specification.ex
|
starcoder
|
defmodule Modbus.Response do
@moduledoc false
alias Modbus.Utils
def pack({:rc, slave, _address, count}, values) do
^count = Enum.count(values)
data = Utils.bitlist_to_bin(values)
reads(slave, 1, data)
end
def pack({:ri, slave, _address, count}, values) do
^count = Enum.count(values)
data = Utils.bitlist_to_bin(values)
reads(slave, 2, data)
end
def pack({:rhr, slave, _address, count}, values) do
^count = Enum.count(values)
data = Utils.reglist_to_bin(values)
reads(slave, 3, data)
end
def pack({:rir, slave, _address, count}, values) do
^count = Enum.count(values)
data = Utils.reglist_to_bin(values)
reads(slave, 4, data)
end
def pack({:fc, slave, address, value}, nil) when is_integer(value) do
write(:d, slave, 5, address, value)
end
def pack({:phr, slave, address, value}, nil) when is_integer(value) do
write(:a, slave, 6, address, value)
end
def pack({:fc, slave, address, values}, nil) when is_list(values) do
writes(:d, slave, 15, address, values)
end
def pack({:phr, slave, address, values}, nil) when is_list(values) do
writes(:a, slave, 16, address, values)
end
def parse({:rc, slave, _address, count}, <<slave, 1, bytes, data::binary>>) do
^bytes = Utils.byte_count(count)
Utils.bin_to_bitlist(count, data)
end
def parse({:ri, slave, _address, count}, <<slave, 2, bytes, data::binary>>) do
^bytes = Utils.byte_count(count)
Utils.bin_to_bitlist(count, data)
end
def parse({:rhr, slave, _address, count}, <<slave, 3, bytes, data::binary>>) do
^bytes = 2 * count
Utils.bin_to_reglist(count, data)
end
def parse({:rir, slave, _address, count}, <<slave, 4, bytes, data::binary>>) do
^bytes = 2 * count
Utils.bin_to_reglist(count, data)
end
def parse({:fc, slave, address, 0}, <<slave, 5, address::16, 0x00, 0x00>>) do
nil
end
def parse({:fc, slave, address, 1}, <<slave, 5, address::16, 0xFF, 0x00>>) do
nil
end
def parse({:phr, slave, address, value}, <<slave, 6, address::16, value::16>>) do
nil
end
def parse({:fc, slave, address, values}, <<slave, 15, address::16, count::16>>) do
^count = Enum.count(values)
nil
end
def parse({:phr, slave, address, values}, <<slave, 16, address::16, count::16>>) do
^count = Enum.count(values)
nil
end
def length({:rc, _slave, _address, count}) do
3 + Utils.byte_count(count)
end
def length({:ri, _slave, _address, count}) do
3 + Utils.byte_count(count)
end
def length({:rhr, _slave, _address, count}) do
3 + 2 * count
end
def length({:rir, _slave, _address, count}) do
3 + 2 * count
end
def length({:fc, _slave, _address, _}) do
6
end
def length({:phr, _slave, _address, _}) do
6
end
defp reads(slave, function, data) do
bytes = :erlang.byte_size(data)
<<slave, function, bytes, data::binary>>
end
defp write(:d, slave, function, address, value) do
<<slave, function, address::16, Utils.bool_to_byte(value), 0x00>>
end
defp write(:a, slave, function, address, value) do
<<slave, function, address::16, value::16>>
end
defp writes(_type, slave, function, address, values) do
count = Enum.count(values)
<<slave, function, address::16, count::16>>
end
end
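# Round-trip sketch (assumes Utils.reglist_to_bin/1 and Utils.bin_to_reglist/2
# use 16-bit big-endian words, matching standard Modbus framing):
#
#   # Pack a "read holding registers" reply from slave 1 with two registers:
#   Modbus.Response.pack({:rhr, 1, 0, 2}, [0x0102, 0x0304])
#   #=> <<1, 3, 4, 1, 2, 3, 4>>
#
#   # Parse the same frame back against the matching request tuple:
#   Modbus.Response.parse({:rhr, 1, 0, 2}, <<1, 3, 4, 1, 2, 3, 4>>)
#   #=> [0x0102, 0x0304]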
|
lib/response.ex
| 0.583085 | 0.539347 |
response.ex
|
starcoder
|
defmodule TelemetryMetricsLogger do
@moduledoc """
A reporter that prints events to the `Logger`.
This module aggregates and prints metrics information at a configurable frequency.
For example, imagine the given metrics:
metrics = [
last_value("vm.memory.binary", unit: :byte),
counter("vm.memory.total"),
summary("phoenix.endpoint.stop.duration", unit: {:native, :millisecond})
]
This reporter can be started as a child of your supervision tree like this:
{TelemetryMetricsLogger, metrics: metrics, reporter_options: [interval: 60]}
Then, every sixty seconds, you will see a report like this:
```log
12:31:54.492 [info] Telemetry report 2020-11-09T17:48:00Z
Event [:vm, :memory]
Measurement "binary"
Last value: 100 B
Measurement "total"
Counter: 1
Event [:phoenix, :endpoint, :stop]
Measurement "duration"
Summary:
Average: 101 ms
Min: 52 ms
Max: 127 ms
```
"""
use GenServer
require Logger
def start_link(opts) do
metrics =
opts[:metrics] ||
raise ArgumentError, "the :metrics option is required by #{inspect(__MODULE__)}"
reporter_options = Keyword.get(opts, :reporter_options, [])
log_level = reporter_options |> Keyword.get(:log_level, :info)
reporting_interval = reporter_options |> Keyword.get(:interval, 60)
GenServer.start_link(__MODULE__, {metrics, log_level, reporting_interval}, name: __MODULE__)
end
def handle_event(event_name, measurements, metadata, _config) do
GenServer.cast(__MODULE__, {:handle_event, event_name, measurements, metadata})
end
@impl true
def init({metrics, log_level, reporting_interval}) do
Process.flag(:trap_exit, true)
groups = Enum.group_by(metrics, & &1.event_name)
for {event, _metrics} <- groups do
id = {__MODULE__, event, self()}
:telemetry.attach(id, event, &handle_event/4, [])
end
Process.send_after(self(), :report, reporting_interval * 1_000)
{
:ok,
%{
metric_definitions: groups,
reporting_interval: reporting_interval,
log_level: log_level,
report: %{}
}
}
end
@impl true
def terminate(_, state) do
events =
state.metric_definitions
|> Map.keys()
for event <- events do
:telemetry.detach({__MODULE__, event, self()})
end
:ok
end
@impl true
def handle_cast({:handle_event, event_name, measurements, metadata}, state) do
metric_defs_for_event = state.metric_definitions[event_name]
report =
metric_defs_for_event
|> Enum.map(fn metric_def ->
measurement = extract_measurement(metric_def, measurements, metadata)
tags = extract_tags(metric_def, metadata)
{metric_def, measurement, tags}
end)
|> Enum.filter(fn {mdef, _m, _tags} -> keep?(mdef, metadata) end)
|> Enum.reduce(state.report, &update_report(event_name, &1, &2))
{:noreply, %{state | report: report}}
end
defp update_report(_event_name, {metric_def, measurement, _tags}, report) do
Map.update(
report,
metric_def.name,
new_metric_value(metric_def, measurement),
&update_metric_value(metric_def, &1, measurement)
)
end
defp new_metric_value(%Telemetry.Metrics.Counter{}, _measurement), do: %{counter: 1}
defp new_metric_value(%Telemetry.Metrics.Distribution{}, measurement),
do: %{distribution: [measurement]}
defp new_metric_value(%Telemetry.Metrics.LastValue{}, measurement),
do: %{last_value: measurement}
defp new_metric_value(%Telemetry.Metrics.Sum{}, measurement), do: %{sum: measurement}
defp new_metric_value(%Telemetry.Metrics.Summary{}, measurement), do: %{summary: [measurement]}
defp update_metric_value(%Telemetry.Metrics.Counter{}, current_value, _measurement) do
Map.update(current_value, :counter, 1, &(&1 + 1))
end
defp update_metric_value(%Telemetry.Metrics.Distribution{}, current_value, measurement) do
Map.update(current_value, :distribution, [measurement], &[measurement | &1])
end
defp update_metric_value(%Telemetry.Metrics.LastValue{}, current_value, measurement) do
Map.put(current_value, :last_value, measurement)
end
defp update_metric_value(%Telemetry.Metrics.Sum{}, current_value, measurement) do
Map.update(current_value, :sum, measurement, &(&1 + measurement))
end
defp update_metric_value(%Telemetry.Metrics.Summary{}, current_value, measurement) do
Map.update(current_value, :summary, [measurement], &[measurement | &1])
end
defp keep?(%{keep: nil}, _metadata), do: true
defp keep?(metric, metadata), do: metric.keep.(metadata)
defp extract_measurement(metric, measurements, metadata) do
case metric.measurement do
fun when is_function(fun, 2) -> fun.(measurements, metadata)
fun when is_function(fun, 1) -> fun.(measurements)
key -> measurements[key]
end
end
defp extract_tags(metric, metadata) do
tag_values = metric.tag_values.(metadata)
Map.take(tag_values, metric.tags)
end
@impl true
def handle_info(:report, state) do
report = build_report(state, DateTime.utc_now())
Logger.log(state.log_level, report)
Process.send_after(self(), :report, state.reporting_interval * 1_000)
{:noreply, %{state | report: %{}}}
end
@doc false
def build_report(state, timestamp) do
metric_def_groups = state.metric_definitions
[
"Telemetry report #{timestamp}:"
| Enum.flat_map(metric_def_groups, fn {event, defs} ->
measurement_groups = Enum.group_by(defs, &List.last(&1.name))
[
" Event #{inspect(event)}"
| Enum.flat_map(measurement_groups, fn {measurement_name, defs} ->
[
" Measurement \"#{measurement_name}\""
| Enum.map(defs, fn def ->
metric_report = Map.get(state.report, def.name, %{})
metric_text(def, metric_report)
end)
]
end)
]
end)
]
|> Enum.join("\n")
|> String.trim_trailing()
end
defp metric_text(%Telemetry.Metrics.Counter{}, report) do
counter = Map.get(report, :counter, 0)
" Counter: #{counter}"
end
defp metric_text(%Telemetry.Metrics.Distribution{} = def, report) do
distribution = Map.get(report, :distribution, [])
if Enum.empty?(distribution) do
" Distribution: No data for distribution!"
else
avg = Enum.sum(report.distribution) / Enum.count(report.distribution)
"""
Distribution:
mean: #{do_round(avg)} #{unit_to_string(def.unit)}
"""
end
end
defp metric_text(%Telemetry.Metrics.LastValue{} = def, report) do
if is_nil(report[:last_value]) do
" Last value: No data!"
else
" Last value: #{report.last_value |> do_round()} #{unit_to_string(def.unit)}"
end
end
defp metric_text(%Telemetry.Metrics.Sum{} = def, report) do
sum = Map.get(report, :sum, 0)
" Sum: #{sum |> do_round()} #{unit_to_string(def.unit)}"
end
defp metric_text(%Telemetry.Metrics.Summary{} = def, report) do
summary = Map.get(report, :summary, [])
if Enum.empty?(summary) do
" Summary: No data for summary!"
else
avg = Enum.sum(summary) / Enum.count(summary)
"""
Summary:
Average: #{do_round(avg)} #{unit_to_string(def.unit)}
Max: #{Enum.max(summary) |> do_round()} #{unit_to_string(def.unit)}
Min: #{Enum.min(summary) |> do_round()} #{unit_to_string(def.unit)}
"""
|> String.trim_trailing()
end
end
defp do_round(x) when is_float(x) do
Float.round(x, 3)
end
defp do_round(x) do
x
end
defp unit_to_string(:unit), do: ""
defp unit_to_string(:second), do: "s"
defp unit_to_string(:millisecond), do: "ms"
defp unit_to_string(:microsecond), do: "μs"
defp unit_to_string(:nanosecond), do: "ns"
defp unit_to_string(:byte), do: "B"
defp unit_to_string(:kilobyte), do: "kB"
defp unit_to_string(:megabyte), do: "MB"
end
|
lib/telemetry_metrics_logger.ex
| 0.89493 | 0.655253 |
telemetry_metrics_logger.ex
|
starcoder
|
defmodule Ockam.Example.Stream.BiDirectional.Remote do
@moduledoc """
Ping-pong example for bi-directional stream communication using remote subscription
Use-case: integrate ockam nodes which don't implement stream protocol yet
Pre-requisites:
Ockam hub running with stream service and TCP listener
Two ockam nodes "ping" and "pong":
Expected behaviour:
Two nodes "ping" and "pong" send messages to each other using two streams:
"pong_topic" to send messages to "pong" node
"ping_topic" to send messages to "ping" node
Implementation:
Stream service is running on the hub node
Stream subscription service can create consumers and publishers on the hub node
Ping and pong nodes call subscription service to get addresses to send messages to
PROTOCOLS:
This example uses protocols to create a remote consumer and publisher,
defined in ../../../stream/client/bi_directional/subscribe.ex
Ockam.Protocol.Stream.BiDirectional:
name: stream_bidirectional
request: {
stream_name: string
subscription_id: string
}
Ockam.Protocol.Stream.BiDirectional.EnsurePublisher
name: stream_bidirectional_publisher
request: {
publisher_stream: :string,
consumer_stream: :string,
subscription_id: :string
}
"""
alias Ockam.Example.Stream.Ping
alias Ockam.Example.Stream.Pong
alias Ockam.Message
alias Ockam.Workers.Call
@hub_tcp %Ockam.Transport.TCPAddress{ip: {127, 0, 0, 1}, port: 4000}
## This should be run on PONG node.
## It returns a forwarding alias to use to route messages to PONG
def init_pong() do
ensure_tcp(5000)
## We run a call named "pong" to create an alias route to "pong"
## This should happen before creating the proper "pong"
alias_address = register_alias("pong")
:timer.sleep(1000)
## Create the actual pong worker
{:ok, "pong"} = Pong.create(address: "pong")
## Call subscription service to create a remote consumer, which will forward
## messages through TCP to the pong NODE
subscribe("pong_topic")
alias_address
end
## This should be run on PING node after PONG initialized
## Accepting the PONG alias address
def run(pong_address) do
ensure_tcp(3000)
Ping.create(address: "ping")
subscribe("ping_topic")
# Call subscribe service to get remote publisher
reply =
Call.call(%{
onward_route: [@hub_tcp, "stream_subscribe"],
payload:
Ockam.Protocol.encode_payload(
Ockam.Protocol.Stream.BiDirectional.EnsurePublisher,
:request,
%{
publisher_stream: "pong_topic",
consumer_stream: "ping_topic",
subscription_id: "foo"
}
)
})
## Get the route to remote publisher
publisher_route = Message.return_route(reply)
## Send message THROUGH publisher to the destination address
send_message(publisher_route ++ [pong_address])
end
def send_message(route) do
msg = %{
onward_route: route,
return_route: ["ping"],
payload: "0"
}
Ockam.Router.route(msg)
end
## Calling subscribe service to create a remote consumer
def subscribe(stream) do
## Remote subscribe
subscribe_msg = %{
onward_route: [@hub_tcp, "stream_subscribe"],
return_route: [],
payload:
Ockam.Protocol.encode_payload(Ockam.Protocol.Stream.BiDirectional, :request, %{
stream_name: stream,
subscription_id: "foo"
})
}
Ockam.Router.route(subscribe_msg)
## No return yet, so just wait
:timer.sleep(2000)
end
def ensure_tcp(port) do
Ockam.Transport.TCP.create_listener(port: port, route_outgoing: true)
end
def register_alias(address) do
reply =
Call.call(
%{
onward_route: [@hub_tcp, "forwarding_service"],
payload: "register"
},
address: address
)
alias_route = Message.return_route(reply)
List.last(alias_route)
end
end
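# Run sketch (two nodes plus an Ockam hub listening on 127.0.0.1:4000, as the
# @hub_tcp attribute above assumes):
#
#   # On the "pong" node:
#   alias Ockam.Example.Stream.BiDirectional.Remote
#   pong_address = Remote.init_pong()
#
#   # On the "ping" node, passing the alias address obtained above:
#   Remote.run(pong_address)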
|
implementations/elixir/ockam/ockam/lib/ockam/examples/stream/bi_directional/remote.ex
| 0.833663 | 0.522324 |
remote.ex
|
starcoder
|
defmodule DeltaCrdt.AWLWWMap do
defstruct dots: MapSet.new(),
value: %{}
require Logger
@doc false
def new(), do: %__MODULE__{}
defmodule Dots do
@moduledoc false
def compress(dots = %MapSet{}) do
Enum.reduce(dots, %{}, fn {c, i}, dots_map ->
Map.update(dots_map, c, i, fn
x when x > i -> x
_x -> i
end)
end)
end
def decompress(dots = %MapSet{}), do: dots
def decompress(dots) do
Enum.flat_map(dots, fn {i, x} ->
Enum.map(1..x, fn y -> {i, y} end)
end)
end
def next_dot(i, c = %MapSet{}) do
Logger.warn("inefficient next_dot computation")
next_dot(i, compress(c))
end
def next_dot(i, c) do
{i, Map.get(c, i, 0) + 1}
end
def union(dots1 = %MapSet{}, dots2 = %MapSet{}) do
MapSet.union(dots1, dots2)
end
def union(dots1 = %MapSet{}, dots2), do: union(dots2, dots1)
def union(dots1, dots2) do
Enum.reduce(dots2, dots1, fn {c, i}, dots_map ->
Map.update(dots_map, c, i, fn
x when x > i -> x
_x -> i
end)
end)
end
def difference(dots1 = %MapSet{}, dots2 = %MapSet{}) do
MapSet.difference(dots1, dots2)
end
def difference(_dots1, _dots2 = %MapSet{}),
do: raise("this should not happen")
def difference(dots1, dots2) do
Enum.reject(dots1, fn dot ->
member?(dots2, dot)
end)
|> MapSet.new()
end
def member?(dots = %MapSet{}, dot = {_, _}) do
MapSet.member?(dots, dot)
end
def member?(dots, {i, x}) do
Map.get(dots, i, 0) >= x
end
def strict_expansion?(_dots = %MapSet{}, _delta_dots),
do: raise "we should not get here"
def strict_expansion?(dots, delta_dots) do
Enum.all?(min_dots(delta_dots), fn {i, x} ->
Map.get(dots, i, 0) + 1 >= x
end)
end
def min_dots(dots = %MapSet{}) do
Enum.reduce(dots, %{}, fn {i, x}, min ->
Map.update(min, i, x, fn
min when min < x -> min
_min -> x
end)
end)
end
def min_dots(_dots) do
%{}
end
end
def add(key, value, i, state) do
rem = remove(key, i, state)
add =
fn aw_set, context ->
aw_set_add(i, {value, System.monotonic_time(:nanosecond)}, {aw_set, context})
end
|> apply_op(key, state)
case MapSet.size(rem.dots) do
0 -> add
_ -> join(rem, add, [key])
end
end
@doc false
def compress_dots(state) do
%{state | dots: Dots.compress(state.dots)}
end
defp aw_set_add(i, el, {aw_set, c}) do
d = Dots.next_dot(i, c)
{%{el => MapSet.new([d])}, MapSet.put(Map.get(aw_set, el, MapSet.new()), d)}
end
defp apply_op(op, key, %{value: m, dots: c}) do
{val, c_p} = op.(Map.get(m, key, %{}), c)
%__MODULE__{
dots: MapSet.new(c_p),
value: %{key => val}
}
end
def remove(key, _i, state) do
%{value: val} = state
to_remove_dots =
case Map.fetch(val, key) do
{:ok, value} -> Enum.flat_map(value, fn {_val, to_remove_dots} -> to_remove_dots end)
:error -> []
end
%__MODULE__{
dots: MapSet.new(to_remove_dots),
value: %{}
}
end
def clear(_i, state) do
Map.put(state, :value, %{})
end
@doc false
def join(delta1, delta2, keys) do
new_dots = Dots.union(delta1.dots, delta2.dots)
join_or_maps(delta1, delta2, [:join_or_maps, :join_dot_sets], keys)
|> Map.put(:dots, new_dots)
end
@doc false
def join_or_maps(delta1, delta2, nested_joins, keys) do
resolved_conflicts =
Enum.flat_map(keys, fn key ->
sub_delta1 = Map.put(delta1, :value, Map.get(delta1.value, key, %{}))
sub_delta2 = Map.put(delta2, :value, Map.get(delta2.value, key, %{}))
keys =
(Map.keys(sub_delta1.value) ++ Map.keys(sub_delta2.value))
|> Enum.uniq()
[next_join | other_joins] = nested_joins
%{value: new_sub} =
apply(__MODULE__, next_join, [sub_delta1, sub_delta2, other_joins, keys])
if Enum.empty?(new_sub) do
[]
else
[{key, new_sub}]
end
end)
|> Map.new()
new_val =
Map.drop(delta1.value, keys)
|> Map.merge(Map.drop(delta2.value, keys))
|> Map.merge(resolved_conflicts)
%__MODULE__{
value: new_val
}
end
@doc false
def join_dot_sets(%{value: s1, dots: c1}, %{value: s2, dots: c2}, [], _keys) do
s1 = MapSet.new(s1)
s2 = MapSet.new(s2)
new_s =
[
MapSet.intersection(s1, s2),
Dots.difference(s1, c2),
Dots.difference(s2, c1)
]
|> Enum.reduce(&MapSet.union/2)
%__MODULE__{value: new_s}
end
def read(%{value: values}) do
Map.new(values, fn {key, values} ->
{{val, _ts}, _c} = Enum.max_by(values, fn {{_val, ts}, _c} -> ts end)
{key, val}
end)
end
def read(_crdt, []), do: %{}
def read(%{value: values}, keys) when is_list(keys) do
read(%{value: Map.take(values, keys)})
end
def read(crdt, key) do
read(crdt, [key])
end
end
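# Usage sketch (node identifiers are arbitrary terms; :node_a is assumed here):
#
#   crdt = DeltaCrdt.AWLWWMap.new()
#   delta = DeltaCrdt.AWLWWMap.add(:color, "blue", :node_a, crdt)
#   merged = DeltaCrdt.AWLWWMap.join(crdt, delta, [:color])
#   DeltaCrdt.AWLWWMap.read(merged)
#   #=> %{color: "blue"}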
|
lib/delta_crdt/aw_lww_map.ex
| 0.641535 | 0.442817 |
aw_lww_map.ex
|
starcoder
|
defmodule EthGethAdapter.Balance do
@moduledoc false
import EthGethAdapter.Encoding
alias Ethereumex.HttpClient, as: Client
alias EthGethAdapter.ERC20
@doc """
Retrieve the balance of all given `contract_addresses` for the provided wallet `address`.
The `0x0000000000000000000000000000000000000000` address is used to represent Ether.
Any other given contract address will have its balance retrieved from the corresponding
smart contract.
Returns a tuple of
```
{
:ok,
%{
"contract_address_1" => integer_balance_1,
"contract_address_2" => integer_balance_2
}
}
```
if successful or {:error, error_code} if failed.
"""
def get(address, contract_address, block \\ "latest")
def get(address, contract_addresses, block) when is_list(contract_addresses) do
case ERC20.abi_balance_of(address) do
{:ok, encoded_abi_data} ->
contract_addresses
|> Enum.map(fn contract_address ->
build_request!(contract_address, address, encoded_abi_data, block)
end)
|> request()
|> parse_response()
|> respond(contract_addresses)
error ->
error
end
end
def get(address, contract_address, block) do
get(address, [contract_address], block)
end
# Batch request builders
defp build_request!("0x0000000000000000000000000000000000000000", address, _, block) do
{:eth_get_balance, [address, block]}
end
defp build_request!(contract_address, _address, encoded_abi_data, block)
when byte_size(contract_address) == 42 do
{:eth_call,
[
%{
data: encoded_abi_data,
to: contract_address
},
block
]}
end
defp build_request!(contract_address, _address, _encoded_abi_data, _block) do
raise ArgumentError, "#{contract_address} is not a valid contract address"
end
defp request([]), do: {:ok, []}
defp request(data), do: Client.batch_request(data)
# Response parsers
defp parse_response({:ok, data}) when is_list(data) do
balances = Enum.map(data, fn hex_balance -> int_from_hex(hex_balance) end)
{:ok, balances}
end
defp parse_response({:ok, data}), do: {:ok, int_from_hex(data)}
defp parse_response({:error, data}), do: {:error, data}
# Formatters
defp respond({:ok, balances}, addresses) do
response =
[addresses, balances]
|> Enum.zip()
|> Enum.into(%{})
{:ok, response}
end
defp respond({:error, _error} = error, _addresses), do: error
end
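# Call sketch (placeholder addresses; the zero address stands for Ether and any
# other entry is queried as an ERC20 contract, as documented above):
#
#   eth = "0x0000000000000000000000000000000000000000"
#   token = "0x<erc20-contract-address>"
#   EthGethAdapter.Balance.get("0x<wallet-address>", [eth, token])
#   #=> {:ok, %{eth => 1_000_000_000_000_000_000, token => 42}}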
|
apps/eth_geth_adapter/lib/eth_geth_adapter/balance.ex
| 0.893652 | 0.786991 |
balance.ex
|
starcoder
|
defmodule EctoSanitizer do
@moduledoc """
Provides functions for sanitizing `Ecto.Changeset` fields.
"""
@doc """
Sanitizes all changes in the given changeset that apply to fields which are of
the `:string` `Ecto` type.
Uses the `HtmlSanitizeEx.strip_tags/1` function on any change that satisfies
all of the following conditions:
1. The field associated with the change is of the type `:string`.
2. The field associated with the change is not in the blacklisted_fields list
of `opts` as defined using the `:except` key in `opts`.
Note that this function will change the value in the `:changes` map of an
`%Ecto.Changeset{}` struct if the given changes are sanitized.
## Examples
iex> attrs = %{string_field: "<script>Bad</script>"}
iex> result_changeset =
...> attrs
...> |> FakeEctoSchema.changeset()
...> |> EctoSanitizer.sanitize_all_strings()
iex> result_changeset.changes
%{string_field: "Bad"}
Fields can be exempted from sanitization via the `:except` option.
iex> attrs = %{string_field: "<script>Bad</script>"}
iex> result_changeset =
...> attrs
...> |> FakeEctoSchema.changeset()
...> |> EctoSanitizer.sanitize_all_strings(except: [:string_field])
iex> result_changeset.changes
%{string_field: "<script>Bad</script>"}
"""
def sanitize_all_strings(%Ecto.Changeset{} = changeset, opts \\ []) do
blacklisted_fields = Keyword.get(opts, :except, [])
sanitized_changes =
Enum.into(changeset.changes, %{}, fn change ->
strip_html_from_change(change, blacklisted_fields, changeset.types)
end)
%{changeset | changes: sanitized_changes}
end
defp strip_html_from_change({field, value}, blacklisted_fields, types) when is_binary(value) do
if field not in blacklisted_fields and Map.get(types, field) == :string do
{field, HtmlSanitizeEx.strip_tags(value)}
else
{field, value}
end
end
defp strip_html_from_change(change, _, _), do: change
end
|
lib/ecto_sanitizer.ex
| 0.829665 | 0.418429 |
ecto_sanitizer.ex
|
starcoder
|
defmodule EVM.MachineState do
@moduledoc """
Module for tracking the current machine state, which is roughly
equivalent to the VM state for an executing contract.
This is most often seen as Β΅ in the Yellow Paper.
"""
alias EVM.{ExecEnv, Gas, MachineState, ProgramCounter, Stack}
alias EVM.Operation.Metadata
defstruct gas: nil,
program_counter: 0,
memory: <<>>,
active_words: 0,
previously_active_words: 0,
stack: [],
last_return_data: <<>>,
step: 0
@type program_counter :: integer()
@type memory :: binary()
@typedoc """
Yellow paper terms:
- g: gas
- pc: program_counter
- m: memory
- i: active_words
- s: stack
Other terms:
- step: the number of vm cycles the machine state has gone through
"""
@type t :: %__MODULE__{
gas: Gas.t(),
program_counter: program_counter,
memory: memory,
active_words: integer(),
previously_active_words: integer(),
stack: Stack.t(),
last_return_data: binary(),
step: integer()
}
@doc """
Subtracts gas required by the current instruction from the specified machine
state.
## Examples
iex> machine_state = %EVM.MachineState{gas: 10, stack: [1, 1], program_counter: 0}
iex> exec_env = %EVM.ExecEnv{machine_code: <<EVM.Operation.metadata(:add).id>>}
iex> EVM.MachineState.subtract_gas(machine_state, exec_env)
%EVM.MachineState{gas: 7, stack: [1, 1]}
"""
@spec subtract_gas(MachineState.t(), ExecEnv.t()) :: MachineState.t()
def subtract_gas(machine_state, exec_env) do
case Gas.cost_with_status(machine_state, exec_env) do
{:changed, cost, new_call_gas} ->
new_stack = Stack.replace(machine_state.stack, 0, new_call_gas)
%{machine_state | gas: machine_state.gas - cost, stack: new_stack}
{:original, cost} ->
%{machine_state | gas: machine_state.gas - cost}
end
end
@doc """
Refunds gas in the machine state
## Examples
iex> machine_state = %EVM.MachineState{gas: 5}
iex> EVM.MachineState.refund_gas(machine_state, 5)
%EVM.MachineState{gas: 10}
"""
@spec refund_gas(MachineState.t(), integer()) :: MachineState.t()
def refund_gas(machine_state, amount) do
%{machine_state | gas: machine_state.gas + amount}
end
@doc """
After a memory operation, we may have incremented the total number
of active words. This function takes a memory offset accessed and
updates the machine state accordingly.
## Examples
iex> %EVM.MachineState{active_words: 2} |> EVM.MachineState.maybe_set_active_words(1)
%EVM.MachineState{active_words: 2}
iex> %EVM.MachineState{active_words: 2} |> EVM.MachineState.maybe_set_active_words(3)
%EVM.MachineState{active_words: 3}
"""
@spec maybe_set_active_words(t, EVM.val()) :: t
def maybe_set_active_words(machine_state, last_word) do
%{machine_state | active_words: max(machine_state.active_words, last_word)}
end
@doc """
Pops n values off the stack.
## Examples
iex> EVM.MachineState.pop_n(%EVM.MachineState{stack: [1, 2, 3]}, 2)
{[1 ,2], %EVM.MachineState{stack: [3]}}
"""
@spec pop_n(MachineState.t(), integer()) :: {list(EVM.val()), MachineState.t()}
def pop_n(machine_state, n) do
{values, stack} = Stack.pop_n(machine_state.stack, n)
machine_state = %{machine_state | stack: stack}
{values, machine_state}
end
@doc """
Push a values onto the stack
## Examples
iex> EVM.MachineState.push(%EVM.MachineState{stack: [2, 3]}, 1)
%EVM.MachineState{stack: [1, 2, 3]}
"""
@spec push(MachineState.t(), EVM.val()) :: MachineState.t()
def push(machine_state, value) do
%{machine_state | stack: Stack.push(machine_state.stack, value)}
end
@doc """
Increments the program counter
## Examples
iex> EVM.MachineState.move_program_counter(%EVM.MachineState{program_counter: 9}, EVM.Operation.metadata(:add), [1, 1])
%EVM.MachineState{program_counter: 10}
"""
@spec move_program_counter(MachineState.t(), Metadata.t(), list(EVM.val())) :: MachineState.t()
def move_program_counter(machine_state, operation_metadata, inputs) do
next_position = ProgramCounter.next(machine_state.program_counter, operation_metadata, inputs)
%{machine_state | program_counter: next_position}
end
@doc """
Increments the step (representing another vm cycle)
## Examples
iex> EVM.MachineState.increment_step(%EVM.MachineState{step: 9})
%EVM.MachineState{step: 10}
"""
@spec increment_step(MachineState.t()) :: MachineState.t()
def increment_step(machine_state) do
%{machine_state | step: machine_state.step + 1}
end
end
|
apps/evm/lib/evm/machine_state.ex
| 0.861188 | 0.653787 |
machine_state.ex
|
starcoder
|
defmodule Ecto.Query.Normalizer do
@moduledoc false
# Normalizes a query so that it is as consistent as possible.
alias Ecto.Query.QueryExpr
alias Ecto.Query.JoinExpr
alias Ecto.Query.Util
def normalize(query, opts \\ []) do
query
|> setup_sources
|> normalize_joins
|> auto_select(opts)
|> normalize_distinct
|> normalize_group_by
end
defp normalize_joins(query) do
%{query | joins: Enum.map(query.joins, &normalize_join(&1, query))}
end
# Transform an assocation join to an ordinary join
def normalize_join(%JoinExpr{assoc: nil} = join, _query), do: join
def normalize_join(%JoinExpr{assoc: {left, right}} = join, query) do
model = Util.find_source(query.sources, left) |> Util.model
if is_nil(model) do
raise Ecto.QueryError, file: join.file, line: join.line,
reason: "association join cannot be performed without a model"
end
refl = model.__schema__(:association, right)
unless refl do
raise Ecto.QueryError, file: join.file, line: join.line,
reason: "could not find association `#{right}` on model #{inspect model}"
end
associated = refl.associated
assoc_var = Util.model_var(query, associated)
on_expr = on_expr(join.on, refl, assoc_var, left)
on = %QueryExpr{expr: on_expr, file: join.file, line: join.line}
%{join | source: associated, on: on}
end
defp on_expr(on_expr, refl, assoc_var, struct_var) do
key = refl.key
assoc_key = refl.assoc_key
relation = quote do
unquote(assoc_var).unquote(assoc_key) == unquote(struct_var).unquote(key)
end
if on_expr do
quote do: unquote(on_expr.expr) and unquote(relation)
else
relation
end
end
# Auto select the model in the from expression
defp auto_select(query, opts) do
if !opts[:skip_select] && query.select == nil do
var = {:&, [], [0]}
%{query | select: %QueryExpr{expr: var}}
else
query
end
end
# Group by all fields
defp normalize_group_by(query) do
entities = normalize_models(query.group_bys, query.sources)
%{query | group_bys: entities}
end
# Add distinct on all field when model is in field list
defp normalize_distinct(query) do
entities = normalize_models(query.distincts, query.sources)
%{query | distincts: entities}
end
# Expand model into all of its fields in an expression
defp normalize_models(query_expr, sources) do
Enum.map(query_expr, fn expr ->
new_expr =
Enum.flat_map(expr.expr, fn
{:&, _, _} = var ->
model = Util.find_source(sources, var) |> Util.model
fields = model.__schema__(:field_names)
Enum.map(fields, &{var, &1})
field ->
[field]
end)
%{expr | expr: new_expr}
end)
end
# Adds all sources to the query for fast access
defp setup_sources(query) do
froms = if query.from, do: [query.from], else: []
sources = Enum.reduce(query.joins, froms, fn
%JoinExpr{assoc: {left, right}}, acc ->
model = Util.find_source(Enum.reverse(acc), left) |> Util.model
if model && (refl = model.__schema__(:association, right)) do
assoc = refl.associated
[ {assoc.__schema__(:source), assoc} | acc ]
else
[nil|acc]
end
# TODO: Validate this on join creation
%JoinExpr{source: source}, acc when is_binary(source) ->
[ {source, nil} | acc ]
%JoinExpr{source: model}, acc when is_atom(model) ->
[ {model.__schema__(:source), model} | acc ]
end)
%{query | sources: sources |> Enum.reverse() |> List.to_tuple()}
end
end
|
lib/ecto/query/normalizer.ex
| 0.732687 | 0.470068 |
normalizer.ex
|
starcoder
|
defmodule Axon.Training.Callbacks do
@moduledoc """
Axon training callbacks.
"""
@doc """
Standard IO Logger callback.
Logs training results to standard out.
"""
def standard_io_logger(train_state, :before_train, opts) do
epochs = opts[:epochs]
metrics = Map.keys(train_state[:metrics])
IO.puts("Training model for #{epochs} epochs")
IO.puts("Metrics: #{inspect(metrics)}")
{:cont, train_state}
end
def standard_io_logger(train_state, :after_batch, opts) do
log_every = opts[:log_every]
case log_every do
:none ->
:ok
:every ->
log_batch(
train_state[:epoch],
train_state[:epoch_step],
train_state[:epoch_loss],
train_state[:metrics]
)
log_every when is_integer(log_every) ->
if Nx.remainder(train_state[:epoch_step], log_every) == Nx.tensor(0) do
log_batch(
train_state[:epoch],
train_state[:epoch_step],
train_state[:epoch_loss],
train_state[:metrics]
)
end
end
{:cont, train_state}
end
def standard_io_logger(train_state, :after_epoch, _opts) do
epoch = Nx.to_scalar(train_state[:epoch])
# Should this really be a part of train state, maybe an extra metadata argument?
time = train_state[:time]
epoch_loss = train_state[:epoch_loss]
IO.puts("\n")
IO.puts("Epoch #{epoch + 1} time: #{time / 1_000_000}s")
IO.puts("Epoch #{epoch + 1} loss: #{:io_lib.format("~.5f", [Nx.to_scalar(epoch_loss)])}")
train_state[:metrics]
|> Enum.each(fn {k, v} ->
IO.puts("Epoch #{epoch} #{Atom.to_string(k)}: #{:io_lib.format("~.5f", [Nx.to_scalar(v)])}")
end)
IO.puts("\n")
{:cont, train_state}
end
def standard_io_logger(train_state, :after_train, _opts) do
IO.puts("Training finished")
{:cont, train_state}
end
def standard_io_logger(train_state, _, _opts), do: {:cont, train_state}
defp log_batch(epoch, step, loss, metrics) do
metrics =
metrics
|> Enum.map(fn {k, v} ->
"Average #{Atom.to_string(k)}: #{:io_lib.format("~.5f", [Nx.to_scalar(v)])}"
end)
metrics =
Enum.join(
["Average Loss: #{:io_lib.format("~.5f", [Nx.to_scalar(loss)])}" | metrics],
" - "
)
IO.write(
"\rEpoch #{Nx.to_scalar(epoch) + 1}, batch #{Nx.to_scalar(step)} - " <>
"#{metrics}"
)
end
end
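# Invocation sketch (assumes the training loop calls each callback as
# callback.(train_state, event, opts), matching the clause heads above):
#
#   train_state = %{metrics: %{accuracy: Nx.tensor(0.0)}}
#
#   {:cont, _state} =
#     Axon.Training.Callbacks.standard_io_logger(train_state, :before_train, epochs: 5)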
|
lib/axon/training/callbacks.ex
| 0.805288 | 0.474144 |
callbacks.ex
|
starcoder
|
defmodule Openflow.Action do
@moduledoc """
Openflow parser handler
"""
@type type ::
Openflow.Action.Output.t()
| Openflow.Action.CopyTtlOut.t()
| Openflow.Action.CopyTtlIn.t()
| Openflow.Action.SetMplsTtl.t()
| Openflow.Action.DecMplsTtl.t()
| Openflow.Action.PushVlan.t()
| Openflow.Action.PopVlan.t()
| Openflow.Action.PushMpls.t()
| Openflow.Action.PopMpls.t()
| Openflow.Action.SetQueue.t()
| Openflow.Action.Group.t()
| Openflow.Action.SetNwTtl.t()
| Openflow.Action.DecNwTtl.t()
| Openflow.Action.SetField.t()
| Openflow.Action.PushPbb.t()
| Openflow.Action.PopPbb.t()
| Openflow.Action.NxResubmit.t()
| Openflow.Action.NxSetTunnel.t()
| Openflow.Action.NxRegMove.t()
| Openflow.Action.NxRegLoad.t()
| Openflow.Action.NxNote.t()
| Openflow.Action.NxSetTunnel64.t()
| Openflow.Action.NxMultipath.t()
| Openflow.Action.NxBundle.t()
| Openflow.Action.NxBundleLoad.t()
| Openflow.Action.NxResubmitTable.t()
| Openflow.Action.NxOutputReg.t()
| Openflow.Action.NxLearn.t()
| Openflow.Action.NxExit.t()
| Openflow.Action.NxDecTtl.t()
| Openflow.Action.NxFinTimeout.t()
| Openflow.Action.NxController.t()
| Openflow.Action.NxDecTtlCntIds.t()
| Openflow.Action.NxWriteMetadata.t()
| Openflow.Action.NxPushMpls.t()
| Openflow.Action.NxPopMpls.t()
| Openflow.Action.NxStackPush.t()
| Openflow.Action.NxStackPop.t()
| Openflow.Action.NxSample.t()
| Openflow.Action.NxOutputReg2.t()
| Openflow.Action.NxRegLoad2.t()
| Openflow.Action.NxConjunction.t()
| Openflow.Action.NxConntrack.t()
| Openflow.Action.NxNat.t()
| Openflow.Action.NxController2.t()
| Openflow.Action.NxSample2.t()
| Openflow.Action.NxOutputTrunc.t()
| Openflow.Action.NxGroup.t()
| Openflow.Action.NxSample3.t()
| Openflow.Action.NxClone.t()
| Openflow.Action.NxCtClear.t()
| Openflow.Action.NxResubmitTableCt.t()
| Openflow.Action.NxLearn2.t()
def read(action_bin) do
do_read([], action_bin)
end
def to_binary(actions) when is_list(actions) do
to_binary(<<>>, actions)
end
def to_binary(action) do
to_binary([action])
end
# private functions
defp do_read(acc, <<>>), do: Enum.reverse(acc)
defp do_read(acc, <<0::32, _::bytes>>), do: Enum.reverse(acc)
defp do_read(acc, <<type::16, length::16, _::bytes>> = binary) do
<<action_bin::size(length)-bytes, rest::bytes>> = binary
codec = Openflow.Enums.to_atom(type, :action_type)
do_read([codec.read(action_bin) | acc], rest)
end
defp to_binary(acc, []), do: acc
defp to_binary(acc, [action | rest]) do
codec = action.__struct__
to_binary(<<acc::bytes, codec.to_binary(action)::bytes>>, rest)
end
end
|
lib/openflow/action.ex
| 0.513912 | 0.451145 |
action.ex
|
starcoder
|
defmodule Garlic.Circuit.Cell do
@moduledoc "Tor circuit cell"
# tor-spec.txt 3. Cell Packet format
# On a version 1 connection, each cell contains the following
# fields:
# CircID [CIRCID_LEN bytes]
# Command [1 byte]
# Payload (padded with padding bytes) [PAYLOAD_LEN bytes]
# On a version 2 or higher connection, all cells are as in version 1
# connections, except for variable-length cells, whose format is:
# CircID [CIRCID_LEN octets]
# Command [1 octet]
# Length [2 octets; big-endian integer]
# Payload (some commands MAY pad) [Length bytes]
alias Garlic.Circuit
@type destroy_reason ::
:none
| :protocol
| :internal
| :requested
| :hibernating
| :resourcelimit
| :connectfailed
| :or_identity
| :channel_closed
| :finished
| :timeout
| :destroyed
| :nosuchservice
@spec decode(binary) ::
{:error, atom}
| {:more, binary}
| {:ok, {0, :versions}, binary}
| {:ok, {0, :certs, binary}, binary}
| {:ok, {0, :auth_challenge}, binary}
| {:ok, {Circuit.id(), :relay, inner_cell :: binary}, binary}
| {:ok, {Circuit.id(), :destroy, destroy_reason}, binary}
| {:ok,
{0, :netinfo,
{timestamp :: pos_integer, my_address :: tuple, their_address :: tuple}}, binary}
| {:ok, {Circuit.id(), :created2, {server_public_key :: binary, auth :: binary}},
binary}
def decode(<<circuit_id::32, 3, inner_cell::binary-size(509), tail::binary>>) do
{:ok, {circuit_id, :relay, inner_cell}, tail}
end
def decode(<<circuit_id::32, 4, reason, tail::binary>>) do
reason =
Enum.at(
~w(none protocol internal requested hibernating resourcelimit connectfailed or_identity
channel_closed finished timeout destroyed nosuchservice)a,
reason
)
{:ok, {circuit_id, :destroy, reason}, tail}
end
def decode(
<<circuit_id::16, 7, payload_size::size(16), _::binary-size(payload_size), tail::binary>>
) do
{:ok, {circuit_id, :versions}, tail}
end
def decode(<<circuit_id::32, 8, timestamp::32, tail::binary>>) do
{[my_address], <<address_count, tail::binary>>} = parse_addresses(1, tail)
{addresses, _} = parse_addresses(address_count, tail)
{:ok, {circuit_id, :netinfo, {timestamp, my_address, addresses}}, ""}
end
def decode(
<<circuit_id::32, 11, 64::16, server_public_key::binary-size(32), auth::binary-size(32),
_::binary>>
) do
{:ok, {circuit_id, :created2, {server_public_key, auth}}, ""}
end
def decode(
<<circuit_id::32, 129, payload_size::size(16), payload::binary-size(payload_size),
tail::binary>>
) do
<<certs_count, data::binary>> = payload
{certs, <<>>} = parse_certs(certs_count, data)
{:ok, {circuit_id, :certs, Enum.into(certs, %{})}, tail}
end
def decode(
<<circuit_id::32, 130, payload_size::size(16), _::binary-size(payload_size),
tail::binary>>
) do
{:ok, {circuit_id, :auth_challenge}, tail}
end
def decode(buffer) when byte_size(buffer) > 509, do: {:error, :unknown_cell}
def decode(buffer), do: {:more, buffer}
defp parse_addresses(0, tail), do: {[], tail}
defp parse_addresses(
count,
<<type, address_length::size(8), addresses::binary-size(address_length), tail::binary>>
) do
{other_addresses, tail} = parse_addresses(count - 1, tail)
{[{type, addresses} | other_addresses], tail}
end
defp parse_certs(0, tail), do: {[], tail}
defp parse_certs(
count,
<<type, cert_length::size(16), cert::binary-size(cert_length), tail::binary>>
) do
{other_certs, tail} = parse_certs(count - 1, tail)
{[{type, cert} | other_certs], tail}
end
end
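# Decoding sketch: a VERSIONS cell (circuit 0, command 7) carrying a single
# two-byte version entry, with no trailing bytes:
#
#   Garlic.Circuit.Cell.decode(<<0::16, 7, 2::16, 4::16>>)
#   #=> {:ok, {0, :versions}, ""}
#
#   # Incomplete cells are handed back for further buffering:
#   Garlic.Circuit.Cell.decode(<<0::16, 7>>)
#   #=> {:more, <<0, 0, 7>>}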
|
lib/garlic/circuit/cell.ex
| 0.808219 | 0.474144 |
cell.ex
|
starcoder
|
defmodule StringMatcher do
@moduledoc ~S"""
StringMatcher allows you to pass multiple regular expressions and a string and get values back.
## Example
Let's say you have a text that is:
```
Del 5 av 6. Shakespeare är mycket nöjd med sin senaste pjäs, Så tuktas en argbigga. Men av någon anledning uppskattas inte berättelsen om hur en stark kvinna förnedras av en man av kvinnorna i Shakespeares närhet.
Originaltitel: Upstart Crow.
Produktion: BBC 2017.
```
First we would split the text into an array based on `\n` and `.` so that we can loop over the long text, as our matcher only returns the first match.
Then you would do:
```
StringMatcher.new()
|> StringMatcher.add_regexp(
~r/Del\s+(?<episode_num>[0-9]+?)\s+av\s+(?<of_episodes>[0-9]+?)/i,
%{}
)
|> StringMatcher.add_regexp(~r/Originaltitel: (?<original_title>.*)\./i, %{})
|> StringMatcher.add_regexp(
~r/Produktion: (?<production_company>.*?) (?<production_year>[0-9]+)\./i,
%{}
)
|> StringMatcher.match_captures(string)
```
This should return a tuple with a map. The map holds the values captured by the regular expressions.
If no match is found you will receive `{:error, "no match"}`
Please take a look at our tests to see a working variant of parsing the text above.
"""
@doc """
Create a new list for strings
Returns `[]`
## Examples
iex> StringMatcher.new()
[]
"""
def new do
[]
end
@doc """
Add a regexp to the list if it's in the correct format.
Returns a list of regular expressions.
## Examples
iex> StringMatcher.add_regexp([], ~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{})
[{~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{}}]
"""
def add_regexp(list, %Regex{} = regexp, result) when is_list(list) and is_map(result) do
list
|> Enum.concat([{regexp, result}])
end
def add_regexp(_, _, _), do: {:error, "wrong format"}
@doc """
Match a string to a regexp.
Returns the values that are passed as the second argument.
## Examples
iex> StringMatcher.add_regexp([], ~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{}) |> StringMatcher.match("Prison Break E01")
{:error, "no match"}
iex> StringMatcher.add_regexp([], ~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{}) |> StringMatcher.match_captures("Prison Break S01E01")
{:ok, %{"episode_num" => "01", "season_num" => "01"}}
iex> StringMatcher.add_regexp([], ~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{"name" => "Fargo"}) |> StringMatcher.match("Prison Break S01E01")
{:ok, %{"name" => "Fargo"}}
"""
def match(list, string) when is_list(list) and is_binary(string) do
Enum.reduce(list, nil, fn
{regexp, result}, nil ->
if Regex.match?(regexp, string) do
{:ok, result}
end
_, matched ->
matched
end)
|> case do
{:ok, result} -> {:ok, result}
_ -> {:error, "no match"}
end
end
def match(_, _), do: {:error, "wrong format"}
@doc """
Match a string to a regexp.
Returns either the values passed as the second argument otherwise it returns the captures.
## Examples
iex> StringMatcher.add_regexp([], ~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{}) |> StringMatcher.match_captures("Prison Break S01E01")
{:ok, %{"episode_num" => "01", "season_num" => "01"}}
iex> StringMatcher.add_regexp([], ~r/S(?<season_num>\d+)E(?<episode_num>\d+)/i, %{"name" => "Fargo"}) |> StringMatcher.match_captures("Prison Break S01E01")
{:ok, %{"name" => "Fargo"}}
"""
def match_captures(list, string) when is_list(list) and is_binary(string) do
Enum.reduce(list, nil, fn
{regexp, result2}, nil ->
if Regex.match?(regexp, string) do
empty?(result2, regexp, string)
end
_, matched ->
matched
end)
|> case do
{:ok, result} -> {:ok, result}
_ -> {:error, "no match"}
end
end
def match_captures(_, _), do: {:error, "wrong format"}
defp empty?(result, regexp, string) do
if Enum.empty?(result) do
{:ok, Regex.named_captures(regexp, string)}
else
{:ok, result}
end
end
end
|
lib/string_matcher.ex
| 0.872639 | 0.908252 |
string_matcher.ex
|
starcoder
|
defmodule Mix.Tasks.PromEx.Dashboard.Publish do
@moduledoc """
This mix task will publish dashboards to Grafana for a PromEx module. It is
recommended that you use the functionality that is part of the PromEx supervision
tree in order to upload dashboards as opposed to this, given that mix may not
always be available (like in a mix release). This is more so a convenience for
testing and validating dashboards without starting the whole application.
The following CLI flags are supported:
```md
-m, --module The PromEx module which will be used to render the dashboards.
This is needed to fetch any relevant assigns from the
`c:PromEx.dashboard_assigns/0` callback and to get the Grafana
configuration from app config.
-t, --timeout The timeout value defines how long the mix task will wait while
uploading dashboards.
```
"""
@shortdoc "Upload dashboards to Grafana"
use Mix.Task
alias Mix.Shell.IO
alias PromEx.DashboardUploader
@impl true
def run(args) do
# Compile the project
Mix.Task.run("compile")
# Get CLI args and set up uploader
%{module: prom_ex_module, timeout: timeout} = parse_options(args)
uploader_process_name = Mix.Tasks.PromEx.Publish.Uploader
"Elixir.#{prom_ex_module}"
|> String.to_atom()
|> Code.ensure_compiled()
|> case do
{:module, module} ->
module
{:error, reason} ->
raise "#{prom_ex_module} is not a valid PromEx module because #{inspect(reason)}"
end
|> check_grafana_configuration()
|> upload_dashboards(uploader_process_name, timeout)
end
defp parse_options(args) do
cli_options = [module: :string, timeout: :integer]
cli_aliases = [m: :module, t: :timeout]
# Parse out the arguments and put defaults where necessary
args
|> OptionParser.parse(aliases: cli_aliases, strict: cli_options)
|> case do
{options, _remaining_args, [] = _errors} ->
Map.new(options)
{_options, _remaining_args, errors} ->
raise "Invalid CLI args were provided: #{inspect(errors)}"
end
|> Map.put_new(:timeout, 10_000)
|> Map.put_new_lazy(:module, fn ->
Mix.Project.config()
|> Keyword.get(:app)
|> Atom.to_string()
|> Macro.camelize()
|> Kernel.<>(".PromEx")
end)
end
defp check_grafana_configuration(prom_ex_module) do
if prom_ex_module.init_opts().grafana_config == :disabled do
raise "#{prom_ex_module} has the Grafana option disabled. Please update your configuration and rerun."
end
prom_ex_module
end
defp upload_dashboards(prom_ex_module, uploader_process_name, timeout) do
# We don't want errors in DashboardUploader to kill the mix task
Process.flag(:trap_exit, true)
# Start the DashboardUploader
default_dashboard_opts = [otp_app: prom_ex_module.__otp_app__()]
{:ok, pid} =
DashboardUploader.start_link(
name: uploader_process_name,
prom_ex_module: prom_ex_module,
default_dashboard_opts: default_dashboard_opts
)
receive do
{:EXIT, ^pid, :normal} ->
IO.info("\nPromEx dashboard upload complete! Review the above statuses for each dashboard.")
{:EXIT, ^pid, error_reason} ->
IO.error(
"PromEx was unable to upload your dashboards to Grafana because:\n#{
Code.format_string!(inspect(error_reason))
}"
)
after
timeout ->
raise "PromEx timed out trying to upload your dashboards to Grafana"
end
end
end
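# Invocation sketch from a shell, using the flags documented above:
#
#   mix prom_ex.dashboard.publish --module MyApp.PromEx --timeout 20000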
|
lib/mix/tasks/prom_ex.dashboard.publish.ex
| 0.802594 | 0.72754 |
prom_ex.dashboard.publish.ex
|
starcoder
|
defmodule OT.Text.Scanner do
@moduledoc """
Enumerates over a pair of operations, yielding a full or partial component
from each
"""
alias OT.Text.{Component, Operation}
@typedoc "A type which is not to be split when iterating"
@type skip_type :: :delete | :insert | nil
@typedoc """
The input to the scannerβa tuple containing two operations
"""
@type input :: {Operation.t(), Operation.t()}
@typedoc """
An operation's next scanned full or partial component, and its resulting
tail operation
"""
@type operation_split :: {Component.t() | nil, Operation.t()}
@typedoc """
A tuple representing the new head component and tail operation of the two
operations being scanned over
"""
@type output :: {operation_split, operation_split}
@doc """
Given a pair of two operations, return the next two full or partial components
where the second component potentially affects the first.
A third parameter may be passed that specifies that components of a given
type are not to be split up: For example, when transforming operation `a`
over operation `b`, the insert operations from `a` should not be split in
order to preserve user intent.
When any operation's components are exhausted, it will be represented by
the tuple `{nil, []}`.
## Examples
iex> OT.Text.Scanner.next({[4, %{i: "Foo"}], [2]})
{{2, [2, %{i: "Foo"}]}, {2, []}}
iex> OT.Text.Scanner.next({[%{i: "Foo"}], [2]}, :insert)
{{%{i: "Foo"}, []}, {2, []}}
iex> OT.Text.Scanner.next({[%{d: "Foo"}], [2]})
{{%{d: "Fo"}, [%{d: "o"}]}, {2, []}}
"""
@spec next(input, skip_type) :: output
def next(input, skip_type \\ nil)
# Both operations are exhausted.
def next({[], []}, _), do: {{nil, []}, {nil, []}}
# Operation a is exhausted.
def next({[], [head_b | tail_b]}, _), do: {{nil, []}, {head_b, tail_b}}
# Operation b is exhausted.
def next({[head_a | tail_a], []}, _), do: {{head_a, tail_a}, {nil, []}}
def next(result = {[head_a | tail_a], [head_b | tail_b]}, skip_type) do
cond do
Component.no_op?(head_a) && Component.no_op?(head_b) ->
next({tail_a, tail_b}, skip_type)
Component.no_op?(head_a) ->
next({tail_a, [head_b | tail_b]}, skip_type)
Component.no_op?(head_b) ->
next({[head_a | tail_a], tail_b}, skip_type)
true ->
do_next(
result,
Component.compare(head_a, head_b),
Component.type(head_a) == skip_type
)
end
end
# A > B and is splittable, so split A
@spec do_next(input, Component.comparison(), boolean) :: output
defp do_next({[head_a | tail_a], [head_b | tail_b]}, :gt, false) do
{head_a, remainder_a} = Component.split(head_a, Component.length(head_b))
{{head_a, [remainder_a | tail_a]}, {head_b, tail_b}}
end
# B < A, so split B
defp do_next({[head_a | tail_a], [head_b | tail_b]}, :lt, _) do
{head_b, remainder_b} = Component.split(head_b, Component.length(head_a))
{{head_a, tail_a}, {head_b, [remainder_b | tail_b]}}
end
# A > B and is not splittable, or A == B, so do not split
defp do_next({[head_a | tail_a], [head_b | tail_b]}, _, _) do
{{head_a, tail_a}, {head_b, tail_b}}
end
end
|
lib/ot/text/scanner.ex
| 0.910022 | 0.64072 |
scanner.ex
|
starcoder
|
defmodule Membrane.SRTP.Decryptor do
@moduledoc """
Converts SRTP packets to plain RTP.
Decryptor expects that buffers passed to `handle_process/4` have already parsed headers
in the metadata field as they contain information about header length. The header
length is needed to avoid parsing the header twice in case of any elements preceding
the decryptor needed the information to e.g. drop the packet before reaching the decryptor.
`ExLibSRTP` expects a valid SRTP packet containing the header, after decryption, the
payload binary again includes the header. The header's length simply allows stripping
the header without any additional parsing.
Requires adding [srtp](https://github.com/membraneframework/elixir_libsrtp) dependency to work.
"""
use Membrane.Filter
alias Membrane.Buffer
alias Membrane.RTP.Utils
require Membrane.Logger
def_input_pad :input, caps: :any, demand_unit: :buffers
def_output_pad :output, caps: :any
def_options policies: [
spec: [ExLibSRTP.Policy.t()],
description: """
List of SRTP policies to use for decrypting packets.
See `t:ExLibSRTP.Policy.t/0` for details.
"""
]
@impl true
def handle_init(%__MODULE__{policies: policies}) do
state = %{
policies: policies,
srtp: nil
}
{:ok, state}
end
@impl true
def handle_stopped_to_prepared(_ctx, state) do
srtp = ExLibSRTP.new()
state.policies
|> Bunch.listify()
|> Enum.each(&ExLibSRTP.add_stream(srtp, &1))
{:ok, %{state | srtp: srtp}}
end
@impl true
def handle_prepared_to_stopped(_ctx, state) do
{:ok, %{state | srtp: nil, policies: []}}
end
@impl true
def handle_event(_pad, %{handshake_data: handshake_data}, _ctx, %{policies: []} = state) do
{_local_keying_material, remote_keying_material, protection_profile} = handshake_data
{:ok, crypto_profile} =
ExLibSRTP.Policy.crypto_profile_from_dtls_srtp_protection_profile(protection_profile)
policy = %ExLibSRTP.Policy{
ssrc: :any_inbound,
key: remote_keying_material,
rtp: crypto_profile,
rtcp: crypto_profile
}
:ok = ExLibSRTP.add_stream(state.srtp, policy)
{{:ok, redemand: :output}, Map.put(state, :policies, [policy])}
end
@impl true
def handle_event(pad, other, ctx, state), do: super(pad, other, ctx, state)
@impl true
def handle_demand(:output, _size, :buffers, _ctx, %{policies: []} = state) do
{:ok, state}
end
@impl true
def handle_demand(:output, size, :buffers, _ctx, state) do
{{:ok, demand: {:input, size}}, state}
end
@impl true
def handle_process(:input, buffer, _ctx, state) do
%Buffer{
payload: payload,
metadata: %{
rtp: %{
has_padding?: has_padding?,
total_header_size: total_header_size
}
}
} = buffer
state.srtp
|> ExLibSRTP.unprotect(payload)
|> case do
{:ok, payload} ->
# decrypted payload contains the header that we can simply strip without any parsing as we know its length
<<_header::binary-size(total_header_size), payload::binary>> = payload
{:ok, {payload, _size}} = Utils.strip_padding(payload, has_padding?)
{{:ok, buffer: {:output, %Buffer{buffer | payload: payload}}}, state}
{:error, reason} ->
Membrane.Logger.warn("""
Couldn't unprotect srtp packet:
#{inspect(payload, limit: :infinity)}
Reason: #{inspect(reason)}. Ignoring packet.
""")
{:ok, state}
end
end
end
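# Configuration sketch (the key is a placeholder; the crypto profile must match
# whatever the sending peer negotiated, :aes_cm_128_hmac_sha1_80 is assumed):
#
#   policy = %ExLibSRTP.Policy{
#     ssrc: :any_inbound,
#     key: <<0::30*8>>,
#     rtp: :aes_cm_128_hmac_sha1_80,
#     rtcp: :aes_cm_128_hmac_sha1_80
#   }
#
#   %Membrane.SRTP.Decryptor{policies: [policy]}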
|
lib/membrane/srtp/decryptor.ex
| 0.851706 | 0.420064 |
decryptor.ex
|
starcoder
|
defmodule Dat.Hypercore.Placeholder do
def get_batch(_feed, _start, _end, {:config, _timeout, _value_encoding}) do
[]
end
def head(_feed, {:config, _timeout, _value_encoding}) do
{:error, ''}
end
def download(_feed, {:range, _start, _end, _linear}) do
{:error, ['']}
end
def undownload(_feed, {:range, _start, _end, _linear}) do
{:error}
end
def signature(_feed, {:index}) do
{:error, 'last_signed_block', 'signature'}
end
def verify(_feed, _signature) do
{:error, true}
end
def root_hashes(_feed, _index) do
{:error, [{'roots', 'index', 'size', 'hash'}]}
end
# total_number_of_downloaded_blocks_within_range
def downloaded(_feed, _start, _end) do
0
end
def has_local(_feed, _index) do
true
end
def has_local(_feed, _start, _end) do
true
end
def clear do
end
def close do
end
def seek do
end
def update do
end
def set_downloading do
end
def set_uploading do
end
# create_read_stream
# create_write_stream
def is_writable do
end
def is_readable do
end
def key do
end
def discovery_key do
end
def length do
end
def byte_length do
end
def stats do
end
def on_peer_add do
end
def on_peer_remove do
end
def on_peer_open do
end
def connected_peers do
end
def register_replication_extension do
end
def send() do
end
def broadcast() do
end
def on_ready() do
end
def on_error() do
end
def on_download() do
end
def on_upload() do
end
def on_sync() do
end
def on_close() do
end
end
defmodule Dat.Hypercore do
@moduledoc """
Documentation for Dat Hypercore.
"""
def start(_args) do
# the initial cluster members
members = Enum.map([:a@localhost, :b@localhost, :c@localhost], fn node -> { :rakv, node } end)
# an arbitrary cluster name
clusterName = <<"dat_hypercore">>
# the config passed to `init/1`, must be a `map`
config = %{}
# the machine configuration
machine = {:module, Dat.Hypercore.Machine, config}
# ensure ra is started
Application.ensure_all_started(:ra)
# start a cluster instance running the `ra_kv` machine
:ra.start_cluster(cluster_name, machine, members)
end
## Client API
def new(serverid) do
:ra.process_command(serverid, {:new})
end
def get(serverid, key) do
:ra.process_command(serverid, {:get, key})
end
def set(serverid, key, feed = %Dat.Hypercore.Feed{}, index, config = %Dat.Hypercore.Config{}) do
:ra.process_command(serverid, {:set, key, feed, index, config})
end
def append(serverid, key, block) do
:ra.process_command(serverid, {:append, key, block})
end
end
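# Client sketch (assumes the three-node cluster from start/1 is running; server
# ids are {:rakv, node} tuples as above):
#
#   server = {:rakv, :a@localhost}
#   Dat.Hypercore.new(server)
#   Dat.Hypercore.append(server, "feed-key", <<"block 0">>)
#   Dat.Hypercore.get(server, "feed-key")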
|
lib/dat_hypercore.ex
| 0.655997 | 0.400691 |
dat_hypercore.ex
|
starcoder
|
defmodule Timex.PosixTimezone do
@moduledoc """
Used when parsing POSIX-TZ timezone rules.
"""
alias Timex.TimezoneInfo
defstruct name: nil,
std_abbr: nil,
std_offset: 0,
dst_abbr: nil,
dst_offset: nil,
dst_start: nil,
dst_end: nil
@type rule_bound ::
{{:julian_leap, 0..365}, Time.t()}
| {{:julian, 1..365}, Time.t()}
| {{:mwd, {month :: 1..12, week :: 1..5, day_of_week :: 0..6}}, Time.t()}
| nil
@type t :: %__MODULE__{
name: nil | String.t(),
std_abbr: nil | String.t(),
dst_abbr: nil | String.t(),
std_offset: integer(),
dst_offset: integer(),
dst_start: rule_bound(),
dst_end: rule_bound()
}
@doc """
Obtains a `NaiveDateTime` representing the start of DST for this zone.
Returns nil if there is no DST period.
"""
@spec dst_start(t, DateTime.t() | NaiveDateTime.t() | Date.t()) :: NaiveDateTime.t() | nil
def dst_start(posix_tz, date)
def dst_start(%__MODULE__{dst_start: nil}, _), do: nil
def dst_start(%__MODULE__{dst_start: dst_start}, %{year: year}) do
bound_to_naive_datetime(dst_start, year)
end
@doc """
Obtains a `NaiveDateTime` representing the end of DST for this zone.
Returns nil if there is no DST period.
"""
@spec dst_end(t, DateTime.t() | NaiveDateTime.t() | Date.t()) :: NaiveDateTime.t() | nil
def dst_end(posix_tz, date)
def dst_end(%__MODULE__{dst_end: nil}, _), do: nil
def dst_end(%__MODULE__{dst_end: dst_end}, %{year: year}) do
bound_to_naive_datetime(dst_end, year)
end
@doc """
Returns a `TimezoneInfo` struct representing this timezone for the given datetime
"""
@spec to_timezone_info(t, DateTime.t() | NaiveDateTime.t() | Date.t()) :: TimezoneInfo.t()
def to_timezone_info(%__MODULE__{} = tz, date) do
date = to_naive_datetime(date)
if is_dst?(tz, date) do
%TimezoneInfo{
full_name: tz.name,
abbreviation: tz.dst_abbr,
offset_std: tz.dst_offset,
offset_utc: tz.std_offset,
from: :min,
until: :max
}
else
%TimezoneInfo{
full_name: tz.name,
abbreviation: tz.std_abbr,
offset_std: 0,
offset_utc: tz.std_offset,
from: :min,
until: :max
}
end
end
@doc """
Returns a `Calendar.TimeZoneDatabase` compatible map, representing this timezone for the given datetime
"""
def to_period_for_date(%__MODULE__{} = tz, date) do
date = to_naive_datetime(date)
if is_dst?(tz, date) do
std_offset = tz.dst_offset
utc_offset = tz.std_offset
%{
std_offset: std_offset,
utc_offset: utc_offset,
zone_abbr: tz.dst_abbr,
time_zone: tz.name
}
else
%{std_offset: 0, utc_offset: tz.std_offset, zone_abbr: tz.std_abbr, time_zone: tz.name}
end
end
@doc """
Returns a boolean indicating if the datetime provided occurs during DST of the given POSIX timezone.
"""
@spec is_dst?(t, DateTime.t() | NaiveDateTime.t() | Date.t()) :: boolean
def is_dst?(%__MODULE__{} = tz, date) do
with %NaiveDateTime{} = dst_start <- dst_start(tz, date),
%NaiveDateTime{} = dst_end <- dst_end(tz, date) do
cond do
NaiveDateTime.compare(date, dst_start) == :lt ->
false
NaiveDateTime.compare(date, dst_end) == :gt ->
false
:else ->
true
end
else
nil ->
false
end
end
  defp bound_to_naive_datetime({{:mwd, {month, week, weekday}}, time}, year) do
month_start = Timex.Date.new!(year, month, 1)
month_start_dow = Timex.Date.day_of_week(month_start, :sunday) - 1
if weekday == month_start_dow and week == 1 do
# Got lucky, we're done
Timex.NaiveDateTime.new!(month_start, time)
else
first_week_date =
if month_start_dow <= weekday do
# The week starting on the 1st includes our weekday, so it is the first week of the month
%{month_start | day: month_start.day + (weekday - month_start_dow)}
else
# The week starting on the 1st does not include our weekday, so shift forward a week
eow = Timex.Date.end_of_week(month_start)
%{eow | day: eow.day + 1 + weekday}
end
cond do
week == 1 ->
          Timex.NaiveDateTime.new!(first_week_date, time)
:else ->
day_shift = (week - 1) * 7
day = first_week_date.day + day_shift
ldom = :calendar.last_day_of_the_month(year, month)
date =
            if day > ldom do
              # The requested occurrence overflows the month, so take the
              # last occurrence, one week earlier
              %{first_week_date | day: day - 7}
else
%{first_week_date | day: day}
end
Timex.NaiveDateTime.new!(date, time)
end
end
end
defp bound_to_naive_datetime({{:julian, day}, time}, year) do
date = Timex.Calendar.Julian.date_for_day_of_year(day - 1, year, leaps: false)
Timex.NaiveDateTime.new!(date, time)
end
defp bound_to_naive_datetime({{:julian_leap, day}, time}, year) do
date = Timex.Calendar.Julian.date_for_day_of_year(day, year, leaps: true)
Timex.NaiveDateTime.new!(date, time)
end
defp to_naive_datetime(%NaiveDateTime{} = date), do: date
defp to_naive_datetime(%DateTime{} = date), do: DateTime.to_naive(date)
defp to_naive_datetime(%Date{} = date), do: Timex.NaiveDateTime.new!(date, ~T[12:00:00])
end
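# A minimal sketch of how these helpers compose. The struct below is
# hypothetical (real values come from Timex's POSIX-TZ parser); the offsets
# and rules approximate US Eastern time:
#
#     tz = %Timex.PosixTimezone{
#       name: "EST5EDT",
#       std_abbr: "EST", std_offset: -18_000,
#       dst_abbr: "EDT", dst_offset: -14_400,
#       dst_start: {{:mwd, {3, 2, 0}}, ~T[02:00:00]},
#       dst_end: {{:mwd, {11, 1, 0}}, ~T[02:00:00]}
#     }
#
#     Timex.PosixTimezone.is_dst?(tz, ~N[2021-07-01 12:00:00])
#     #=> true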
|
lib/timezone/posix_timezone.ex
| 0.876311 | 0.4917 |
posix_timezone.ex
|
starcoder
|
defmodule AWS.AppStream do
@moduledoc """
Amazon AppStream 2.0
This is the *Amazon AppStream 2.0 API Reference*. This reference provides
descriptions and syntax for each of the actions and data types in AppStream
2.0. AppStream 2.0 is a fully managed application streaming service. You
centrally manage your desktop applications on AppStream 2.0 and securely
deliver them to any computer. AppStream 2.0 manages the AWS resources
required to host and run your applications, scales automatically, and
provides access to your users on demand.
To learn more about AppStream 2.0, see the following resources:
<ul> <li> [Amazon AppStream 2.0 product
page](http://aws.amazon.com/appstream2)
</li> <li> [Amazon AppStream 2.0
documentation](http://aws.amazon.com/documentation/appstream2)
</li> </ul>
"""
@doc """
Associates the specified fleet with the specified stack.
"""
def associate_fleet(client, input, options \\ []) do
request(client, "AssociateFleet", input, options)
end
@doc """
Associates the specified users with the specified stacks. Users in a user
pool cannot be assigned to stacks with fleets that are joined to an Active
Directory domain.
"""
def batch_associate_user_stack(client, input, options \\ []) do
request(client, "BatchAssociateUserStack", input, options)
end
@doc """
Disassociates the specified users from the specified stacks.
"""
def batch_disassociate_user_stack(client, input, options \\ []) do
request(client, "BatchDisassociateUserStack", input, options)
end
@doc """
Copies the image within the same region or to a new region within the same
AWS account. Note that any tags you added to the image will not be copied.
"""
def copy_image(client, input, options \\ []) do
request(client, "CopyImage", input, options)
end
@doc """
Creates a Directory Config object in AppStream 2.0. This object includes
the information required to join streaming instances to an Active Directory
domain.
"""
def create_directory_config(client, input, options \\ []) do
request(client, "CreateDirectoryConfig", input, options)
end
@doc """
Creates a fleet. A fleet consists of streaming instances that run a
specified image.
"""
def create_fleet(client, input, options \\ []) do
request(client, "CreateFleet", input, options)
end
@doc """
Creates an image builder. An image builder is a virtual machine that is
used to create an image.
The initial state of the builder is `PENDING`. When it is ready, the state
is `RUNNING`.
"""
def create_image_builder(client, input, options \\ []) do
request(client, "CreateImageBuilder", input, options)
end
@doc """
Creates a URL to start an image builder streaming session.
"""
def create_image_builder_streaming_u_r_l(client, input, options \\ []) do
request(client, "CreateImageBuilderStreamingURL", input, options)
end
@doc """
Creates a stack to start streaming applications to users. A stack consists
of an associated fleet, user access policies, and storage configurations.
"""
def create_stack(client, input, options \\ []) do
request(client, "CreateStack", input, options)
end
@doc """
Creates a temporary URL to start an AppStream 2.0 streaming session for the
specified user. A streaming URL enables application streaming to be tested
without user setup.
"""
def create_streaming_u_r_l(client, input, options \\ []) do
request(client, "CreateStreamingURL", input, options)
end
@doc """
Creates a new user in the user pool.
"""
def create_user(client, input, options \\ []) do
request(client, "CreateUser", input, options)
end
@doc """
Deletes the specified Directory Config object from AppStream 2.0. This
object includes the information required to join streaming instances to an
Active Directory domain.
"""
def delete_directory_config(client, input, options \\ []) do
request(client, "DeleteDirectoryConfig", input, options)
end
@doc """
Deletes the specified fleet.
"""
def delete_fleet(client, input, options \\ []) do
request(client, "DeleteFleet", input, options)
end
@doc """
Deletes the specified image. You cannot delete an image when it is in use.
After you delete an image, you cannot provision new capacity using the
image.
"""
def delete_image(client, input, options \\ []) do
request(client, "DeleteImage", input, options)
end
@doc """
Deletes the specified image builder and releases the capacity.
"""
def delete_image_builder(client, input, options \\ []) do
request(client, "DeleteImageBuilder", input, options)
end
@doc """
Deletes permissions for the specified private image. After you delete
permissions for an image, AWS accounts to which you previously granted
these permissions can no longer use the image.
"""
def delete_image_permissions(client, input, options \\ []) do
request(client, "DeleteImagePermissions", input, options)
end
@doc """
Deletes the specified stack. After the stack is deleted, the application
streaming environment provided by the stack is no longer available to
users. Also, any reservations made for application streaming sessions for
the stack are released.
"""
def delete_stack(client, input, options \\ []) do
request(client, "DeleteStack", input, options)
end
@doc """
Deletes a user from the user pool.
"""
def delete_user(client, input, options \\ []) do
request(client, "DeleteUser", input, options)
end
@doc """
Retrieves a list that describes one or more specified Directory Config
objects for AppStream 2.0, if the names for these objects are provided.
Otherwise, all Directory Config objects in the account are described. These
objects include the information required to join streaming instances to an
Active Directory domain.
Although the response syntax in this topic includes the account password,
this password is not returned in the actual response.
"""
def describe_directory_configs(client, input, options \\ []) do
request(client, "DescribeDirectoryConfigs", input, options)
end
@doc """
Retrieves a list that describes one or more specified fleets, if the fleet
names are provided. Otherwise, all fleets in the account are described.
"""
def describe_fleets(client, input, options \\ []) do
request(client, "DescribeFleets", input, options)
end
@doc """
Retrieves a list that describes one or more specified image builders, if
the image builder names are provided. Otherwise, all image builders in the
account are described.
"""
def describe_image_builders(client, input, options \\ []) do
request(client, "DescribeImageBuilders", input, options)
end
@doc """
Retrieves a list that describes the permissions for shared AWS account IDs
on a private image that you own.
"""
def describe_image_permissions(client, input, options \\ []) do
request(client, "DescribeImagePermissions", input, options)
end
@doc """
Retrieves a list that describes one or more specified images, if the image
names or image ARNs are provided. Otherwise, all images in the account are
described.
"""
def describe_images(client, input, options \\ []) do
request(client, "DescribeImages", input, options)
end
@doc """
Retrieves a list that describes the active streaming sessions for a
specified stack and fleet. If a value for `UserId` is provided for the
stack and fleet, only streaming sessions for that user are described. If an
authentication type is not provided, the default is to authenticate users
using a streaming URL.
"""
def describe_sessions(client, input, options \\ []) do
request(client, "DescribeSessions", input, options)
end
@doc """
Retrieves a list that describes one or more specified stacks, if the stack
names are provided. Otherwise, all stacks in the account are described.
"""
def describe_stacks(client, input, options \\ []) do
request(client, "DescribeStacks", input, options)
end
@doc """
Retrieves a list that describes the UserStackAssociation objects. You must
specify either or both of the following:
<ul> <li> The stack name
</li> <li> The user name (email address of the user associated with the
stack) and the authentication type for the user
</li> </ul>
"""
def describe_user_stack_associations(client, input, options \\ []) do
request(client, "DescribeUserStackAssociations", input, options)
end
@doc """
Retrieves a list that describes one or more specified users in the user
pool.
"""
def describe_users(client, input, options \\ []) do
request(client, "DescribeUsers", input, options)
end
@doc """
Disables the specified user in the user pool. Users can't sign in to
AppStream 2.0 until they are re-enabled. This action does not delete the
user.
"""
def disable_user(client, input, options \\ []) do
request(client, "DisableUser", input, options)
end
@doc """
Disassociates the specified fleet from the specified stack.
"""
def disassociate_fleet(client, input, options \\ []) do
request(client, "DisassociateFleet", input, options)
end
@doc """
Enables a user in the user pool. After being enabled, users can sign in to
AppStream 2.0 and open applications from the stacks to which they are
assigned.
"""
def enable_user(client, input, options \\ []) do
request(client, "EnableUser", input, options)
end
@doc """
Immediately stops the specified streaming session.
"""
def expire_session(client, input, options \\ []) do
request(client, "ExpireSession", input, options)
end
@doc """
Retrieves the name of the fleet that is associated with the specified
stack.
"""
def list_associated_fleets(client, input, options \\ []) do
request(client, "ListAssociatedFleets", input, options)
end
@doc """
Retrieves the name of the stack with which the specified fleet is
associated.
"""
def list_associated_stacks(client, input, options \\ []) do
request(client, "ListAssociatedStacks", input, options)
end
@doc """
Retrieves a list of all tags for the specified AppStream 2.0 resource. You
can tag AppStream 2.0 image builders, images, fleets, and stacks.
For more information about tags, see [Tagging Your
Resources](https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html)
in the *Amazon AppStream 2.0 Developer Guide*.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Starts the specified fleet.
"""
def start_fleet(client, input, options \\ []) do
request(client, "StartFleet", input, options)
end
@doc """
Starts the specified image builder.
"""
def start_image_builder(client, input, options \\ []) do
request(client, "StartImageBuilder", input, options)
end
@doc """
Stops the specified fleet.
"""
def stop_fleet(client, input, options \\ []) do
request(client, "StopFleet", input, options)
end
@doc """
Stops the specified image builder.
"""
def stop_image_builder(client, input, options \\ []) do
request(client, "StopImageBuilder", input, options)
end
@doc """
Adds or overwrites one or more tags for the specified AppStream 2.0
resource. You can tag AppStream 2.0 image builders, images, fleets, and
stacks.
Each tag consists of a key and an optional value. If a resource already has
a tag with the same key, this operation updates its value.
To list the current tags for your resources, use `ListTagsForResource`. To
disassociate tags from your resources, use `UntagResource`.
For more information about tags, see [Tagging Your
Resources](https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html)
in the *Amazon AppStream 2.0 Developer Guide*.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Disassociates one or more specified tags from the specified AppStream 2.0
resource.
To list the current tags for your resources, use `ListTagsForResource`.
For more information about tags, see [Tagging Your
Resources](https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html)
in the *Amazon AppStream 2.0 Developer Guide*.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates the specified Directory Config object in AppStream 2.0. This object
includes the information required to join streaming instances to an Active
Directory domain.
"""
def update_directory_config(client, input, options \\ []) do
request(client, "UpdateDirectoryConfig", input, options)
end
@doc """
Updates the specified fleet.
If the fleet is in the `STOPPED` state, you can update any attribute except
the fleet name. If the fleet is in the `RUNNING` state, you can update the
`DisplayName` and `ComputeCapacity` attributes. If the fleet is in the
`STARTING` or `STOPPING` state, you can't update it.
"""
def update_fleet(client, input, options \\ []) do
request(client, "UpdateFleet", input, options)
end
@doc """
Adds or updates permissions for the specified private image.
"""
def update_image_permissions(client, input, options \\ []) do
request(client, "UpdateImagePermissions", input, options)
end
@doc """
Updates the specified fields for the specified stack.
"""
def update_stack(client, input, options \\ []) do
request(client, "UpdateStack", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "appstream2"}
host = get_host("appstream2", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "PhotonAdminProxyService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
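# Hypothetical client usage. The map fields mirror those consumed by
# `request/4` and `AWS.Request.sign_v4/5`; how the credential map is built
# depends on the aws-elixir client in use:
#
#     client = %{access_key_id: "AKIA...", secret_access_key: "...",
#                region: "us-east-1", endpoint: "amazonaws.com",
#                proto: "https", port: 443, service: nil}
#     {:ok, result, _response} = AWS.AppStream.describe_fleets(client, %{})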
|
lib/aws/appstream.ex
| 0.850748 | 0.541954 |
appstream.ex
|
starcoder
|
defmodule FinTex.Tan.StartCode do
@moduledoc false
alias FinTex.Helper.Conversion
alias FinTex.Tan.DataElement
import Conversion
defdelegate render_data(m), to: DataElement
defdelegate bitsum(n, bits), to: DataElement
@bit_controlbyte 7
@type t :: %__MODULE__{
version: :hhd13 | :hhd14,
length: non_neg_integer,
lde: non_neg_integer,
control_bytes: [non_neg_integer],
data: String.t
}
defstruct [
:version,
:length,
:lde,
:control_bytes,
:data
]
def new(code) when is_binary(code) do
{lde, code} = code |> String.split_at(2)
{lde, _} = lde |> Integer.parse(16)
len = lde |> bitsum(5)
{{control_bytes, code}, version} = if lde |> bit?(@bit_controlbyte) do
{parse_control_bytes(code), :hhd14}
else
{{[], code}, :hhd13}
end
{data, code} = code |> String.split_at(len)
m = %__MODULE__{
version: version,
length: len,
lde: lde,
control_bytes: control_bytes,
data: data
}
{m, code}
end
def render_length(m = %{version: version, control_bytes: control_bytes}) do
s = DataElement.render_length(m, version)
if version == :hhd13 || control_bytes |> Enum.empty? do
s
else
reincode(m, s)
end
end
defp reincode(%{control_bytes: control_bytes}, s) do
use Bitwise
{len, _} = s |> Integer.parse(16)
len = if control_bytes |> Enum.count > 0 do
len + (1 <<< @bit_controlbyte)
else
len
end
len |> to_hex(2)
end
defp bit?(n, bit) when is_integer(n) and is_integer(bit) do
use Bitwise
n |> band(1 <<< bit) != 0
end
defp parse_control_bytes(bytes \\ [], code, counter \\ 0)
defp parse_control_bytes(bytes, code, 9) do
{bytes, code}
end
defp parse_control_bytes(bytes, code, counter) do
{control_byte, code} = code |> String.split_at(2)
{control_byte, _} = control_byte |> Integer.parse(16)
bytes = bytes ++ [control_byte]
if control_byte |> bit?(@bit_controlbyte) do
parse_control_bytes(bytes, code, counter + 1)
else
{bytes, code}
end
end
end
|
lib/tan/start_code.ex
| 0.504639 | 0.475179 |
start_code.ex
|
starcoder
|
defmodule Legion.Messaging.Switching.Globals do
@moduledoc """
Provides functions for altering/retrieving global switches to messaging.
**This module is NOT transaction-safe.**
## Enabling/disabling mediums
Suppose you need to disable the a medium globally. You might use `enable_medium/2` and
`disable_medium/2` functions to alter the runtime configuration.
enable_medium(some_user_or_id, :apm)
disable_medium(some_user_or_id, :apm)
Or, rather you can use convenience macros if you `require` them in your module.
require Legion.Messaging.Switching.Globals
enable_apm_medium(some_user_or_id)
disable_apm_medium(some_user_or_id)
Notice that, the underlying implementation will not insert a new registry entry if value for the
setting has not changed. Hence, calling those functions multiple times will not perform any write
operation.
## Redirecting a medium to another medium
Sometimes you may want to redirect a messaging medium to another medium, probably due to cost
reduction and integration maintenance.
You can redirect a medium to another medium with the following call.
redirect_medium(some_user_or_id, :mailing, :apm, for: 3_600)
The above API call will redirect all mailing messages to APM medium.
However, while sending a message, you might opt for restricting redirections on such operations.
send_sms_message(some_user_or_id, "this is the message", redirection: :restrict)
The message will not be sent to the user no matter what, what is more, it will throw an error to the user.
Some messages, like one-time-codes, should not be redirected to another medium.
The user of the messaging API can also force the actual medium to be run instead of redirection.
send_sms_message(some_user_or_id, "some pretty otc", redirection: :ignore)
If there was a redirection, it will be ignored, although the same rules for enabling/disabling medium for the actual
medium will be still applied.
"""
import Legion.Messaging.Message, only: :macros
import Legion.Messaging.Settings
alias Legion.Messaging.Message.Medium
alias Legion.Identity.Information.Registration, as: User
@apm_env Application.get_env(:legion, Legion.Messaging.Medium.APM)
@apm_state Keyword.fetch!(@apm_env, :is_enabled?)
@push_env Application.get_env(:legion, Legion.Messaging.Medium.Push)
@push_state Keyword.fetch!(@push_env, :is_enabled?)
@mailing_env Application.get_env(:legion, Legion.Messaging.Medium.Mailing)
@mailing_state Keyword.fetch!(@mailing_env, :is_enabled?)
@sms_env Application.get_env(:legion, Legion.Messaging.Medium.SMS)
@sms_state Keyword.fetch!(@sms_env, :is_enabled?)
@platform_env Application.get_env(:legion, Legion.Messaging.Medium.Platform)
@platform_state Keyword.fetch!(@platform_env, :is_enabled?)
@env Application.get_env(:legion, Legion.Messaging.Switching.Globals)
@history_buffer_len Keyword.fetch!(@env, :history_buffer_length)
@available_pushes Medium.__enum_map__()
@doc """
Enables APM medium.
This macro curries the `enable_medium/2` function with corresponding medium.
"""
defmacro enable_apm_medium(user_or_id),
do: quote(do: enable_medium(unquote(user_or_id), :apm))
@doc """
Enables push medium.
This macro curries the `enable_medium/2` function with corresponding medium.
"""
defmacro enable_push_medium(user_or_id),
do: quote(do: enable_medium(unquote(user_or_id), :push))
@doc """
Enables mailing medium.
This macro curries the `enable_medium/2` function with corresponding medium.
"""
defmacro enable_mailing_medium(user_or_id),
do: quote(do: enable_medium(unquote(user_or_id), :mailing))
@doc """
Enables SMS medium.
This macro curries the `enable_medium/2` function with corresponding medium.
"""
defmacro enable_sms_medium(user_or_id),
do: quote(do: enable_medium(unquote(user_or_id), :sms))
@doc """
Enables platform medium.
This macro curries the `enable_medium/2` function with corresponding medium.
"""
defmacro enable_platform_medium(user_or_id),
do: quote(do: enable_medium(unquote(user_or_id), :platform))
@doc """
Disables in-platform messaging medium.
This macro curries the `disable_medium/2` function with corresponding medium.
"""
defmacro disable_apm_medium(user_or_id),
do: quote(do: disable_medium(unquote(user_or_id), :apm))
@doc """
Disables push medium.
This macro curries the `disable_medium/2` function with corresponding medium.
"""
defmacro disable_push_medium(user_or_id),
do: quote(do: disable_medium(unquote(user_or_id), :push))
@doc """
Disables mailing medium.
This macro curries the `disable_medium/2` function with corresponding medium.
"""
defmacro disable_mailing_medium(user_or_id),
do: quote(do: disable_medium(unquote(user_or_id), :mailing))
@doc """
Disables SMS medium.
This macro curries the `disable_medium/2` function with corresponding medium.
"""
defmacro disable_sms_medium(user_or_id),
do: quote(do: disable_medium(unquote(user_or_id), :sms))
@doc """
Disables in-platform messaging medium.
This macro curries the `disable_medium/2` function with corresponding medium.
"""
defmacro disable_platform_medium(user_or_id),
do: quote(do: disable_medium(unquote(user_or_id), :platform))
@doc """
Enables given medium globally.
"""
@spec enable_medium(User.user_or_id(), Medium.t()) ::
:ok
| :error
def enable_medium(user_or_id, medium) when is_medium(medium),
do: set_medium_availability(user_or_id, medium, true)
@doc """
Disables given medium globally.
"""
@spec disable_medium(User.user_or_id(), Medium.t()) ::
:ok
| :error
def disable_medium(user_or_id, medium) when is_medium(medium),
do: set_medium_availability(user_or_id, medium, false)
@doc """
Returns a boolean value indicating if medium is enabled globally.
"""
def is_medium_enabled?(medium) when is_medium(medium) do
medium
|> medium_availability_key()
|> get(%{"next_value" => initial_availability(medium)})
|> Map.get("next_value")
end
for type <- @available_pushes do
defp initial_availability(unquote(type)) do
unquote(Module.get_attribute(__MODULE__, :"#{Atom.to_string(type)}_state"))
end
end
defp set_medium_availability(user, medium, availability)
when is_boolean(availability) and is_medium(medium) do
if is_medium_enabled?(medium) == availability do
:ok
else
key = medium_availability_key(medium)
put(user, key, %{next_value: availability})
end
end
@doc """
Redirects a medium to another medium.
## Examples
redirect_medium(user_id, :apm, :push) # redirects APM medium to push medium
redirect_medium(user_id, :apm, :platform) # redirects APM medium to platform medium
## Timed redirections
You may also redirect a medium to another medium for a given amount of time.
redirect_medium(user_id, :apm, :push, for: 3_600) # redirects APM medium to push medium for an hour
## Deferring redirections
Redirections could be also deferred for a given amount of time.
redirect_medium(user_id, :apm, :push, after: 3_600) # redirects APM medium to push medium after an hour
Redirections could be both deferred and timed.
The following usage implies both applications.
redirect_medium(user_id, :apm, :push, after: 3_600, for: 6_400) # same redirection, but active after an hour for two hours
Note that redirections can override each other.
The user interface for performing redirections should prompt whether the user is aware of overriding an existing redirection.
redirect_medium(user_id, :apm, :push) # redirects APM medium to push medium
redirect_medium(user_id, :apm, :mailing, for: 3_600) # redirects APM medium to mailing medium for an hour, afterwards push redirection will be active until further cancellation
See `cancel_redirection_for_medium/3` for cancelling redirections.
"""
@spec redirect_medium(User.user_or_id(), Medium.t(), Medium.t(), Keyword.t()) ::
:ok
| {:error, :invalid_duration}
| {:error, :invalid_deferral}
| {:error, :unavailable}
def redirect_medium(user_or_id, from, to, options \\ [])
when is_medium(from) and is_medium(to) do
key = medium_redirection_key(from)
valid_for = Keyword.get(options, :for, nil)
valid_after = Keyword.get(options, :after, 0)
cond do
valid_for < 0 ->
{:error, :invalid_duration}
valid_after < 0 ->
{:error, :invalid_deferral}
true ->
put(user_or_id, key, %{
action: :redirect,
to: to,
valid_for: valid_for,
valid_after: valid_after
})
end
end
@doc """
Cancels redirection setting currently applied for specified medium.
## Examples
redirect_medium(user_or_id, :apm, :mailing, for: 3_600) # redirects APM medium to mailing medium for an hour
cancel_redirection_for_medium(user_or_id, :apm) # all redirections for the APM medium are now cancelled
See `redirect_medium/4` for making redirections.
"""
@spec cancel_redirection_for_medium(User.user_or_id(), Medium.t()) ::
:ok
| {:error, :no_entry}
| {:error, :unavailable}
def cancel_redirection_for_medium(user_or_id, medium)
when is_medium(medium) do
if is_medium_redirected?(medium) do
key = medium_redirection_key(medium)
put(user_or_id, key, %{action: :cancel})
else
{:error, :no_entry}
end
end
@doc """
Returns true if given medium is redirected currently, otherwise false.
"""
@spec is_medium_redirected?(Medium.t()) :: boolean()
def is_medium_redirected?(medium)
when is_medium(medium) do
redirection_for_medium(medium) != nil
end
@doc """
Returns the redirection medium for the medium in given timestamp, if it is redirected.
Otherwise, returns `nil`.
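
  ## Examples

      # assuming :apm was previously redirected to :push and the
      # redirection is still active
      redirection_for_medium(:apm)
      #=> :push

      # no active redirection
      redirection_for_medium(:sms)
      #=> nil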
"""
@spec redirection_for_medium(Medium.t(), NaiveDateTime.t()) :: Medium.t() | nil
def redirection_for_medium(medium, timestamp \\ NaiveDateTime.utc_now())
when is_medium(medium) do
key = medium_redirection_key(medium)
entries = take(key, @history_buffer_len)
    case find_affecting_redirection(entries, timestamp) do
      nil ->
        nil

      # a cancellation entry supersedes any earlier redirection
      {%{"action" => "cancel"}, _inserted_at} ->
        nil

      {%{"to" => medium}, _inserted_at} ->
        String.to_existing_atom(medium)
    end
end
defp find_affecting_redirection([head | tail], timestamp) do
if is_redirection_active(head, timestamp),
do: head,
else: find_affecting_redirection(tail, timestamp)
end
defp find_affecting_redirection([], _), do: nil
defp is_redirection_active({entry, inserted_at}, timestamp) do
valid_for = Map.get(entry, "valid_for")
valid_after = Map.get(entry, "valid_after", 0)
activation_time = NaiveDateTime.add(inserted_at, valid_after)
if NaiveDateTime.compare(timestamp, activation_time) in [:gt, :eq] do
if valid_for do
valid_until = NaiveDateTime.add(activation_time, valid_for)
NaiveDateTime.compare(timestamp, valid_until) == :lt
else
true
end
else
false
end
end
defp medium_availability_key(medium) when is_medium(medium),
do: "Messaging.Switching.Globals.is_#{Atom.to_string(medium)}_enabled?"
defp medium_redirection_key(medium) when is_medium(medium),
do: "Messaging.Switching.Globals.#{Atom.to_string(medium)}_redirection"
end
|
apps/legion/lib/messaging/switching/globals.ex
| 0.804329 | 0.481515 |
globals.ex
|
starcoder
|
defmodule AWS.MigrationHub do
@moduledoc """
The AWS Migration Hub API methods help to obtain server and application
migration status and integrate your resource-specific migration tool by
providing a programmatic interface to Migration Hub.
Remember that you must set your AWS Migration Hub home region before you
call any of these APIs, or a `HomeRegionNotSetException` error will be
returned. Also, you must make the API calls while in your home region.
"""
@doc """
Associates a created artifact of an AWS cloud resource, the target
receiving the migration, with the migration task performed by a migration
tool. This API has the following traits:
<ul> <li> Migration tools can call the `AssociateCreatedArtifact` operation
to indicate which AWS artifact is associated with a migration task.
</li> <li> The created artifact name must be provided in ARN (Amazon
Resource Name) format which will contain information about type and region;
for example: `arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b`.
</li> <li> Examples of the AWS resource behind the created artifact are,
AMI's, EC2 instance, or DMS endpoint, etc.
</li> </ul>
"""
def associate_created_artifact(client, input, options \\ []) do
request(client, "AssociateCreatedArtifact", input, options)
end
@doc """
Associates a discovered resource ID from Application Discovery Service with
a migration task.
"""
def associate_discovered_resource(client, input, options \\ []) do
request(client, "AssociateDiscoveredResource", input, options)
end
@doc """
Creates a progress update stream which is an AWS resource used for access
control as well as a namespace for migration task names that is implicitly
linked to your AWS account. It must uniquely identify the migration tool as
it is used for all updates made by the tool; however, it does not need to
be unique for each AWS account because it is scoped to the AWS account.
"""
def create_progress_update_stream(client, input, options \\ []) do
request(client, "CreateProgressUpdateStream", input, options)
end
@doc """
Deletes a progress update stream, including all of its tasks, which was
previously created as an AWS resource used for access control. This API has
the following traits:
<ul> <li> The only parameter needed for `DeleteProgressUpdateStream` is the
stream name (same as a `CreateProgressUpdateStream` call).
</li> <li> The call will return, and a background process will
asynchronously delete the stream and all of its resources (tasks,
associated resources, resource attributes, created artifacts).
</li> <li> If the stream takes time to be deleted, it might still show up
on a `ListProgressUpdateStreams` call.
</li> <li> `CreateProgressUpdateStream`, `ImportMigrationTask`,
`NotifyMigrationTaskState`, and all Associate[*] APIs related to the tasks
belonging to the stream will throw "InvalidInputException" if the stream of
the same name is in the process of being deleted.
</li> <li> Once the stream and all of its resources are deleted,
`CreateProgressUpdateStream` for a stream of the same name will succeed,
and that stream will be an entirely new logical resource (without any
resources associated with the old stream).
</li> </ul>
"""
def delete_progress_update_stream(client, input, options \\ []) do
request(client, "DeleteProgressUpdateStream", input, options)
end
@doc """
Gets the migration status of an application.
"""
def describe_application_state(client, input, options \\ []) do
request(client, "DescribeApplicationState", input, options)
end
@doc """
Retrieves a list of all attributes associated with a specific migration
task.
"""
def describe_migration_task(client, input, options \\ []) do
request(client, "DescribeMigrationTask", input, options)
end
@doc """
Disassociates a created artifact of an AWS resource with a migration task
performed by a migration tool that was previously associated. This API has
the following traits:
<ul> <li> A migration user can call the `DisassociateCreatedArtifacts`
operation to disassociate a created AWS Artifact from a migration task.
</li> <li> The created artifact name must be provided in ARN (Amazon
Resource Name) format which will contain information about type and region;
for example: `arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b`.
</li> <li> Examples of the AWS resource behind the created artifact are,
AMI's, EC2 instance, or RDS instance, etc.
</li> </ul>
"""
def disassociate_created_artifact(client, input, options \\ []) do
request(client, "DisassociateCreatedArtifact", input, options)
end
@doc """
Disassociate an Application Discovery Service discovered resource from a
migration task.
"""
def disassociate_discovered_resource(client, input, options \\ []) do
request(client, "DisassociateDiscoveredResource", input, options)
end
@doc """
Registers a new migration task which represents a server, database, etc.,
being migrated to AWS by a migration tool.
This API is a prerequisite to calling the `NotifyMigrationTaskState` API as
the migration tool must first register the migration task with Migration
Hub.
"""
def import_migration_task(client, input, options \\ []) do
request(client, "ImportMigrationTask", input, options)
end
@doc """
Lists all the migration statuses for your applications. If you use the
optional `ApplicationIds` parameter, only the migration statuses for those
applications will be returned.
"""
def list_application_states(client, input, options \\ []) do
request(client, "ListApplicationStates", input, options)
end
@doc """
Lists the created artifacts attached to a given migration task in an update
stream. This API has the following traits:
<ul> <li> Gets the list of the created artifacts while migration is taking
place.
</li> <li> Shows the artifacts created by the migration tool that was
associated by the `AssociateCreatedArtifact` API.
</li> <li> Lists created artifacts in a paginated interface.
</li> </ul>
"""
def list_created_artifacts(client, input, options \\ []) do
request(client, "ListCreatedArtifacts", input, options)
end
@doc """
Lists discovered resources associated with the given `MigrationTask`.
"""
def list_discovered_resources(client, input, options \\ []) do
request(client, "ListDiscoveredResources", input, options)
end
@doc """
Lists all, or filtered by resource name, migration tasks associated with
the user account making this call. This API has the following traits:
<ul> <li> Can show a summary list of the most recent migration tasks.
</li> <li> Can show a summary list of migration tasks associated with a
given discovered resource.
</li> <li> Lists migration tasks in a paginated interface.
</li> </ul>
"""
def list_migration_tasks(client, input, options \\ []) do
request(client, "ListMigrationTasks", input, options)
end
@doc """
Lists progress update streams associated with the user account making this
call.
"""
def list_progress_update_streams(client, input, options \\ []) do
request(client, "ListProgressUpdateStreams", input, options)
end
@doc """
Sets the migration state of an application. For a given application
identified by the value passed to `ApplicationId`, its status is set or
updated by passing one of three values to `Status`: `NOT_STARTED |
IN_PROGRESS | COMPLETED`.
"""
def notify_application_state(client, input, options \\ []) do
request(client, "NotifyApplicationState", input, options)
end
@doc """
Notifies Migration Hub of the current status, progress, or other detail
regarding a migration task. This API has the following traits:
<ul> <li> Migration tools will call the `NotifyMigrationTaskState` API to
share the latest progress and status.
</li> <li> `MigrationTaskName` is used for addressing updates to the
correct target.
</li> <li> `ProgressUpdateStream` is used for access control and to provide
a namespace for each migration tool.
</li> </ul>
"""
def notify_migration_task_state(client, input, options \\ []) do
request(client, "NotifyMigrationTaskState", input, options)
end
@doc """
Provides identifying details of the resource being migrated so that it can
be associated in the Application Discovery Service repository. This
association occurs asynchronously after `PutResourceAttributes` returns.
<important> <ul> <li> Keep in mind that subsequent calls to
PutResourceAttributes will override previously stored attributes. For
example, if it is first called with a MAC address, but later, it is desired
to *add* an IP address, it will then be required to call it with *both* the
IP and MAC addresses to prevent overriding the MAC address.
</li> <li> Note the instructions regarding the special use case of the [
`ResourceAttributeList`
](https://docs.aws.amazon.com/migrationhub/latest/ug/API_PutResourceAttributes.html#migrationhub-PutResourceAttributes-request-ResourceAttributeList)
parameter when specifying any "VM" related value.
</li> </ul> </important> <note> Because this is an asynchronous call, it
will always return 200, whether an association occurs or not. To confirm if
an association was found based on the provided details, call
`ListDiscoveredResources`.
</note>
"""
def put_resource_attributes(client, input, options \\ []) do
request(client, "PutResourceAttributes", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "mgh"}
host = build_host("mgh", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSMigrationHub.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
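# Hypothetical usage: mark an application's migration as complete. The input
# shape follows the AWS API documented above; the client map is built as for
# the other aws-elixir service modules:
#
#     input = %{"ApplicationId" => "app-12345", "Status" => "COMPLETED"}
#     {:ok, _result, _response} =
#       AWS.MigrationHub.notify_application_state(client, input)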
|
lib/aws/migration_hub.ex
| 0.873012 | 0.472014 |
migration_hub.ex
|
starcoder
|
if Code.ensure_loaded?(Ecto.Type) do
defmodule Cldr.UnitWithUsage.Ecto.Map.Type do
@moduledoc """
Implements Ecto.Type behaviour for `Cldr.Unit`, where the underlying schema type
is a map.
This is the required option for databases such as MySQL that do not support
composite types.
In order to preserve precision, the value is serialized as a string since the
JSON representation of a numeric value is either an integer or a float.
`Decimal.to_string/1` is not guaranteed to produce a string that will round-trip
  convert back to the identical number. However, given enough precision in the
  [Decimal context](https://hexdocs.pm/decimal/Decimal.Context.html#content),
  round-trip conversion can be expected. The default precision in the context
is 28 digits.
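
  ## Example

  A hypothetical dump/load round trip (unit and usage values are
  illustrative):

      {:ok, unit} = Cldr.Unit.new(:meter, Decimal.new("1.5"), usage: :road)

      {:ok, map} = Cldr.UnitWithUsage.Ecto.Map.Type.dump(unit)
      #=> {:ok, %{"unit" => "meter", "value" => "1.5", "usage" => "road"}}

      {:ok, _unit} = Cldr.UnitWithUsage.Ecto.Map.Type.load(map)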
"""
@behaviour Ecto.Type
defdelegate cast(unit), to: Cldr.Unit.Ecto.Composite.Type
# New for ecto_sql 3.2
defdelegate embed_as(term), to: Cldr.Unit.Ecto.Composite.Type
defdelegate equal?(term1, term2), to: Cldr.Unit.Ecto.Composite.Type
def type() do
:map
end
# "New" values with usage
def load(%{"unit" => unit_name, "value" => value, "usage" => usage}) when is_binary(value) do
with {value, ""} <- Cldr.Decimal.parse(value),
{:ok, unit} <- Cldr.Unit.new(unit_name, value, usage: usage) do
{:ok, unit}
else
_ -> :error
end
end
# "New" values with usage
def load(%{"unit" => unit_name, "value" => value, "usage" => usage}) when is_integer(value) do
with {:ok, unit} <- Cldr.Unit.new(unit_name, value, usage: usage) do
{:ok, unit}
else
_ -> :error
end
end
# "Old" values
def load(%{"unit" => unit_name, "value" => value}) when is_binary(value) do
with {value, ""} <- Cldr.Decimal.parse(value),
{:ok, unit} <- Cldr.Unit.new(unit_name, value) do
{:ok, unit}
else
_ -> :error
end
end
# "Old" values
def load(%{"unit" => unit_name, "value" => value}) when is_integer(value) do
with {:ok, unit} <- Cldr.Unit.new(unit_name, value) do
{:ok, unit}
else
_ -> :error
end
end
def dump(%Cldr.Unit{unit: unit_name, value: value, usage: usage}) do
{:ok,
%{"unit" => to_string(unit_name), "value" => to_string(value), "usage" => to_string(usage)}}
end
def dump(_) do
:error
end
end
end
|
lib/cldr/unit/ecto/unit_with_usage_ecto_map_type.ex
| 0.744285 | 0.648543 |
unit_with_usage_ecto_map_type.ex
|
starcoder
|
defmodule Milight.Light.RGBW do
alias Milight.Light.RGBW
@type t :: %RGBW{}
@type command :: :on | :off | {:hue, float} | {:brightness, float}
@type group :: 1..4
defstruct c: :on, group: nil
def on(), do: %RGBW{c: :on}
def off(), do: %RGBW{c: :off}
def hue(v) when v >= 0.0 and v <= 1.0 do
%RGBW{c: {:hue, v}}
end
def brightness(v) when v >= 0.0 and v <= 1.0 do
%RGBW{c: {:brightness, v}}
end
def group(cmd, group) when group in 1..4 do
%RGBW{cmd | group: group}
end
defimpl Milight.Command.Encodable do
import Milight.Command
@spec encode(RGBW.t) :: [Encodable.code]
def encode(%RGBW{c: c, group: g}), do: encode(c, g)
defp encode(:on, nil), do: [packet(0x42, 0x00), delay()]
defp encode(:on, 1), do: [packet(0x45, 0x00), delay()]
defp encode(:on, 2), do: [packet(0x47, 0x00), delay()]
defp encode(:on, 3), do: [packet(0x49, 0x00), delay()]
defp encode(:on, 4), do: [packet(0x4b, 0x00), delay()]
defp encode(:off, nil), do: [packet(0x41, 0x00)]
defp encode(:off, 1), do: [packet(0x46, 0x00)]
defp encode(:off, 2), do: [packet(0x48, 0x00)]
defp encode(:off, 3), do: [packet(0x4a, 0x00)]
defp encode(:off, 4), do: [packet(0x4c, 0x00)]
defp encode({:hue, v}, group) do
encode(:on, group) ++ [packet(0x40, encode_hue(v))]
end
defp encode({:brightness, v}, group) do
encode(:on, group) ++ [packet(0x4e, encode_brightness(v))]
end
@spec encode_hue(float) :: byte
    defp encode_hue(v) do
      # invert and shift the hue so it lines up with the Milight colour wheel,
      # then wrap back into (0, 1]
      v = (1.0 - v) + (2 / 3)
      v = if v > 1.0, do: v - 1.0, else: v
      # clamp so v == 1.0 still fits in a single byte
      min(trunc(v * 256), 255)
    end
@spec encode_brightness(float) :: byte
defp encode_brightness(v) do
trunc(v * 25) + 2
end
end
defimpl Milight.Command.Mergeable do
@spec merge(RGBW.t, RGBW.t) :: RGBW.t | false
def merge(%RGBW{c: lhs, group: g}, %RGBW{c: rhs, group: g}) do
if c = merge_command(lhs, rhs) do
%RGBW{c: c, group: g}
else
false
end
end
def merge(_, _) do
false
end
defp merge_command(:on = lhs, :off), do: lhs
defp merge_command(:off = lhs, :on), do: lhs
defp merge_command(:on, {c, _} = rhs) when c in [:hue, :brightness], do: rhs
defp merge_command({c, _} = lhs, {c, _}) when c in [:hue, :brightness], do: lhs
defp merge_command({c, _} = lhs, :on) when c in [:hue, :brightness], do: lhs
defp merge_command(_, _), do: false
end
end
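# Hypothetical usage: build a command, target a group, then encode it via the
# `Milight.Command.Encodable` protocol implemented above:
#
#     alias Milight.Light.RGBW
#
#     RGBW.brightness(0.5)
#     |> RGBW.group(2)
#     |> Milight.Command.Encodable.encode()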
|
lib/milight/light/rgbw.ex
| 0.841109 | 0.435181 |
rgbw.ex
|
starcoder
|
defmodule Money.DDL do
@moduledoc """
Functions to return SQL DDL commands that support the
creation and deletion of the `money_with_currency` database
type and associated aggregate functions.
"""
# @doc since: "2.7.0"
@default_db :postgres
@supported_db_types :code.priv_dir(:ex_money_sql)
|> Path.join("SQL")
|> File.ls!()
|> Enum.map(&String.to_atom/1)
@doc """
Returns the SQL string which when executed will
define the `money_with_currency` data type.
## Arguments
* `db_type`: the type of the database for which the SQL
string should be returned. Defaults to `:postgres` which
is currently the only supported database type.
"""
def create_money_with_currency(db_type \\ @default_db) do
read_sql_file(db_type, "create_money_with_currency.sql")
end
@doc """
Returns the SQL string which when executed will
drop the `money_with_currency` data type.
## Arguments
* `db_type`: the type of the database for which the SQL
string should be returned. Defaults to `:postgres` which
is currently the only supported database type.
"""
def drop_money_with_currency(db_type \\ @default_db) do
read_sql_file(db_type, "drop_money_with_currency.sql")
end
@doc """
Returns the SQL string which when executed will
define aggregate functions for the `money_with_currency`
data type.
## Arguments
* `db_type`: the type of the database for which the SQL
string should be returned. Defaults to `:postgres` which
is currently the only supported database type.
"""
def define_aggregate_functions(db_type \\ @default_db) do
read_sql_file(db_type, "define_aggregate_functions.sql")
end
@doc """
Returns the SQL string which when executed will
drop the aggregate functions for the `money_with_currency`
data type.
## Arguments
* `db_type`: the type of the database for which the SQL
string should be returned. Defaults to `:postgres` which
is currently the only supported database type.
"""
def drop_aggregate_functions(db_type \\ @default_db) do
read_sql_file(db_type, "drop_aggregate_functions.sql")
end
@doc """
Returns the SQL string which when executed will
define a `+` operator for the `money_with_currency`
data type.
## Arguments
* `db_type`: the type of the database for which the SQL
string should be returned. Defaults to `:postgres` which
is currently the only supported database type.
"""
def define_plus_operator(db_type \\ @default_db) do
read_sql_file(db_type, "define_plus_operator.sql")
end
@doc """
Returns the SQL string which when executed will
drop the `+` operator for the `money_with_currency`
data type.
## Arguments
* `db_type`: the type of the database for which the SQL
string should be returned. Defaults to `:postgres` which
is currently the only supported database type.
"""
def drop_plus_operator(db_type \\ @default_db) do
read_sql_file(db_type, "drop_plus_operator.sql")
end
@doc """
Returns a string that will Ecto `execute` each SQL
command.
## Arguments
* `sql` is a string of SQL commands that are
separated by three newlines ("\\n"),
that is to say two blank lines between commands
in the file.
## Example
      iex> Money.DDL.execute_each "SELECT name FROM customers;\n\n\nSELECT id FROM orders;"
      "execute \"SELECT name FROM customers;\"\nexecute \"SELECT id FROM orders;\""
"""
def execute_each(sql) do
sql
|> String.split("\n\n\n")
|> Enum.map(&execute/1)
|> Enum.join("\n")
end
@doc """
Returns a string that will Ecto `execute` a single SQL
command.
## Arguments
* `sql` is a single SQL command
## Example
iex> Money.DDL.execute "SELECT name FROM customers;"
"execute \"SELECT name FROM customers;\""
"""
def execute(sql) do
sql = String.trim_trailing(sql, "\n")
if String.contains?(sql, "\n") do
"execute \"\"\"\n" <> sql <> "\n\"\"\""
else
"execute " <> inspect(sql)
end
end
defp read_sql_file(db_type, file_name) when db_type in @supported_db_types do
base_dir(db_type)
|> Path.join(file_name)
|> File.read!()
end
defp read_sql_file(db_type, file_name) do
raise ArgumentError,
"Database type #{db_type} does not have a SQL definition " <> "file #{inspect(file_name)}"
end
@app Mix.Project.config[:app]
defp base_dir(db_type) do
:code.priv_dir(@app)
|> Path.join(["SQL", "/#{db_type}"])
end
end
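# A hypothetical Ecto migration using these helpers (assumes the SQL file
# holds a single statement; for multi-statement files, `execute_each/1`
# generates one `execute` call per statement):
#
#     defmodule MyApp.Repo.Migrations.AddMoneyWithCurrency do
#       use Ecto.Migration
#
#       def up do
#         execute(Money.DDL.create_money_with_currency())
#       end
#
#       def down do
#         execute(Money.DDL.drop_money_with_currency())
#       end
#     end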
|
lib/money/ddl.ex
| 0.828904 | 0.562447 |
ddl.ex
|
starcoder
|
defmodule Cumulus do
alias Cumulus.{Bucket, Object}
alias HTTPoison.Response
@api_host "https://www.googleapis.com"
@storage_namespace "storage/v1"
@upload_namespace "upload/storage/v1"
@auth_scope "https://www.googleapis.com/auth/cloud-platform"
@doc """
This is the function responsible for returning the URL of a given bucket /
object combination.
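
  ## Examples

      iex> Cumulus.object_url("my-bucket", "path/to file.txt")
      "https://www.googleapis.com/storage/v1/b/my-bucket/o/path%2Fto+file.txt"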
"""
def object_url(bucket, object) when is_binary(bucket) and is_binary(object),
do: "#{bucket_url(bucket)}/#{object_namespace(object)}"
@doc """
This is the function responsible for returning the URL of a given bucket /
object combination's media (i.e., the file itself, not the metadata about the
file).
"""
def object_media_url(bucket, object) when is_binary(bucket) and is_binary(object),
do: "#{object_url(bucket, object)}?alt=media"
@doc """
This is the function responsible for returning the URL of a given bucket.
"""
def bucket_url(bucket) when is_binary(bucket),
do: "#{@api_host}/#{@storage_namespace}/#{bucket_namespace(bucket)}"
@doc """
This is the function responsible for returning the URL of a given bucket
for upload purposes. This is separate from `bucket_url/1` because Google
uses a different endpoint for uploading files.
"""
def bucket_upload_url(bucket, object) when is_binary(bucket) and is_binary(object),
do: "#{@api_host}/#{@upload_namespace}/#{bucket_namespace(bucket)}/o?uploadType=resumable&name=#{object}"
@doc """
This is the function responsible for finding a bucket in Google Cloud Storage
and returning it. Possible return values are:
- `{:error, :not_found}` is used for buckets that are not found in the system
- `{:error, :not_authorized}` is used for buckets that you do not have access
to
- `{:error, :invalid_format}` is used for responses where we cannot parse the
response as a bucket
- `{:error, :invalid_request}` is used for requests where the bucket name is
invalid
- `{:ok, bucket}` is for successful responses and where we can successfully
parse the response as a bucket
"""
def get_bucket(bucket) when is_binary(bucket) do
with {:ok, %Response{body: body, status_code: 200}} <- HTTPoison.get(bucket_url(bucket), [auth_header()]),
{:ok, data} <- Poison.decode(body),
{:ok, bucket} <- Bucket.from_json(data) do
{:ok, bucket}
else
{:ok, %Response{status_code: 400}} -> {:error, :invalid_request}
{:ok, %Response{status_code: 401}} -> {:error, :not_authorized}
{:ok, %Response{status_code: 404}} -> {:error, :not_found}
{:error, :invalid_format} -> {:error, :invalid_format}
end
end
@doc """
This is the function responsible for finding an object in Google Cloud Storage
and returning it. Possible return values are:
- `{:error, :not_found}` is used for buckets that are not found in the system
- `{:error, :not_authorized}` is used for buckets that you do not have access
to
- `{:error, :invalid_format}` is used for responses where we cannot parse the
response as an object
- `{:error, :invalid_request}` is used for requests where the bucket or
object name is invalid
- `{:ok, object}` is for successful responses where we can successfully
parse the response as an object
"""
def get_object(bucket, object) when is_binary(bucket) and is_binary(object) do
with {:ok, %Response{body: body, status_code: 200}} <- HTTPoison.get(object_url(bucket, object), [auth_header()]),
{:ok, data} <- Poison.decode(body),
{:ok, object} <- Object.from_json(data) do
{:ok, object}
else
{:ok, %Response{status_code: 400}} -> {:error, :invalid_request}
{:ok, %Response{status_code: 401}} -> {:error, :not_authorized}
{:ok, %Response{status_code: 404}} -> {:error, :not_found}
{:error, :invalid_format} -> {:error, :invalid_format}
end
end
@doc """
This function is used to save a file into a given bucket.
Valid options include:
  - `mime` is the MIME type of the file. This is useful for handling uploads in
Plug, since it does not save a file extension in the file path.
Possible return values are:
- `{:error, :not_found}` is used for buckets that are not found in the system
- `{:error, :not_authorized}` is used for buckets that you do not have access
to
- `{:error, :invalid_request}` is used for requests where the bucket or
object name is invalid
- `{:ok, object}` means the file was saved successfully
"""
def save_object(bucket, object, filepath, opts \\ [], key \\ nil, hash \\ nil) do
headers =
case [key, hash] do
[k, h] when is_binary(k) and is_binary(h) -> crypt_headers(k, h)
_ -> [auth_header()]
end
mime = Keyword.get(opts, :mime, MIME.from_path(filepath))
headers = [{:"X-Upload-Content-Type", mime} | headers]
case HTTPoison.post(bucket_upload_url(bucket, object), "", headers) do
{:ok, %Response{status_code: 200, headers: headers}} ->
location = get_location(headers)
put_file(location, filepath, key, hash)
{:ok, %Response{status_code: 400}} -> {:error, :invalid_request}
{:ok, %Response{status_code: 401}} -> {:error, :not_authorized}
{:ok, %Response{status_code: 404}} -> {:error, :not_found}
end
end
@doc """
This is the function responsible for finding an object in Google Cloud Storage
and deleting it. Possible return values are:
- `{:error, :not_found}` is used for buckets that are not found in the system
- `{:error, :not_authorized}` is used for buckets that you do not have access
to
- `{:error, :invalid_request}` is used for requests where the bucket or
object name is invalid
  - `:ok` means the object was deleted successfully
"""
def delete_object(bucket, object) do
case HTTPoison.delete(object_url(bucket, object), [auth_header()]) do
{:ok, %Response{status_code: 204}} -> :ok
{:ok, %Response{status_code: 400}} -> {:error, :invalid_request}
{:ok, %Response{status_code: 401}} -> {:error, :not_authorized}
{:ok, %Response{status_code: 404}} -> {:error, :not_found}
end
end
@doc """
This is the function responsible for finding an object in Google Cloud Storage
and returning the file itself. Possible return values are:
- `{:error, :not_found}` is used for buckets that are not found in the system
- `{:error, :not_authorized}` is used for buckets that you do not have access
to
- `{:error, :invalid_request}` is used for requests where the bucket or
object name is invalid
- `{:ok, body}` is used to return the object's contents
"""
def get_object_media(bucket, object, key \\ nil, hash \\ nil) when is_binary(bucket) and is_binary(object) do
headers =
case [key, hash] do
[k, h] when is_binary(k) and is_binary(h) -> crypt_headers(k, h)
_ -> [auth_header()]
end
case HTTPoison.get(object_media_url(bucket, object), headers) do
{:ok, %Response{status_code: 200, body: body}} -> {:ok, body}
{:ok, %Response{status_code: 400}} -> {:error, :invalid_request}
{:ok, %Response{status_code: 401}} -> {:error, :not_authorized}
{:ok, %Response{status_code: 404}} -> {:error, :not_found}
end
end
defp get_location(headers) do
Enum.reduce(headers, nil, &check_location_header/2)
end
defp check_location_header({"Location", value}, _), do: value
defp check_location_header({_, _}, acc), do: acc
defp auth_header do
{:ok, %Goth.Token{token: token, type: type}} = Goth.Token.for_scope(@auth_scope)
{:Authorization, "#{type} #{token}"}
end
defp crypt_headers(key, hash) when is_binary(key) and is_binary(hash) do
[auth_header() | [
"x-goog-encryption-algorithm": "AES256",
"x-goog-encryption-key": key,
"x-goog-encryption-key-sha256": hash
]]
end
defp bucket_namespace(bucket) when is_binary(bucket), do: "b/#{bucket}"
defp object_namespace(object) when is_binary(object),
do: "o/#{encode_path_component(object)}"
defp encode_path_component(component), do: URI.encode_www_form(component)
defp put_file(location, filepath, key, hash) do
headers =
case [key, hash] do
[k, h] when is_binary(k) and is_binary(h) -> crypt_headers(k, h)
_ -> [auth_header()]
end
with {:ok, bytes} <- File.read(filepath),
{:ok, %Response{status_code: 200, body: body}} <- HTTPoison.put(location, bytes, headers),
{:ok, data} <- Poison.decode(body),
{:ok, object} <- Object.from_json(data) do
{:ok, object}
else
{:ok, %Response{status_code: 400}} -> {:error, :invalid_request}
{:ok, %Response{status_code: 401}} -> {:error, :not_authorized}
{:ok, %Response{status_code: 404}} -> {:error, :not_found}
{:error, :invalid_format} -> {:error, :invalid_format}
end
end
end
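# Hypothetical usage (bucket/object names are illustrative; Goth must be
# configured with service-account credentials covering the auth scope above):
#
#     {:ok, object} = Cumulus.save_object("my-bucket", "photos/cat.jpg", "/tmp/cat.jpg")
#     {:ok, body} = Cumulus.get_object_media("my-bucket", "photos/cat.jpg")
#     :ok = Cumulus.delete_object("my-bucket", "photos/cat.jpg")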
|
lib/cumulus.ex
| 0.790409 | 0.461805 |
cumulus.ex
|
starcoder
|
defmodule HubGateway.ZWave.Devices.Thermostat do
@moduledoc """
  This module represents the capabilities of a Z-Wave thermostat device.
"""
use HubGateway.ZWave.Device,
core_type: "thermostat",
attributes: [
{"unit_type", "temperature unit"},
{"system_mode", "mode"},
{"running_mode", "operating state"},
{"current_temp", "temperature"},
{"occ_heat_sp", "heating 1"},
{"occ_cool_sp", "cooling 1"},
{"fan_mode", "fan mode"},
{"fan_state", "fan state"},
{"battery", "battery level"},
]
@allowed_fan_mode [
"Auto Low",
"On Low",
"Auto High",
"On High",
"Circulate",
]
@allowed_fan_state [
"Idle",
"Running",
"Running High",
]
def get_core_type(_), do: "thermostat"
def get_value_for_type("system_mode", value) when value in ~w(auto off heat cool), do: {:ok, String.capitalize(value)}
def get_value_for_type(setpoint, value) when setpoint in ~w(occ_heat_sp occ_cool_sp)
and is_number(value), do: {:ok, value}
def get_value_for_type("fan_mode" = attr, value) do
norm_value = norm_zwave_str(value)
if norm_value in @allowed_fan_mode do
{:ok, norm_value}
else
{:error, "Invalid value, '#{value}', for attribute '#{attr}'"}
end
end
def get_value_for_attribute("temperature unit", "Celsius"), do: {:ok, "c"}
def get_value_for_attribute("temperature unit", _), do: {:ok, "f"}
def get_value_for_attribute("mode", "Aux Heat"), do: {:ok, "heat"}
def get_value_for_attribute("mode", mode) when mode in ~w(Auto Off Heat Cool), do: {:ok, String.downcase(mode)}
def get_value_for_attribute("operating state", "Idle"), do: {:ok, "idle"}
def get_value_for_attribute("operating state", "Fan Only"), do: {:ok, "fan_only"}
def get_value_for_attribute("operating state", heat_state) when heat_state in ["Heating", "Pending Heat"], do: {:ok, "heat"}
def get_value_for_attribute("operating state", cool_state) when cool_state in ["Cooling", "Pending Cool"], do: {:ok, "cool"}
def get_value_for_attribute(temps, temp)
when temps in ["temperature", "heating 1", "cooling 1"] and
is_number(temp) and temp > 0, do: {:ok, temp}
def get_value_for_attribute("fan mode", value) when value in @allowed_fan_mode, do: {:ok, denorm_zwave_str(value)}
def get_value_for_attribute("fan state", value) when value in @allowed_fan_state, do: {:ok, denorm_zwave_str(value)}
def get_value_for_attribute("battery level", battery) when is_number(battery) and battery >= 0, do: {:ok, battery}
end
|
hgw/lib/hub_gateway/helpers/zwave/devices/thermostat.ex
| 0.776665 | 0.435121 |
thermostat.ex
|
starcoder
|
defmodule AdventOfCode.Y2020.Day12_1 do
def test_data() do
"""
F10
N3
F7
R90
F11
"""
|> String.split("\n", trim: true)
end
# Data structure is {position, direction}; both are {x, y} pairs where
# positive x points east and positive y points north
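# For example, starting at the origin facing east, "R90" leaves the position
# unchanged and rotates the direction to south:
#
#   execute("R90", {{0, 0}, {1, 0}})
#   #=> {{0, 0}, {0, -1}}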
def run() do
# test_data()
AdventOfCode.Helpers.Data.read_from_file("2020/day12.txt")
|> iterate()
|> IO.inspect()
|> distance()
end
def distance({west, north}), do: abs(west) + abs(north)
def iterate(instructions) do
iterate(instructions, {{0, 0}, {1, 0}}, [])
end
def iterate([], {position, _}, history) do
IO.inspect(history, label: "history")
position
end
def iterate([h | t], boat, history) do
new_boat = execute(h, boat)
iterate(t, new_boat, [new_boat | history])
end
def execute(<<instr::binary-size(1), distance::binary>>, boat) do
execute(instr, String.to_integer(distance), boat)
end
def execute("N", distance, boat), do: execute({0, 1}, distance, boat)
def execute("S", distance, boat), do: execute({0, -1}, distance, boat)
def execute("W", distance, boat), do: execute({-1, 0}, distance, boat)
def execute("E", distance, boat), do: execute({1, 0}, distance, boat)
def execute("F", distance, {_, direction} = boat),
do: execute(direction, distance, boat)
def execute("R", 0, boat), do: boat
def execute("R", degrees, {pos, direction}) do
new_direction =
case direction do
{1, 0} -> {0, -1}
{0, -1} -> {-1, 0}
{-1, 0} -> {0, 1}
{0, 1} -> {1, 0}
end
execute("R", degrees - 90, {pos, new_direction})
end
def execute("L", 0, boat), do: boat
def execute("L", degrees, {pos, direction}) do
new_direction =
case direction do
{1, 0} -> {0, 1}
{0, 1} -> {-1, 0}
{-1, 0} -> {0, -1}
{0, -1} -> {1, 0}
end
execute("L", degrees - 90, {pos, new_direction})
end
def execute({west, north}, distance, {{pos_w, pos_n}, direction}) do
{{pos_w + west * distance, pos_n + north * distance}, direction}
end
end
|
lib/2020/day12_1.ex
| 0.595728 | 0.591576 |
day12_1.ex
|
starcoder
|
defmodule Akd.Build.Phoenix.Brunch do
@moduledoc """
A native Hook module that comes shipped with Akd.
This module uses `Akd.Hook`.
Provides a set of operations that build a brunch release for a given phoenix app
at a deployment's `build_at` destination. This hook assumes that an executable
brunch binary file is already present, either initialized by
a previously executed hook or placed there manually.
Ensures the deps folder created by this build is cleaned up and emptied.
Doesn't have any Rollback operations.
# Options:
* `run_ensure`: `boolean`. Specifies whether or not to run the ensure commands.
* `ignore_failure`: `boolean`. Specifies whether to continue if this hook fails.
* `brunch`: `string`. Path to brunch executable from project's root.
* `brunch_config`: `string`. Path to brunch config from project's root.
* `cmd_envs`: `list` of `tuples`. Specifies the environments to provide while
building the distillery release.
# Defaults:
* `run_ensure`: `true`
* `ignore_failure`: `false`
* `brunch`: "node_modules/brunch/bin/brunch"
* `brunch_config`: "."
"""
use Akd.Hook
@default_opts [run_ensure: true, ignore_failure: false,
brunch: "node_modules/brunch/bin/brunch", brunch_config: "."]
@doc """
Callback implementation for `get_hooks/2`.
This function returns a list of operations that can be used to build a brunch
release on the `build_at` destination of a deployment.
## Examples
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Build.Phoenix.Brunch.get_hooks(deployment, [])
[%Akd.Hook{ensure: [], ignore_failure: false,
main: [%Akd.Operation{cmd: "mix deps.get \\n mix compile",
cmd_envs: [{"MIX_ENV", "prod"}],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "cd . \\n node_modules/brunch/bin/brunch build --production",
cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "mix phx.digest",
cmd_envs: [{"MIX_ENV", "prod"}],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
"""
@spec get_hooks(Akd.Deployment.t, Keyword.t) :: list(Akd.Hook.t)
def get_hooks(deployment, opts \\ []) do
opts = uniq_merge(opts, @default_opts)
brunch = Keyword.get(opts, :brunch)
brunch_config = Keyword.get(opts, :brunch_config)
[build_hook(deployment, brunch, brunch_config, opts)]
end
# This function takes a deployment and options and returns an Akd.Hook.t
# struct using FormHook DSL
defp build_hook(deployment, brunch, brunch_config, opts) do
destination = Akd.DestinationResolver.resolve(:build, deployment)
mix_env = deployment.mix_env
cmd_envs = Keyword.get(opts, :cmd_envs, [])
cmd_envs = [{"MIX_ENV", mix_env} | cmd_envs]
form_hook opts do
main "mix deps.get \n mix compile", destination,
cmd_envs: cmd_envs
main "cd #{brunch_config} \n #{brunch} build --production", destination
main "mix phx.digest", destination, cmd_envs: cmd_envs
# ensure "rm -rf deps", destination
end
end
# This function takes two keyword lists and merges them keeping the keys
# unique. If there are multiple values for a key, it takes the value from
# the first value of keyword1 corresponding to that key.
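#
# For example (key order aside):
#
#   uniq_merge([a: 1], [a: 2, b: 3])
#   #=> keeps a: 1 from keyword1 and b: 3 from keyword2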
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
end
|
lib/akd/phx/brunch.ex
| 0.873889 | 0.599544 |
brunch.ex
|
starcoder
|
if Code.ensure_loaded?(Decimal) and Code.ensure_loaded?(Money) do
defmodule Amenities.Monies do
@moduledoc """
Money Helpers
"""
@doc """
Converts a `Money` to a `Decimal` type
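## Examples
A sketch, assuming the default two-decimal `Money` configuration:
Amenities.Monies.to_decimal(Money.new(10_50))
#=> #Decimal<10.50>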
"""
@spec to_decimal(Money.t()) :: Decimal.t()
def to_decimal(%Money{} = money) do
money
|> Money.to_string(separator: "", symbol: false)
|> Decimal.new()
end
def to_decimal(%Decimal{} = decimal), do: decimal
def to_decimal(float) when is_float(float), do: Decimal.from_float(float)
def to_decimal(binary) when is_binary(binary), do: Decimal.new(binary)
def to_decimal(integer) when is_integer(integer), do: Decimal.new(integer)
def to_decimal(decimal), do: Decimal.new(decimal)
@doc """
Converts a `Money` to a `Integer` type
"""
@spec to_integer(Money.t()) :: integer
def to_integer(money) when is_integer(money), do: money
def to_integer(money) when is_binary(money), do: money |> String.to_float() |> round()
def to_integer(%Decimal{} = money), do: Decimal.to_integer(money)
def to_integer(money) do
money
|> Money.to_string(separator: "", symbol: false)
|> to_integer()
end
def safe_to_integer(nil), do: 0
def safe_to_integer(money), do: to_integer(money)
@spec safe_to_string(Money.t() | integer | Decimal.t() | nil) :: String.t() | nil
def safe_to_string(nil), do: nil
def safe_to_string(amount) when is_binary(amount), do: amount
def safe_to_string(%Money{} = money) do
money
|> Money.to_string(symbol: true)
end
def safe_to_string(money) when is_integer(money) do
money
|> Decimal.new()
|> Decimal.div(Decimal.new(100))
|> Decimal.to_string(:normal)
|> Money.parse!()
|> safe_to_string()
end
def safe_to_string(money) do
money
# |> Decimal.new()
# |> Decimal.div(Decimal.new(100))
|> Decimal.to_string(:normal)
|> Money.parse!()
|> safe_to_string()
end
def cast(nil) do
Money.new(0)
end
def cast(money) when is_integer(money) do
money
|> Decimal.new()
|> Decimal.div(Decimal.new(100))
|> Decimal.to_string(:normal)
|> Money.parse!()
end
def cast(money) when is_binary(money) do
money
# Decimal.parse/1 returns a tuple, so use Decimal.new/1 to get a bare Decimal
|> Decimal.new()
|> Decimal.div(Decimal.new(100))
|> Decimal.to_string(:normal)
|> Money.parse!()
end
def cast(money) do
money
|> Decimal.div(Decimal.new(100))
|> Decimal.to_string(:normal)
|> Money.parse!()
end
end
end
|
lib/amenities/monies.ex
| 0.817028 | 0.454048 |
monies.ex
|
starcoder
|
defmodule Aoc2021.Day6 do
@moduledoc """
See https://adventofcode.com/2021/day/6
"""
defmodule Parser do
@moduledoc false
@spec read_input(Path.t()) :: [non_neg_integer()]
def read_input(path) do
path
|> File.stream!()
|> Stream.take(1)
|> Stream.map(&parse_line/1)
|> Enum.to_list()
|> hd()
end
defp parse_line(line) do
line
|> String.trim()
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
end
defmodule Part1 do
@moduledoc """
Exponential growth.
For each initial fish we know the number of days until it reproduces, so we
can count them down; each fish starts with a variable initial delay.
For each newborn fish there is a 2-day extra delay before it joins the
regular 6-day reproduction cycle, i.e. it first reproduces after 8 days.
We can assume that the initial delay for the initial fish was also 8 and
sync the formula to that.
I was thinking about an exponential growth formula, but couldn't make it work.
Reached out to forums and the solution is obvious: make a bucket per
maturity day and update the fish counts in the buckets. Simple, and way
simpler than what I was thinking about.
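For example, with initial timers [3, 4, 3, 1, 2] the buckets start as
%{1 => 1, 2 => 1, 3 => 2, 4 => 1}; after one simulated day they are
%{0 => 1, 1 => 1, 2 => 2, 3 => 1}, and the single fish in bucket 0 spawns a
newborn into bucket 8 (and resets to bucket 6) on the next step.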
"""
@spec bucket_solve([non_neg_integer()], non_neg_integer()) :: non_neg_integer()
def bucket_solve(inputs, t) do
initial = Enum.frequencies(inputs)
simulate(initial, t, 0)
end
defp simulate(fish, max_t, max_t) do
fish
|> Map.values()
|> Enum.sum()
end
defp simulate(fish, max_t, t) do
fish
|> Enum.reduce(%{}, fn
{0, count}, acc ->
acc
|> add_fish(6, count)
|> add_fish(8, count)
{t, count}, acc ->
add_fish(acc, t - 1, count)
end)
|> simulate(max_t, t + 1)
end
defp add_fish(fish, t, count) do
Map.update(fish, t, count, fn x -> x + count end)
end
end
@spec solve_part1(Path.t()) :: non_neg_integer()
def solve_part1(path \\ "priv/day6/input.txt") do
path
|> Parser.read_input()
|> Part1.bucket_solve(80)
end
@spec solve_part2(Path.t()) :: non_neg_integer()
def solve_part2(path \\ "priv/day6/input.txt") do
path
|> Parser.read_input()
|> Part1.bucket_solve(256)
end
defmodule LinAlg do
@moduledoc """
Solve day 6 using Nx.
"""
@k0 Nx.tensor([
[0, 0, 0, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0]
])
@k80 Enum.reduce(2..80, @k0, fn _, t -> Nx.dot(t, @k0) end)
@k256 Enum.reduce(2..256, @k0, fn _, t -> Nx.dot(t, @k0) end)
def k0, do: @k0
def solve(path \\ "priv/day6/input.txt") do
initial =
path
|> Parser.read_input()
|> Enum.frequencies()
|> ensure_zeroes_present()
|> Enum.sort()
|> Enum.map(fn {_, v} -> v end)
|> Nx.tensor()
p1 = calculate(initial, @k80)
p2 = calculate(initial, @k256)
{p1, p2}
end
defp ensure_zeroes_present(histogram) do
Enum.reduce(0..8, histogram, fn k, acc -> Map.put_new(acc, k, 0) end)
end
defp calculate(initial, k) do
initial
|> Nx.dot(k)
|> Nx.sum()
|> Nx.to_number()
end
end
end
|
lib/aoc2021/day6.ex
| 0.761184 | 0.599808 |
day6.ex
|
starcoder
|
defmodule Rollout do
@moduledoc """
Rollout allows you to flip features quickly and easily. It relies on
distributed Erlang and uses LWW-registers and hybrid logical clocks
to provide maximum availability. Rollout has no dependency on an external
service such as redis which means rollout feature flags can be used in the
critical path of a request with minimal latency increase.
## Usage
Rollout provides a simple api for enabling and disabling feature flags across
your cluster. A feature flag can be any term.
```elixir
# Check if a feature is active
Rollout.active?(:blog_post_comments)
# => false
# Activate the feature
Rollout.activate(:blog_post_comments)
# De-activate the feature
Rollout.deactivate(:blog_post_comments)
```
You can also activate a feature a certain percentage of the time.
```elixir
Rollout.activate_percentage(:blog_post_comments, 20)
```
You can run this function on one node in your cluster and the updates will
be propagated across the system. This means that updates to feature flags may
not be instantaneous across the cluster but under normal conditions should propagate
quickly. This is a tradeoff I've made in order to maintain the low latency when
checking if a flag is enabled.
## How does Rollout work?
Rollout utilizes [Groot](https://github.com/keathley/groot) for replicating flags
across your cluster. Please look at the groot docs for implementation details.
"""
@doc """
Checks to see if a feature is active or not.
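For partially activated flags this check is probabilistic: a flag activated
at 20 returns `true` for roughly 20% of calls.
## Examples
# `:new_checkout` is a placeholder flag name:
Rollout.activate_percentage(:new_checkout, 20)
Rollout.active?(:new_checkout)
#=> true roughly 20% of the time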
"""
@spec active?(term()) :: boolean()
def active?(flag) do
case Groot.get(flag) do
nil ->
false
0 ->
false
100 ->
true
val ->
:rand.uniform(100) <= val
end
end
@doc """
Fully activates a feature flag.
"""
@spec activate(term()) :: :ok
def activate(flag) do
activate_percentage(flag, 100)
end
@doc """
Disables a feature flag.
"""
@spec deactivate(term()) :: :ok
def deactivate(flag) do
activate_percentage(flag, 0)
end
@doc """
Activates a feature flag for a percentage of requests. An integer between 0 and 100
must be provided. Deciding whether a flag is active is done with the following
calculation: `:rand.uniform(100) <= provided_percentage`
"""
@spec activate_percentage(term(), 0..100) :: :ok
def activate_percentage(flag, percentage) when is_integer(percentage) and 0 <= percentage and percentage <= 100 do
Groot.set(flag, percentage)
end
end
|
lib/rollout.ex
| 0.82887 | 0.874185 |
rollout.ex
|
starcoder
|
defmodule Ueberauth.Strategy.CAS do
@moduledoc """
CAS Strategy for Überauth. Redirects the user to a CAS login page
and verifies the Service Ticket the CAS server returns after a
successful login.
The login flow looks like this:
1. User is redirected to the CAS server's login page by
`Ueberauth.Strategy.CAS.handle_request!/1`
2. User signs in to the CAS server.
3. CAS server redirects back to the Elixir application, sending
a Service Ticket in the URL parameters.
4. This Service Ticket is validated by this Überauth CAS strategy,
fetching the user's information at the same time.
5. User can proceed to use the Elixir application.
## Protocol compliance
This strategy only supports a subset of the CAS protocol (versions 2.0 and 3.0).
Notably, there is no support for proxy-related features.
More specifically, it supports following CAS URIs:
- `/login`
The strategy supports calling `/login` to enable the user to log in.
This is known as the [credential requestor][login]
mode in the CAS specification.
The strategy only supports the `service` parameter, and currently does
not provide support for `renew`, `gateway` or `method`.
- `/serviceValidate`
After a successful login, the strategy validates the ticket and retrieves
information about the user, as described in the [specification][validate].
The strategy only supports the required params, `service` and `ticket`.
There is no support for other params.
The validation path can be overridden via configuration to comply with
CAS 3.0 and use `/p3/serviceValidate`.
## Errors
If the login fails, the strategy will fail with error key `missing_ticket`.
If the ticket validation fails, the error key depends:
- If the response is no valid XML, the error key is `malformed_xml`.
- If there is a proper error code in the CAS serviceResponse, the error code will
be used as error key, while the description will be used as error message.
- In other cases, the error key will be `unknown_error`.
## User data
In the ticket validation step (step 4), user information is retrieved.
See `Ueberauth.Strategy.CAS.User` for documentation on accessing CAS attributes.
Some attributes are mapped to Γberauth info fields, as described below.
### Default mapping
By default, each attribute name is the same as the Überauth field name.
For example, the field `:last_name` will be set from an attribute `cas:lastName`.
### Configuring Überauth mapping
The mapping can be specified in the configuration:
```elixir
config :ueberauth, Ueberauth,
providers: [cas: {Ueberauth.Strategy.CAS, [
base_url: "http://cas.example.com",
validation_path: "/serviceValidate",
callback: "http://your-app.example.com/auth/cas/callback",
attributes: %{
last_name: "surname"
},
]}]
```
[login]: https://apereo.github.io/cas/6.2.x/protocol/CAS-Protocol-Specification.html#21-login-as-credential-requestor
[validate]: https://apereo.github.io/cas/6.2.x/protocol/CAS-Protocol-Specification.html#25-servicevalidate-cas-20
"""
use Ueberauth.Strategy
alias Ueberauth.Auth.Info
alias Ueberauth.Auth.Credentials
alias Ueberauth.Auth.Extra
alias Ueberauth.Strategy.CAS
@doc """
Ueberauth `request` handler. Redirects to the CAS server's login page.
"""
def handle_request!(conn) do
conn
|> redirect!(redirect_url(conn))
end
@doc """
Ueberauth after login callback with a valid CAS Service Ticket.
"""
def handle_callback!(%Plug.Conn{params: %{"ticket" => ticket}} = conn) do
conn
|> handle_ticket(ticket)
end
@doc """
Ueberauth after login callback missing a CAS Service Ticket.
"""
def handle_callback!(conn) do
conn
|> set_errors!([error("missing_ticket", "No service ticket received")])
end
@doc """
Ueberauth cleanup callback. Clears CAS session information from `conn`.
"""
def handle_cleanup!(conn) do
conn
|> put_private(:cas_ticket, nil)
|> put_private(:cas_user, nil)
end
@doc "Ueberauth UID callback."
def uid(conn), do: conn.private.cas_user.name
@doc """
Ueberauth extra information callback. Returns all information the CAS
server returned about the user that authenticated.
"""
def extra(conn) do
%Extra{
raw_info: %{
user: conn.private.cas_user
}
}
end
@doc """
Ueberauth user information.
"""
def info(conn) do
user = conn.private.cas_user
attributes = user.attributes
%Info{
name: user.name,
email: get_attribute(attributes, :email),
birthday: get_attribute(attributes, :birthday),
description: get_attribute(attributes, :description),
first_name: get_attribute(attributes, :first_name),
last_name: get_attribute(attributes, :last_name),
nickname: get_attribute(attributes, :nickname),
phone: get_attribute(attributes, :phone)
}
end
@doc """
Ueberauth credentials callback. Contains CAS Service Ticket and user roles.
"""
def credentials(conn) do
%Credentials{
expires: false,
token: conn.private.cas_ticket,
token_type: "service_ticket",
other: Map.get(conn.private.cas_user.attributes, "roles")
}
end
defp redirect_url(conn) do
CAS.API.login_url() <> "?service=#{callback_url(conn)}"
end
defp handle_ticket(conn, ticket) do
conn
|> put_private(:cas_ticket, ticket)
|> fetch_user(ticket)
end
defp fetch_user(conn, ticket) do
ticket
|> CAS.API.validate_ticket(conn)
|> handle_validate_ticket_response(conn)
end
defp handle_validate_ticket_response({:error, {code, message}}, conn) do
conn
|> set_errors!([error(code, message)])
end
defp handle_validate_ticket_response({:error, reason}, conn) do
conn
|> set_errors!([error("NETWORK_ERROR", "An error occurred: #{reason}")])
end
defp handle_validate_ticket_response({:ok, %CAS.User{} = user}, conn) do
conn
|> put_private(:cas_user, user)
end
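# Looks up a user attribute by its (possibly remapped) name. Multi-valued CAS
# attributes arrive as lists; only the first value is returned.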
defp get_attribute(attributes, key) do
{_, settings} = Application.get_env(:ueberauth, Ueberauth)[:providers][:cas]
name =
Keyword.get(settings, :attributes, %{})
|> Map.get(key, Atom.to_string(key))
value = Map.get(attributes, name)
if is_list(value) do
Enum.at(value, 0)
else
value
end
end
end
|
lib/ueberauth/strategy/cas.ex
| 0.793666 | 0.779154 |
cas.ex
|
starcoder
|
defmodule Ecto.Query.API do
use Ecto.Query.Typespec
@moduledoc """
The Query API available by default in Ecto queries.
All queries in Ecto are typesafe and this module defines all
database functions based on their type. Note that this module defines
only the API, each database adapter still needs to support the
functions outlined here.
"""
## Types
deft float
deft integer
deft decimal
deft boolean
deft binary
deft string
deft array(var)
deft datetime
deft date
deft time
deft interval
defa number :: decimal | float | integer
## Operators
@doc "Boolean not."
def not(arg)
defs not(boolean) :: boolean
@doc "Equality."
def left == right
defs number == number :: boolean
defs var == var :: boolean
@doc "Inequality."
def left != right
defs number != number :: boolean
defs var != var :: boolean
@doc "Left less than or equal to right."
def left <= right
defs number <= number :: boolean
defs var <= var :: boolean
@doc "Left greater than or equal to right."
def left >= right
defs number >= number :: boolean
defs var >= var :: boolean
@doc "Left less than right."
def left < right
defs number < number :: boolean
defs var < var :: boolean
@doc "Left greater than right."
def left > right
defs number > number :: boolean
defs var > var :: boolean
@doc "Boolean and."
def left and right
defs boolean and boolean :: boolean
@doc "Boolean or."
def left or right
defs boolean or boolean :: boolean
@doc "Returns `true` if argument is null."
def is_nil(arg)
defs is_nil(_) :: boolean
@doc """
Return `true` if `left` is in `right` array, `false`
otherwise.
"""
def left in right
defs var in array(var) :: boolean
## Functions
@doc """
References a field. This can be used when a field needs
to be dynamically referenced.
## Examples
x = :title
from(p in Post, select: field(p, ^x))
"""
def field(_var, _atom), do: raise "field/2 should have been expanded"
@doc """
Casts a list to an array.
## Example
ids = [1, 2, 3]
from(c in Comment, where: c.id in array(^ids, :integer))
"""
def array(_list, _atom), do: raise "array/2 should have been expanded"
@doc """
Casts a binary literal to a binary type. By default a
binary literal is of the string type.
"""
def binary(_string), do: raise "binary/1 should have been expanded"
@doc """
Casts a binary literal to a `uuid` type. By default a
binary literal is of the string type.
"""
def uuid(_string), do: raise "uuid/1 should have been expanded"
@doc "Case-insensitive pattern match."
def ilike(left, right)
defs ilike(string, string) :: boolean
@doc "Case-sensitive pattern match."
def like(left, right)
defs like(string, string) :: boolean
## Aggregate functions
@doc "Aggregate function, averages the given field over the current group."
@aggregate true
def avg(numbers)
defs avg(number) :: number
@doc """
Aggregate function, counts the number of occurrences of the given field
in the current group.
"""
@aggregate true
def count(arg)
defs count(_) :: integer
@doc """
Aggregate function, the maximum number of the given field in the current
group.
"""
@aggregate true
def max(numbers)
defs max(integer) :: integer
defs max(float) :: float
defs max(date) :: date
defs max(datetime) :: datetime
defs max(time) :: time
@doc """
Aggregate function, the minimum number of the given field in the current
group.
"""
@aggregate true
def min(numbers)
defs min(integer) :: integer
defs min(float) :: float
defs min(date) :: date
defs min(datetime) :: datetime
defs min(time) :: time
@doc "Aggregate function, sums the given field over the current group."
@aggregate true
def sum(numbers)
defs sum(integer) :: integer
defs sum(float) :: float
end
|
lib/ecto/query/api.ex
| 0.928238 | 0.444083 |
api.ex
|
starcoder
|
defmodule Cldr.Unit.Conversion do
@moduledoc """
Unit conversion functions for the units defined
in `Cldr`.
"""
@enforce_keys [:factor, :offset, :base_unit]
defstruct factor: 1,
offset: 0,
base_unit: nil
@type factor :: integer | float | Ratio.t()
@type offset :: integer | float
@type t :: %{
factor: factor(),
base_unit: [atom(), ...],
offset: offset()
}
alias Cldr.Unit
alias Cldr.Unit.BaseUnit
@doc """
Returns the conversion that converts
the base unit into another unit, or
an error.
"""
def conversion_for(unit_1, unit_2) do
with {:ok, base_unit_1, _conversion_1} <- base_unit_and_conversion(unit_1),
{:ok, base_unit_2, conversion_2} <- base_unit_and_conversion(unit_2) do
conversion_for(unit_1, unit_2, base_unit_1, base_unit_2, conversion_2)
end
end
# Base units match so are compatible
defp conversion_for(_unit_1, _unit_2, base_unit, base_unit, conversion_2) do
{:ok, conversion_2, :forward}
end
# It may be invertible, so see if the inverse is convertible. Note that
# there is no difference in the conversion for an inverted
# conversion. It's only a hint so that in convert_from_base/2
# we know to divide, not multiply, the value.
defp conversion_for(unit_1, unit_2, base_unit_1, _base_unit_2, {numerator_2, denominator_2}) do
inverted_conversion = {denominator_2, numerator_2}
with {:ok, base_unit_2} <- BaseUnit.canonical_base_unit(inverted_conversion) do
if base_unit_1 == base_unit_2 do
{:ok, {numerator_2, denominator_2}, :inverted}
else
{:error, Unit.incompatible_units_error(unit_1, unit_2)}
end
end
end
# Not invertible, so not compatible
defp conversion_for(unit_1, unit_2, _base_unit_1, _base_unit_2, _conversion) do
{:error, Unit.incompatible_units_error(unit_1, unit_2)}
end
@doc """
Returns the base unit and the base unit
conversion for a given unit.
## Argument
* `unit` is either a `t:Cldr.Unit`, an `atom` or
a `t:String`
## Returns
* `{:ok, base_unit, conversion}` or
* `{:error, {exception, reason}}`
## Example
iex> Cldr.Unit.Conversion.base_unit_and_conversion :square_kilometer
{
:ok,
:square_meter,
[square_kilometer: %Cldr.Unit.Conversion{base_unit: [:square, :meter], factor: 1000000, offset: 0}]
}
iex> Cldr.Unit.Conversion.base_unit_and_conversion :square_table
{:error, {Cldr.UnknownUnitError, "Unknown unit was detected at \\"table\\""}}
"""
def base_unit_and_conversion(%Unit{base_conversion: conversion}) do
{:ok, base_unit} = BaseUnit.canonical_base_unit(conversion)
{:ok, base_unit, conversion}
end
def base_unit_and_conversion(unit_name) when is_atom(unit_name) or is_binary(unit_name) do
with {:ok, _unit, conversion} <- Cldr.Unit.validate_unit(unit_name),
{:ok, base_unit} <- BaseUnit.canonical_base_unit(conversion) do
{:ok, base_unit, conversion}
end
end
@doc """
Convert one unit into another unit of the same
unit type (length, volume, mass, ...)
## Arguments
* `unit` is any unit returned by `Cldr.Unit.new/2`
* `to_unit` is any unit name returned by `Cldr.Unit.known_units/0`
## Returns
* a `Unit.t` of the unit type `to_unit` or
* `{:error, {exception, message}}`
## Examples
iex> Cldr.Unit.convert Cldr.Unit.new!(:mile, 1), :foot
{:ok, Cldr.Unit.new!(:foot, 5280)}
iex> Cldr.Unit.convert Cldr.Unit.new!(:mile, 1), :gallon
{:error, {Cldr.Unit.IncompatibleUnitsError,
"Operations can only be performed between units with the same base unit. Received :mile and :gallon"}}
"""
@spec convert(Unit.t(), Unit.unit()) :: {:ok, Unit.t()} | {:error, {module(), String.t()}}
def convert(%Unit{value: value, base_conversion: from_conversion} = unit, to_unit) do
with {:ok, to_conversion, maybe_inverted} <- conversion_for(unit, to_unit) do
converted_value = convert(value, from_conversion, to_conversion, maybe_inverted)
Unit.new(to_unit, converted_value, usage: unit.usage, format_options: unit.format_options)
end
end
defp convert(value, from, to, maybe_inverted) when is_number(value) or is_map(value) do
use Ratio
value
|> Ratio.new()
|> convert_to_base(from)
|> maybe_invert_value(maybe_inverted)
|> convert_from_base(to)
end
def maybe_invert_value(value, :inverted) do
use Ratio
1 / value
end
def maybe_invert_value(value, _) do
value
end
# All conversions are ultimately a list of
# 2-tuples of the unit and conversion struct
defp convert_to_base(value, {_, %__MODULE__{} = from}) do
use Ratio
%{factor: from_factor, offset: from_offset} = from
value * from_factor + from_offset
end
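# For example, 0 degrees Celsius converts to the base unit kelvin with
# factor 1 and offset 273.15, giving 0 * 1 + 273.15 = 273.15.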
# A per module is a 2-tuple of the numerator and
# denominator. Both are lists of conversion tuples.
defp convert_to_base(value, {numerator, denominator}) do
use Ratio
convert_to_base(1.0, numerator) / convert_to_base(1.0, denominator) * value
end
# We recurse over the list of conversions
# and accumulate the value as we go
defp convert_to_base(value, []) do
value
end
defp convert_to_base(value, [first | rest]) do
convert_to_base(value, first) |> convert_to_base(rest)
end
# But if we meet a shape of data we don't
# understand then its a raisable error
defp convert_to_base(_value, conversion) do
raise ArgumentError, "Conversion not recognised: #{inspect(conversion)}"
end
defp convert_from_base(value, {_, %__MODULE__{} = to}) do
use Ratio
%{factor: to_factor, offset: to_offset} = to
(value - to_offset) / to_factor
end
defp convert_from_base(value, {numerator, denominator}) do
use Ratio
convert_from_base(1.0, numerator) / convert_from_base(1.0, denominator) * value
end
defp convert_from_base(value, []) do
value
end
defp convert_from_base(value, [first | rest]) do
convert_from_base(value, first) |> convert_from_base(rest)
end
defp convert_from_base(_value, conversion) do
raise ArgumentError, "Conversion not recognised: #{inspect(conversion)}"
end
@doc """
Convert one unit into another unit of the same
unit type (length, volume, mass, ...) and raises
on a unit type mismatch
## Arguments
* `unit` is any unit returned by `Cldr.Unit.new/2`
* `to_unit` is any unit name returned by `Cldr.Unit.known_units/0`
## Returns
* a `Unit.t` of the unit type `to_unit` or
* raises an exception
## Examples
iex> Cldr.Unit.Conversion.convert!(Cldr.Unit.new!(:celsius, 0), :fahrenheit)
...> |> Cldr.Unit.round
#Cldr.Unit<:fahrenheit, 32.0>
iex> Cldr.Unit.Conversion.convert!(Cldr.Unit.new!(:fahrenheit, 32), :celsius)
...> |> Cldr.Unit.round
#Cldr.Unit<:celsius, 0.0>
Cldr.Unit.Conversion.convert Cldr.Unit.new!(:mile, 1), :gallon
** (Cldr.Unit.IncompatibleUnitsError) Operations can only be performed between units of the same type. Received :mile and :gallon
"""
@spec convert!(Unit.t(), Unit.unit()) :: Unit.t() | no_return()
def convert!(%Unit{} = unit, to_unit) do
case convert(unit, to_unit) do
{:error, {exception, reason}} -> raise exception, reason
{:ok, unit} -> unit
end
end
@doc """
Convert a unit into its base unit.
For example, the base unit for `length`
is `meter`. The base unit is an
intermediary unit used in all
conversions.
## Arguments
* `unit` is any unit returned by `Cldr.Unit.new/2`
## Returns
* `unit` converted to its base unit as a `t:Unit.t()` or
* `{;error, {exception, reason}}` as an error
## Example
iex> unit = Cldr.Unit.new!(:kilometer, 10)
iex> Cldr.Unit.Conversion.convert_to_base_unit unit
{:ok, Cldr.Unit.new!(:meter, 10000)}
"""
def convert_to_base_unit(%Unit{} = unit) do
with {:ok, base_unit} <- Unit.base_unit(unit) do
convert(unit, base_unit)
end
end
def convert_to_base_unit(unit) when is_atom(unit) do
unit
|> Unit.new!(1.0)
|> convert_to_base_unit()
end
def convert_to_base_unit([unit | _rest]) when is_atom(unit) do
convert_to_base_unit(unit)
end
@doc """
Convert a unit into its base unit and
raises on error
For example, the base unit for `length`
is `meter`. The base unit is an
intermediary unit used in all
conversions.
## Arguments
* `unit` is any unit returned by `Cldr.Unit.new/2`
## Returns
* `unit` converted to its base unit as a `t:Unit.t()` or
* raises an exception
## Example
iex> unit = Cldr.Unit.new!(:kilometer, 10)
iex> Cldr.Unit.Conversion.convert_to_base_unit! unit
#Cldr.Unit<:meter, 10000>
"""
def convert_to_base_unit!(%Unit{} = unit) do
case convert_to_base_unit(unit) do
{:error, {exception, reason}} -> raise exception, reason
{:ok, unit} -> unit
end
end
end
|
lib/cldr/unit/conversion.ex
| 0.937705 | 0.568595 |
conversion.ex
|
starcoder
|
defmodule Rodeo.HTTP do
@moduledoc """
Encapsulates starting, configuring, reloading, and stopping of
cowboy web server instances.
"""
@defaultport 8080
def start(port \\ @defaultport, identifier \\ __MODULE__)
def start(:auto, identifier) do
find_available_tcp_port()
|> start(identifier)
end
@doc """
Starts the server on a given TCP port `port` (default: 8080). You can
pass `:auto` to find a random available TCP port. `identifier` is
passed to `:cowboy` as its listener id.
Returns a `{:ok, pid, port}` tuple.
"""
@spec start(integer(), atom()) :: {atom(), pid(), integer()}
def start(port, identifier) do
case :cowboy.start_http(identifier, 100, [port: port], [env: [dispatch: router()]]) do
{:ok, pid} ->
{:ok, pid, port}
{:error, reason} ->
{:error, reason}
end
end
@doc """
See `Rodeo.HTTP.router` for format of `matches`.
Example:
iex> Rodeo.HTTP.start
{:ok, #PID<0.306.0>, 8080}
iex> Rodeo.HTTP.reload({"/my/new/match", OtherHandler, []})
:ok
"""
def reload(matches, identifier \\ __MODULE__) do
:cowboy.set_env(identifier, :dispatch, router(matches))
end
@doc """
Shortcut for `Rodeo.HTTP.reload({"/[...]", handler, []})`.
"""
def change_handler!(handler, identifier \\ __MODULE__) do
reload({"/[...]", handler, []}, identifier)
end
@doc """
Defines a default handler
"""
def router do
{ "/[...]", Rodeo.Handler.Default, [] }
|> router()
end
@doc """
`matches` must be in format: `{ "/path/match/[...]", Handler, opts }`
"""
def router(matches) when is_tuple(matches), do: router([matches])
@doc """
`matches` must be in format: `[{ "/foo/[...]", Handler, opts }]`
"""
def router(matches) when is_list(matches) do
:cowboy_router.compile([ { :_, matches } ])
end
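# Asks the OS for an ephemeral port by listening on port 0, reads the port it
# was assigned, and closes the socket. Note that the port is not reserved, so
# another process could in principle claim it before cowboy binds to it.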
defp find_available_tcp_port do
{:ok, sock} = :gen_tcp.listen 0, []
{:ok, port} = :inet.port(sock)
:ok = :gen_tcp.close sock
port
end
end
|
lib/rodeo/http.ex
| 0.765856 | 0.447279 |
http.ex
|
starcoder
|
defmodule Freddy.RPC.Server do
@moduledoc """
A behaviour module for implementing AMQP RPC server processes.
The `Freddy.RPC.Server` module provides a way to create processes that hold,
monitor, and restart a channel in case of failure, and have some callbacks
to hook into the process lifecycle and handle messages.
An example `Freddy.RPC.Server` process that answers messages with `"ping"` as
payload with a `"pong"` response; otherwise it does not reply immediately but calls a given
handler function with the payload and a callback function asynchronously, so
the handler can respond when the message is processed:
defmodule MyRPC.Server do
use Freddy.RPC.Server
def start_link(conn, config, handler) do
Freddy.RPC.Server.start_link(__MODULE__, conn, config, handler)
end
def init(handler) do
{:ok, %{handler: handler}}
end
def handle_request("ping", _meta, state) do
{:reply, "pong", state}
end
def handle_request(payload, meta, %{handler: handler} = state) do
callback = &Freddy.RPC.Server.reply(meta, &1)
Task.start_link(fn -> handler.(payload, callback) end)
{:noreply, state}
end
end
## Channel handling
When the `Freddy.RPC.Server` starts with `start_link/5` it runs the `init/1` callback
and responds with `{:ok, pid}` on success, like a `GenServer`.
After starting the process it attempts to open a channel on the given connection.
It monitors the channel, and in case of failure it tries to reopen again and again
on the same connection.
## Context setup
The context setup process for a RPC server is to declare an exchange, then declare
a queue to consume, and then bind the queue to the exchange. It also creates a
default exchange to use it to respond to the reply-to queue.
Every time a channel is open the context is set up, meaning that the queue and
the exchange are declared and binded through the new channel based on the given
configuration.
The configuration must be a `Keyword.t` that contains the same keys as `Freddy.Consumer`.
Check out `Freddy.Consumer` documentation for the list of available configuration keys.
## Acknowledgement mode
By default RPC server starts in automatic acknowledgement mode. It means that all
incoming requests will be acknowledged automatically by RabbitMQ server once delivered
to a client (RPC server process).
If your logic requires manual acknowledgements, you should start server with configuration
option `[consumer: [no_ack: false]]` and acknowledge messages manually using `ack/2` function.
Below is an example of how to start a server in manual acknowledgement mode:
defmodule MyRPC.Server do
alias Freddy.RPC.Server
use Server
def start_link(conn, handler) do
config = [
queue: [name: "rpc-queue"],
consumer: [no_ack: false]
]
Server.start_link(__MODULE__, conn, config, handler)
end
def handle_request(payload, meta, handler) do
result = handler.handle_request(payload)
Server.ack(meta)
{:reply, result, handler}
end
end
"""
@type payload :: String.t()
@type request :: term
@type response :: term
@type routing_key :: String.t()
@type opts :: Keyword.t()
@type meta :: map
@type state :: term
@doc """
Called when the RPC server process is first started. `start_link/5` will block
until it returns.
It receives as argument the fourth argument given to `start_link/5`.
Returning `{:ok, state}` will cause `start_link/5` to return `{:ok, pid}`
and attempt to open a channel on the given connection, declare the exchange,
declare a queue, and start consumption. After that it will enter the main loop
with `state` as its internal state.
Returning `:ignore` will cause `start_link/5` to return `:ignore` and the
process will exit normally without entering the loop, opening a channel or calling
`c:terminate/2`.
Returning `{:stop, reason}` will cause `start_link/5` to return `{:error, reason}` and
the process will exit with reason `reason` without entering the loop, opening a channel,
or calling `c:terminate/2`.
"""
@callback init(initial :: term) ::
{:ok, state}
| :ignore
| {:stop, reason :: term}
@doc """
Called when the RPC server process has opened AMQP channel before registering
itself as a consumer.
First argument is a map, containing `:channel`, `:exchange` and `:queue` structures.
Returning `{:noreply, state}` will cause the process to enter the main loop
with the given state.
Returning `{:error, state}` will cause the process to reconnect (i.e. open
new channel, declare exchange and queue, etc).
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback handle_connected(Freddy.Consumer.connection_info(), state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:error, state}
| {:stop, reason :: term, state}
@doc """
Called when the AMQP server has registered the process as a consumer of the
server-named queue and it will start to receive messages.
Returning `{:noreply, state}` will causes the process to enter the main loop
with the given state.
Returning `{:stop, reason, state}` will not send the message, terminate
the main loop and call `c:terminate/2` before the process exits with
reason `reason`.
"""
@callback handle_ready(meta, state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called when the RPC server has been disconnected from the AMQP broker.
Returning `{:noreply, state}` will cause the process to enter the main loop
with the given state. The server will not consume any new messages until
connection to AMQP broker is restored.
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback handle_disconnected(reason :: term, state) ::
{:noreply, state}
| {:stop, reason :: term, state}
@doc """
Called when a request message is delivered from the queue before passing it into a
`handle_request` function.
The arguments are the message's payload, some metadata and the internal state.
The metadata is a map containing all metadata given by the AMQP client when receiving
the message plus the `:exchange` and `:queue` values.
Returning `{:ok, request, state}` will pass the returned request term into
`c:handle_message/3` function.
Returning `{:ok, request, meta, state}` will pass the returned request term
and the meta into `c:handle_message/3` function.
Returning `{:reply, response, state}` will publish response message without calling
`c:handle_message/3` function. Function `c:encode_response/3` will be called before
publishing the response.
Returning `{:reply, response, opts, state}` will publish the response message
with returned options without calling `c:handle_message/3` function. Function
`c:encode_response/3` will be called before publishing the response.
Returning `{:noreply, state}` will ignore that message and enter the main loop
again with the given state.
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback decode_request(payload, meta, state) ::
{:ok, request, state}
| {:reply, response, state}
| {:reply, response, opts, state}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called when a request message has been successfully decoded by `c:decode_request/3`
function.
The arguments are the message's decoded payload, some metadata and the internal state.
The metadata is a map containing all metadata given by the AMQP client when receiving
the message plus the `:exchange` and `:queue` values.
Returning `{:reply, response, state}` will publish response message. Function
`c:encode_response/3` will be called before publishing the response.
Returning `{:reply, response, opts, state}` will publish the response message
with returned options. Function `c:encode_response/3` will be called before publishing
the response.
Returning `{:noreply, state}` will ignore that message and enter the main loop
again with the given state.
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback handle_request(request, meta, state) ::
{:reply, response, state}
| {:reply, response, opts, state}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called before a response message will be published to the default exchange.
It receives as argument the message payload, the options for that
publication and the internal state.
Returning `{:reply, string, state}` will cause the returned `string` to be
published to the exchange, and the process to enter the main loop with the
given state.
Returning `{:reply, string, opts, state}` will cause the returned `string` to be
published to the exchange with the returned options, and enter the main loop with
the given state.
Returning `{:noreply, state}` will ignore that message and enter the main loop
again with the given state.
Returning `{:stop, reason, state}` will terminate the main loop and call
`c:terminate/2` before the process exits with reason `reason`.
"""
@callback encode_response(response, opts, state) ::
{:reply, payload, state}
| {:reply, payload, opts, state}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called when the process receives a call message sent by `call/3`. This
callback has the same arguments as the `GenServer` equivalent and the
`:reply`, `:noreply` and `:stop` return tuples behave the same.
"""
@callback handle_call(request, GenServer.from(), state) ::
{:reply, reply :: term, state}
| {:reply, reply :: term, state, timeout | :hibernate}
| {:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
| {:stop, reason :: term, reply :: term, state}
@doc """
Called when the process receives a cast message sent by `cast/2`. This
callback has the same arguments as the `GenServer` equivalent and the
`:noreply` and `:stop` return tuples behave the same.
"""
@callback handle_cast(request, state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
Called when the process receives a message. This callback has the same
arguments as the `GenServer` equivalent and the `:noreply` and `:stop`
return tuples behave the same.
"""
@callback handle_info(message :: term, state) ::
{:noreply, state}
| {:noreply, state, timeout | :hibernate}
| {:stop, reason :: term, state}
@doc """
This callback is the same as the `GenServer` equivalent and is called when the
process terminates. The first argument is the reason the process is about
to exit with.
"""
@callback terminate(reason :: term, state) :: any
defmacro __using__(_opts) do
quote do
@behaviour unquote(__MODULE__)
@impl true
def init(initial) do
{:ok, initial}
end
@impl true
def handle_connected(_meta, state) do
{:noreply, state}
end
@impl true
def handle_ready(_meta, state) do
{:noreply, state}
end
@impl true
def handle_disconnected(_meta, state) do
{:noreply, state}
end
@impl true
def decode_request(payload, _meta, state) do
case Jason.decode(payload) do
{:ok, decoded} -> {:ok, decoded, state}
{:error, _reason} -> {:stop, {:bad_request, payload}, state}
end
end
@impl true
# Part of Freddy custom RPC protocol
# TODO: remove it from the library to a custom integration app
def encode_response({:ok, payload}, opts, state) do
encode_response(
%{success: true, output: payload},
Keyword.put(opts, :type, "response"),
state
)
end
def encode_response({:error, reason}, opts, state) do
encode_response(%{success: false, error: reason}, Keyword.put(opts, :type, "error"), state)
end
def encode_response(response, opts, state) do
case Jason.encode(response) do
{:ok, payload} ->
opts = Keyword.put(opts, :content_type, "application/json")
{:reply, payload, opts, state}
{:error, _reason} ->
{:stop, {:bad_response, response}, state}
end
end
@impl true
def handle_call(message, _from, state) do
{:stop, {:bad_call, message}, state}
end
@impl true
def handle_cast(message, state) do
{:stop, {:bad_cast, message}, state}
end
@impl true
def handle_info(_message, state) do
{:noreply, state}
end
@impl true
def terminate(_reason, _state) do
:ok
end
defoverridable unquote(__MODULE__)
end
end
use Freddy.Consumer
alias Freddy.Core.Exchange
@doc """
Start a `Freddy.RPC.Server` process linked to the current process.
Arguments:
* `mod` - the module that defines the server callbacks (like `GenServer`)
* `connection` - the pid of a `Freddy.Connection` process
* `config` - the configuration of the RPC server
* `initial` - the value that will be given to `c:init/1`
* `opts` - the GenServer options
"""
@spec start_link(
module,
Freddy.Consumer.connection(),
Keyword.t(),
initial :: term,
GenServer.options()
) :: GenServer.on_start()
def start_link(mod, connection, config, initial, opts \\ []) do
Freddy.Consumer.start_link(__MODULE__, connection, prepare_config(config), {mod, initial}, opts)
end
@doc """
Start a `Freddy.RPC.Server` process without linking to the current process.
See `start_link/5` for more information.
"""
@spec start(
module,
Freddy.Consumer.connection(),
Keyword.t(),
initial :: term,
GenServer.options()
) :: GenServer.on_start()
def start(mod, connection, config, initial, opts \\ []) do
Freddy.Consumer.start(__MODULE__, connection, prepare_config(config), {mod, initial}, opts)
end
@doc """
Responds to a request given its meta.
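## Examples
A sketch of deferring the response from a `handle_request/3` callback
(`process/1` is a placeholder for your own logic):
def handle_request(payload, meta, state) do
Task.start_link(fn -> Freddy.RPC.Server.reply(meta, process(payload)) end)
{:noreply, state}
end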
"""
@spec reply(meta, response, Keyword.t()) :: :ok
def reply(%{rpc_server: server} = request_meta, response, opts \\ []) do
if rpc_request?(request_meta) do
cast(server, {:"$reply", request_meta, response, opts})
else
:ok
end
end
defdelegate ack(meta, opts \\ []), to: Freddy.Consumer
defdelegate call(server, message, timeout \\ 5000), to: Freddy.Consumer
defdelegate cast(server, message), to: Freddy.Consumer
defdelegate stop(server, reason \\ :normal), to: GenServer
defp prepare_config(config) do
Keyword.update(config, :consumer, [no_ack: true], &Keyword.put_new(&1, :no_ack, true))
end
import Record
defrecordp :state, mod: nil, given: nil
@impl true
def init({mod, initial}) do
case mod.init(initial) do
{:ok, given} ->
{:ok, state(mod: mod, given: given)}
:ignore ->
:ignore
{:stop, reason} ->
{:stop, reason}
end
end
@impl true
def handle_connected(meta, state(mod: mod, given: given) = state) do
case mod.handle_connected(meta, given) do
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:error, new_given} ->
{:error, state(state, given: new_given)}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
@impl true
def handle_ready(meta, state(mod: mod, given: given) = state) do
case mod.handle_ready(meta, given) do
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
@impl true
def handle_disconnected(reason, state(mod: mod, given: given) = state) do
case mod.handle_disconnected(reason, given) do
{:noreply, new_given} -> {:noreply, state(state, given: new_given)}
{:stop, reason, new_given} -> {:stop, reason, state(state, given: new_given)}
end
end
@impl true
def decode_message(payload, meta, given) do
{:ok, payload, meta, given}
end
@impl true
def handle_message(payload, meta, state(mod: mod, given: given) = state) do
meta = complete(meta)
case mod.decode_request(payload, meta, given) do
{:ok, new_payload, new_given} ->
new_payload
|> mod.handle_request(meta, new_given)
|> handle_mod_response(meta, state)
other ->
handle_mod_response(other, meta, state)
end
end
@impl true
def handle_call(request, from, state(mod: mod, given: given) = state) do
case mod.handle_call(request, from, given) do
{:reply, response, new_given} ->
{:reply, response, state(state, given: new_given)}
{:reply, response, new_given, timeout} ->
{:reply, response, state(state, given: new_given), timeout}
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, response, new_given} ->
{:stop, reason, response, state(state, given: new_given)}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
@impl true
def handle_cast({:"$reply", meta, response, opts}, state) do
send_response(response, opts, meta, state)
end
def handle_cast(message, state(mod: mod, given: given) = state) do
message
|> mod.handle_cast(given)
|> handle_async_response(state)
end
@impl true
def handle_info(message, state(mod: mod, given: given) = state) do
message
|> mod.handle_info(given)
|> handle_async_response(state)
end
@impl true
def terminate(reason, state(mod: mod, given: given)) do
mod.terminate(reason, given)
end
defp handle_mod_response(response, meta, state) do
case response do
{:reply, response, new_given} ->
send_response(response, [], meta, state(state, given: new_given))
{:reply, response, opts, new_given} ->
send_response(response, opts, meta, state(state, given: new_given))
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
defp send_response(response, opts, req_meta, state) do
if rpc_request?(req_meta) do
do_send_response(response, opts, req_meta, state)
else
{:noreply, state}
end
end
defp do_send_response(response, opts, req_meta, state(mod: mod, given: given) = state) do
case mod.encode_response(response, opts, given) do
{:reply, payload, new_given} ->
publish_response(req_meta, payload, opts)
{:noreply, state(state, given: new_given)}
{:reply, payload, opts, new_given} ->
publish_response(req_meta, payload, opts)
{:noreply, state(state, given: new_given)}
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
defp publish_response(
%{correlation_id: correlation_id, reply_to: target, channel: channel} = _meta,
payload,
opts
) do
opts = Keyword.put(opts, :correlation_id, correlation_id)
Exchange.publish(Exchange.default(), channel, payload, target, opts)
end
defp handle_async_response(response, state) do
case response do
{:noreply, new_given} ->
{:noreply, state(state, given: new_given)}
{:noreply, new_given, timeout} ->
{:noreply, state(state, given: new_given), timeout}
{:stop, reason, new_given} ->
{:stop, reason, state(state, given: new_given)}
end
end
defp complete(meta) do
Map.put(meta, :rpc_server, self())
end
defp rpc_request?(meta) when is_map(meta) do
is_binary(meta[:reply_to]) && is_binary(meta[:correlation_id])
end
end
|
lib/freddy/rpc/server.ex
| 0.866331 | 0.550305 |
server.ex
|
starcoder
|
defmodule Transmog do
@moduledoc """
`Transmog` is a module which makes it easy to perform a deep rename of keys in
a map or list of maps using a key mapping. The key mapping is a list of two
tuples which are dot notation strings representing the path to update and the
resulting name of the key after formatting using `format/2`.
The format for a key mapping disregards whether or not a list is present while
traversing the tree. If a list is discovered during the traversal then the
requested changes will be duplicated to all nested resources.
## Examples
iex> key_mapping = [{":a", "b"}, {":a.:b", "b.a"}]
This `key_mapping` above represents a format which changes `:a` to `"b"` in
the first level of the map. In the second level of the map (including lists of
maps) `:b` will be updated to `"a"`.
When running a format using that `key_mapping` we will expect to see the
following result.
## Examples
iex> source = [%{a: %{b: "c"}}, %{a: %{b: "d"}}]
iex> {:ok, formatted_source} = Transmog.format(source, key_mapping)
iex> formatted_source
[%{"b" => %{"a" => "c"}}, %{"b" => %{"a" => "d"}}]
"""
alias Transmog.Matcher
alias Transmog.Parser
alias Transmog.Permutation
@typedoc """
`formattable` is the type of values that can be formatted by `format/2`.
"""
@type formattable :: list | map
@typedoc """
`key` is the type of valid key parsed in a key mapping.
"""
@type key :: atom | binary
@typedoc """
`pair` is the type for a valid pair. A valid pair is a two tuple consisting of
two equal length list of keys. See `key` above.
"""
@type pair :: {[key], [key]}
@typedoc """
`raw_pair` is the type for a valid input pair. An input pair following the
string format using dot notation.
"""
@type raw_pair :: {binary, binary}
@typedoc """
`result` is the type for the output from `format/2`.
"""
@type result :: {:ok, formattable} | Parser.error()
@doc """
`format/2` takes either a list or a map and changes the keys of the maps
contained within using a key mapping as a guide. Before any formatting is
done the mapping is first validated by `Transmog.Parser`. If the mapping is
not valid then `{:error, :invalid_pair}` will be returned.
## Examples
iex> key_mapping = [{":a", "b"}, {":a.b", "b.:a"}]
iex> fields = %{a: %{"b" => "c"}}
iex> {:ok, formatted_fields} = Transmog.format(fields, key_mapping)
iex> formatted_fields
%{"b" => %{a: "c"}}
"""
@spec format(value :: formattable, pairs :: [pair] | [raw_pair]) :: result
def format(value, pairs) when is_list(value) and is_list(pairs) do
with {:ok, pairs} <- Parser.parse(pairs) do
{:ok, Enum.map(value, &format_level(&1, pairs))}
end
end
def format(%{} = value, pairs) when is_list(pairs) do
with {:ok, pairs} <- Parser.parse(pairs), do: {:ok, format_level(value, pairs)}
end
# Formats a single level of a map. In the event that there is another level of
# either a map or a list then a subset pair list will be computed and the
# function will recursively format the children.
@spec format_level(value :: map, pairs :: list(pair) | list(raw_pair)) :: map
defp format_level(%{} = value, pairs) when is_list(pairs) do
value
|> Enum.map(fn
{key, value} when is_list(value) ->
subset = Permutation.subset(pairs, key)
{Matcher.find(pairs, key), Enum.map(value, &format_level(&1, subset))}
{key, value} when is_map(value) ->
subset = Permutation.subset(pairs, key)
{Matcher.find(pairs, key), format_level(value, subset)}
{key, value} ->
{Matcher.find(pairs, key), value}
end)
|> Map.new()
end
end
|
lib/transmog.ex
| 0.934671 | 0.782122 |
transmog.ex
|
starcoder
|
defmodule ExRets.SearchArguments do
@moduledoc """
Arguments for a RETS Search Transaction.
"""
@moduledoc since: "0.1.0"
@enforce_keys [:search_type, :class]
defstruct search_type: nil,
class: nil,
count: :no_record_count,
format: "COMPACT-DECODED",
limit: "NONE",
offset: 1,
select: nil,
restricted_indicator: nil,
standard_names: false,
payload: nil,
query: nil,
query_type: "DMQL2"
@typedoc "Arguments for a RETS Search Transaction."
@typedoc since: "0.1.0"
@type t :: %__MODULE__{
search_type: search_type(),
class: class(),
count: count(),
format: format(),
limit: limit(),
offset: offset(),
select: select(),
restricted_indicator: restricted_indicator(),
standard_names: standard_names(),
payload: payload(),
query: query(),
query_type: query_type()
}
@typedoc "Specifies the resource to search. Required."
@typedoc since: "0.1.0"
@type search_type :: String.t()
@typedoc "Specifies the class of the resource to search. Required."
@typedoc since: "0.1.0"
@type class :: String.t()
@typedoc """
Controls whether the server's response includes a count.
Possible values:
* `:no_record_count` - no record count returned
* `:include_record_count` - record-count is returned in addition to the data
* `:only_record_count` - only a record-count is returned; no data is returned
"""
@typedoc since: "0.1.0"
@type count :: :no_record_count | :include_record_count | :only_record_count
@typedoc """
Selects one of the three supported data return formats for the query response.
Possible values:
* COMPACT
* COMPACT-DECODED
* STANDARD-XML
"""
@typedoc since: "0.1.0"
@type format :: String.t()
@typedoc """
Requests the server to apply or suspend a limit on the number of records returned in the search.
"""
@typedoc since: "0.1.0"
@type limit :: integer() | String.t()
@typedoc """
Retrieve records beginning with the record number indicated, with a value of 1 indicating to
start with the first record.
"""
@typedoc since: "0.1.0"
@type offset :: non_neg_integer()
@typedoc """
A comma-separated list of fields for the server to return.
"""
@typedoc since: "0.1.0"
@type select :: String.t() | nil
@typedoc """
Used in place of withheld field values.
"""
@typedoc since: "0.1.0"
@type restricted_indicator :: String.t() | nil
@typedoc """
Specifies whether to use standard names or system names.
This argument applies to all names used in the `search_type`, `class`, `query`, and `select`
arguments.
Possible values:
* `false` - system names
* `true` - standard names
"""
@typedoc since: "0.1.0"
@type standard_names :: boolean()
@typedoc """
Request a specific XML format for the return set.
Only set `payload` OR `format` and optionally `select`.
"""
@typedoc since: "0.1.0"
@type payload :: String.t() | nil
@typedoc """
Query as specified by the language denoted in `query_type`.
"""
@typedoc since: "0.1.0"
@type query :: String.t() | nil
@typedoc """
Designates the query language used in `query`.
"""
@typedoc since: "0.1.0"
@type query_type :: String.t() | nil
@doc """
Encodes search arguments `t:t/0` into a query string.
## Examples
iex> search_arguments = %ExRets.SearchArguments{
...> search_type: "Property",
...> class: "Residential"
...> }
iex> ExRets.SearchArguments.encode_query(search_arguments)
"Class=Residential&Count=0&Format=COMPACT-DECODED&Limit=NONE&Offset=1&QueryType=DMQL2&SearchType=Property&StandardNames=0"
"""
@doc since: "0.1.0"
@spec encode_query(search_arguments :: t()) :: String.t()
def encode_query(%__MODULE__{} = search_arguments) do
search_arguments
|> Map.from_struct()
|> Enum.into(%{}, &format_key_and_value/1)
|> Enum.reject(fn {_, v} -> is_nil(v) end)
|> URI.encode_query()
end
defp format_key_and_value({:count, :no_record_count}), do: {"Count", 0}
defp format_key_and_value({:count, :include_record_count}), do: {"Count", 1}
defp format_key_and_value({:count, :only_record_count}), do: {"Count", 2}
defp format_key_and_value({:count, _}), do: {"Count", 0}
defp format_key_and_value({:standard_names, false}), do: {"StandardNames", 0}
defp format_key_and_value({:standard_names, true}), do: {"StandardNames", 1}
defp format_key_and_value({k, v}), do: {to_camel_case(k), v}
defp to_camel_case(atom) do
atom
|> to_string()
|> String.split("_")
|> Enum.map(&String.capitalize/1)
|> Enum.join()
end
end
defmodule Day10 do
def part1(input) do
asteroids = make_map_set(input)
x_range = 0..byte_size(hd(input))-1
y_range = 0..length(input)-1
limits = {x_range, y_range}
asteroids
|> Enum.map(fn pos ->
{num_visible(asteroids, pos, limits), pos}
end)
|> Enum.max
end
def part2(input, center) do
asteroids = make_map_set(input)
asteroids = MapSet.delete(asteroids, center)
x_range = 0..byte_size(hd(input))-1
y_range = 0..length(input)-1
limits = {x_range, y_range}
blocked = all_blocked(asteroids, center, limits)
vaporize_asteroids(asteroids, center, blocked)
end
# Considers only the asteroids visible on the laser's first rotation
# (already-blocked ones are ignored), sorts them in clockwise firing order
# starting straight up, and returns the 200th to be vaporized.
defp vaporize_asteroids(asteroids, center, blocked) do
asteroids
|> Stream.reject(fn pos -> pos in blocked end)
|> Enum.sort_by(fn pos ->
angle = asteroid_angle(pos, center)
distance = asteroid_distance(pos, center)
{angle, - distance}
end, &>=/2)
|> Enum.drop(199)
|> hd
|> result
end
defp result({x, y}), do: x * 100 + y
# Angle of `pos` as seen from `center`. Passing the deltas to atan2 in
# (dx, dy) order makes "straight up" the maximum value, so a descending
# sort sweeps clockwise starting from up.
defp asteroid_angle({x, y}, {xc, yc}) do
:math.atan2(x - xc, y - yc)
end
defp asteroid_distance({x, y}, {xc, yc}) do
xdist = x - xc
ydist = y - yc
:math.sqrt(xdist * xdist + ydist * ydist)
end
# Everything that is neither blocked nor the asteroid itself is visible.
defp num_visible(asteroids, pos, limits) do
num_blocked = MapSet.size(all_blocked(asteroids, pos, limits))
MapSet.size(asteroids) - num_blocked - 1
end
# Collects every asteroid hidden behind a nearer one by casting each
# reduced direction ray from `pos` and recording all hits after the first.
defp all_blocked(asteroids, pos, limits) do
vectors(limits)
|> Enum.flat_map(fn ray ->
blocked(pos, ray, asteroids, limits)
end)
|> Enum.uniq
|> MapSet.new
end
# Walks along one ray from `from`; the first asteroid hit is the blocker,
# and every later asteroid on the ray is collected as blocked.
defp blocked(from, {x_inc, y_inc}, asteroids, limits) do
Stream.iterate(from, fn {x, y} ->
{x + x_inc, y + y_inc}
end)
|> Stream.drop(1)
|> Enum.reduce_while(nil, fn pos, acc ->
case pos in asteroids do
true when acc === nil ->
{:cont, []}
true ->
{:cont, [pos | acc]}
false ->
case within_limits(pos, limits) do
true ->
{:cont, acc}
false when acc === nil ->
{:halt, []}
false ->
{:halt, acc}
end
end
end)
end
defp within_limits({x, y}, {x_range, y_range}) do
x in x_range and y in y_range
end
# All direction rays reduced to lowest terms by their gcd, so that each
# line of sight is enumerated exactly once.
defp vectors({x_range, y_range}) do
_..max_x = x_range
_..max_y = y_range
Stream.flat_map(-max_x..max_x, fn x ->
Stream.map(-max_y..max_y, fn y ->
{x, y}
end)
end)
|> Stream.reject(fn {x, y} -> x === 0 and y === 0 end)
|> Stream.map(fn {x, y} ->
abs_gcd = abs(gcd(x, y))
{div(x, abs_gcd), div(y, abs_gcd)}
end)
|> Enum.uniq
end
defp gcd(a, 0), do: a
defp gcd(a, b) do
case rem(a, b) do
0 -> b
x -> gcd(b, x)
end
end
defp make_map_set(input) do
input
|> Stream.with_index
|> Enum.reduce(MapSet.new(), fn {line, y}, set ->
String.to_charlist(line)
|> Stream.with_index
|> Enum.reduce(set, fn {char, x}, set ->
case char do
?\# -> MapSet.put(set, {x, y})
?\. -> set
end
end)
end)
end
end
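# A quick sanity check against the puzzle's published example grid (its
# stated best location is {3, 4}, seeing 8 other asteroids):
#
#     input = [".#..#", ".....", "#####", "....#", "...##"]
#     Day10.part1(input)
#     #=> {8, {3, 4}}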
defmodule Trades.Leader do
use GenServer, restart: :temporary
require Logger
alias Decimal, as: D
@short 60 * 10
@long 60 * 180
@trend 3600 * 24
defmodule Mas do
defstruct short_ma: Deque.new(2),
long_ma: Deque.new(2),
trend_ma: Deque.new(2),
short_events: Deque.new(1000),
long_events: Deque.new(50_000),
trend_events: Deque.new(100_000),
short_acc: 0,
long_acc: 0,
trend_acc: 0,
trend_deltas: Deque.new(100),
trades_bucket: %{ts: 0, price: 0, count: 0},
short_bucket: %{ts: 0, price: 0, count: 0},
long_bucket: %{ts: 0, price: 0, count: 0},
trend_bucket: %{ts: 0, price: 0, count: 0}
end
defmodule State do
@enforce_keys [
# :id,
:symbol
# :budget,
# :buy_down_interval,
# :profit_interval,
# :rebuy_interval,
# :rebuy_notified,
# :tick_size,
# :step_size
]
defstruct [
# :id,
:symbol,
:mas,
:trend,
:signal
# :budget,
# :buy_order,
# :sell_order,
# :buy_down_interval,
# :profit_interval,
# :rebuy_interval,
# :rebuy_notified,
# :tick_size,
# :step_size
]
end
def start_link(symbol) do
Logger.notice("Starting link: #{__MODULE__}-#{symbol}")
GenServer.start_link(
__MODULE__,
%State{
symbol: symbol,
mas: %Mas{}
},
name: :"#{__MODULE__}-#{symbol}"
)
end
def init(%State{symbol: symbol, mas: _mas} = state) do
# Decimal's context is per-process, so set the precision in the GenServer
# process itself (a module-level set would only affect the compiler).
D.Context.set(%D.Context{D.Context.get() | precision: 9})
symbol = String.downcase(symbol)
Streamer.start_streaming(symbol)
Phoenix.PubSub.subscribe(
Streamer.PubSub,
"trade_events:#{symbol}"
)
{:ok, state}
end
def handle_info(%Streamer.Binance.TradeEvent{trade_time: t_time, price: price}, state) do
symbol = String.downcase(state.symbol)
mas = state.mas
td_now = DateTime.now!("Etc/UTC")
ts_now = DateTime.to_unix(td_now, :second)
{new_short, short_events, short_bucket} =
event_append(mas.short_events, mas.short_bucket, t_time, price)
{new_long, long_events, long_bucket} =
event_append(mas.long_events, mas.long_bucket, t_time, price)
{new_trend, trend_events, trend_bucket} =
event_append(mas.trend_events, mas.trend_bucket, t_time, price)
mas = %Mas{
mas
| short_bucket: short_bucket,
long_bucket: long_bucket,
trend_bucket: trend_bucket
}
ma_data =
if new_short or new_long or new_trend do
{rm_short_acc, short_events} =
sum_drop_while(short_events, 0, fn e ->
[time, _price] = e
time < ts_now - @short
end)
{short_acc, sma} = update_ma(short_events, mas.short_acc, rm_short_acc)
{rm_long_acc, long_events} =
sum_drop_while(long_events, 0, fn e ->
[time, _price] = e
time < ts_now - @long
end)
{long_acc, lma} = update_ma(long_events, mas.long_acc, rm_long_acc)
{rm_trend_acc, trend_events} =
sum_drop_while(trend_events, 0, fn e ->
[time, _price] = e
time < ts_now - @trend
end)
{trend_acc, tma} = update_ma(trend_events, mas.trend_acc, rm_trend_acc)
Phoenix.PubSub.broadcast(
Streamer.PubSub,
"ma_events:#{symbol}",
%{
short_ma: sma,
long_ma: lma,
trend_ma: tma,
ts: ts_now
}
)
%{
short_ma: Deque.append(mas.short_ma, [ts_now, sma]),
long_ma: Deque.append(mas.long_ma, [ts_now, lma]),
trend_ma: Deque.append(mas.trend_ma, [ts_now, tma]),
short_events: short_events,
long_events: long_events,
trend_events: trend_events,
short_acc: short_acc,
long_acc: long_acc,
trend_acc: trend_acc
}
else
nil
end
mas =
if !is_nil(ma_data) do
%Mas{
mas
| short_ma: ma_data.short_ma,
long_ma: ma_data.long_ma,
trend_ma: ma_data.trend_ma,
short_events: ma_data.short_events,
long_events: ma_data.long_events,
trend_events: ma_data.trend_events,
short_acc: ma_data.short_acc,
long_acc: ma_data.long_acc,
trend_acc: ma_data.trend_acc
}
else
mas
end
%{trend: trend, trade: signal, trend_deltas: new_deltas} = conclusion(mas)
case state do
%State{trend: old_trend} when old_trend != trend ->
Logger.info(
"#{state.symbol} #{inspect(trend)} - #{inspect(signal)} - #{state.mas.short_events.size} . #{
state.mas.long_events.size
} . #{state.mas.trend_events.size}"
)
%State{signal: old_signal} when old_signal != signal ->
Logger.info(
"#{state.symbol} #{inspect(trend)} - #{inspect(signal)} - #{state.mas.short_events.size} . #{
state.mas.long_events.size
} . #{state.mas.trend_events.size}"
)
_ ->
:ok
end
{:noreply,
%State{state | trend: trend, signal: signal, mas: %{mas | trend_deltas: new_deltas}}}
end
# Rolling-average update: add the newest price to the running sum, subtract
# the prices that just aged out of the window, then divide by the new size.
defp update_ma(events, events_acc, rm_events_acc) do
{[_time, price], _q} = Deque.pop(events)
new_acc = D.sub(D.add(events_acc, price), rm_events_acc)
new_ma = D.div(new_acc, events.size)
{new_acc, new_ma}
end
defp calc_delta([current_ma, prev_ma]) do
[current_time, current_price] = current_ma
[prev_time, prev_price] = prev_ma
y = D.sub(current_time, prev_time)
x = D.sub(current_price, prev_price)
delta =
if D.eq?(y, 0) do
D.new(0)
else
D.div(x, y)
end
[current_time, delta]
end
# Derives the market trend from the average slope of the trend MA and the
# trade signal from the short/long MA crossover.
defp conclusion(%Mas{
trend_ma: trend_ma,
trend_deltas: trend_deltas,
short_ma: short_ma,
long_ma: long_ma
}) do
a_mas = Enum.reverse(Enum.to_list(trend_ma))
ma_delta =
case a_mas do
[_, _] -> calc_delta(a_mas)
_default -> [0, D.new(0)]
end
trend_deltas = Deque.append(trend_deltas, ma_delta)
avg =
if trend_deltas.size > 1 do
D.div(
Enum.reduce(trend_deltas, 0, fn [_time, delta], acc -> D.add(acc, delta) end),
trend_deltas.size
)
else
D.new(0)
end
trend =
cond do
D.gt?(avg, "0") -> :bull
D.lt?(avg, "0") -> :bear
true -> :neutral
end
{[_time, short_ma], _q} =
if short_ma.size > 0 do
Deque.pop(short_ma)
else
{[0, 0], nil}
end
{[_time, long_ma], _q} =
if long_ma.size > 0 do
Deque.pop(long_ma)
else
{[0, 0], nil}
end
signal =
cond do
D.gt?(short_ma, long_ma) -> :sell
D.lt?(short_ma, long_ma) -> :buy
true -> :neutral
end
%{trend: trend, trade: signal, trend_deltas: trend_deltas}
end
defp event_append(coll, bucket, ts, price) do
# convert from milli to seconds
ts = div(ts, 1000)
data =
cond do
ts != bucket.ts and bucket.count > 0 ->
{true, Deque.append(coll, [bucket.ts, D.div(bucket.price, bucket.count)]),
%{ts: ts, price: price, count: 1}}
ts == bucket.ts ->
{false, coll, %{bucket | price: D.add(price, bucket.price), count: bucket.count + 1}}
bucket.count == 0 ->
{false, coll, %{bucket | price: price, count: 1, ts: ts}}
end
data
end
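# Bucketing sketch (illustrative numbers): two trades in the same second,
# say prices 10 and 12 at ts = 100, accumulate as
# %{ts: 100, price: 22, count: 2}; the first trade of the next second then
# flushes the averaged sample [100, D.div(22, 2)] into the deque.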
# Pops elements off the front of `deque` while `fun` holds, accumulating the
# Decimal prices of the removed elements. Returns the accumulated sum and
# the remaining deque.
def sum_drop_while(deque, acc, fun) do
{x, new_deque} = Deque.popleft(deque)
{acc, popped_que} =
if !is_nil(x) do
if fun.(x) do
[_time, price] = x
acc = D.add(acc, price)
sum_drop_while(new_deque, acc, fun)
else
{acc, deque}
end
else
{acc, deque}
end
{acc, popped_que}
end
end
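# Rolling-average bookkeeping used by update_ma/3, with illustrative values:
#
#     alias Decimal, as: D
#     acc = D.new(30)                                   # sum of prices in the window
#     acc = acc |> D.add(D.new(12)) |> D.sub(D.new(9))  # newest in, aged-out sum removed
#     D.div(acc, 3)                                     # average over the 3 remaining samples
#     #=> Decimal.new(11)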
defmodule Defparser do
@moduledoc """
Provides a way to define a parser for an arbitrary map with atom or string keys.
Works with `Ecto.Schema`s and `Ecto.Type`s.
## Example
iex(1)> defmodule Test do
...(1)> import Defparser
...(1)> defparser :user, %{
...(1)> username: %{first_name: :string, last_name: :string},
...(1)> birthdate: :date,
...(1)> favourite_numbers: [%{value: :integer}]
...(1)> }
...(1)> end
iex(2)> Test.parse_user(%{
...(2)> username: %{first_name: "User", last_name: "Name"},
...(2)> birthdate: "1990-01-01",
...(2)> favourite_numbers: [%{value: 1}, %{value: 2}]
...(2)> })
{:ok,
%{
__struct__: DefparserTest.Test.User,
username: %{
__struct__: DefparserTest.Test.User.Username,
first_name: "User",
last_name: "Name"
},
birthdate: ~D[1990-01-01],
favourite_numbers: [
%{
__struct__: DefparserTest.Test.User.FavouriteNumbers,
value: 1
},
%{
__struct__: DefparserTest.Test.User.FavouriteNumbers,
value: 2
}
]
}}
## Ecto.Type
You can provide an `Ecto.Type` as a field
iex(1)> defmodule AtomEctoType do
...(1)> @behaviour Ecto.Type
...(1)> def type, do: :string
...(1)> def cast(str), do: {:ok, String.to_atom(str)}
...(1)> def load(_), do: :error
...(1)> def dump(_), do: :error
...(1)> def embed_as(_), do: :error
...(1)> def equal?(a, b), do: a == b
...(1)> end
iex(2)> defmodule Parser do
...(2)> import Defparser
...(2)> defparser :data, %{atom: AtomEctoType}
...(2)> end
iex(3)> Parser.parse_data(%{atom: "atom"})
{:ok,
%{
__struct__: DefparserTest.Parser.Data,
atom: :atom
}}
## Ecto.Schema
You may want to pass already existing ecto schema
iex(1)> defmodule Schema do
...(1)> use Ecto.Schema
...(1)> @primary_key false
...(1)> embedded_schema do
...(1)> field :x, :integer
...(1)> end
...(1)> def changeset(schema, attrs) do
...(1)> Ecto.Changeset.cast(schema, attrs, [:x])
...(1)> end
...(1)> end
iex(2)> defmodule SchemaParser do
...(2)> import Defparser
...(2)> defparser :data, %{schema: {:embeds_one, Schema}}
...(2)> end
iex(3)> SchemaParser.parse_data(%{schema: %{x: "1"}})
{:ok,
%{
__struct__: DefparserTest.SchemaParser.Data,
schema: %{
__struct__: DefparserTest.Schema,
x: 1
}
}}
Besides `:embeds_one` it supports `:embeds_many` in case you expect
an array with the provided schema.
"""
@doc """
Defines a parser for arbitrary map.
"""
defmacro defparser(name, schema) do
{schema_definition, _} = Code.eval_quoted(schema, [], __CALLER__)
[{root, _} | _] =
schemas =
__CALLER__.module
|> base_namespace(name)
|> fetch_schemas(schema_definition)
schemas
|> Enum.map(&define_module/1)
|> Enum.each(&Code.compile_quoted(&1))
quote do
def unquote(:"parse_#{name}")(attrs) do
struct!(unquote(root))
|> unquote(root).changeset(attrs)
|> Ecto.Changeset.apply_action(:insert)
end
end
end
defp base_namespace(module, name) do
Module.concat([module, "#{Macro.camelize(to_string(name))}"])
end
defp fetch_schemas(namespace, map) do
{array_keys, rest} = fetch_arrays(map)
{schema_keys, value_keys} = fetch_maps(rest)
arrays =
fetch_all_schemas(
namespace,
Enum.map(array_keys, fn {k, [s]} -> {k, s} end)
)
schemas = fetch_all_schemas(namespace, schema_keys)
values = {namespace, schema_with_embeds(value_keys, arrays, schemas)}
[values] ++
Enum.flat_map(arrays, fn {_, _, s} -> s end) ++
Enum.flat_map(schemas, fn {_, _, s} -> s end)
end
defp fetch_arrays(map) do
Enum.split_with(map, fn
{_, [_]} -> true
{_, _} -> false
end)
end
defp fetch_maps(map) do
Enum.split_with(map, fn {_, x} -> is_map(x) end)
end
defp fetch_all_schemas(namespace, schemas) do
Enum.map(schemas, fn {k, s} ->
{
k,
namespace,
fetch_schemas(modulename_for_embed(namespace, k), s)
}
end)
end
defp schema_with_embeds(values, arrays, schemas) do
values ++
build_embeds_map(arrays, :embeds_many) ++
build_embeds_map(schemas, :embeds_one)
end
defp build_embeds_map(list, as) do
for {key, namespace, _schema} <- list do
{key, {as, modulename_for_embed(namespace, key)}}
end
end
defp modulename_for_embed(namespace, key) do
Module.concat(namespace, Macro.camelize("#{key}"))
end
defp define_module({module, schema}) do
quote do
defmodule unquote(module) do
use Ecto.Schema
@primary_key false
embedded_schema do
unquote(schema_body(schema))
end
def changeset(%__MODULE__{} = schema, attrs) do
unquote(__MODULE__).__schema_changeset__(
schema,
attrs,
unquote(schema_fields(schema)),
unquote(schema_assocs(schema))
)
end
end
end
end
defp schema_body(schema) do
for {key, type} <- schema do
case type do
{:embeds_one, ref} ->
quote do
embeds_one unquote(key), unquote(ref)
end
{:embeds_many, ref} ->
quote do
embeds_many unquote(key), unquote(ref)
end
type when is_atom(type) ->
quote do
field unquote(key), unquote(type)
end
end
end
end
defp schema_fields(schema) do
for {key, type} <- schema, is_atom(type), do: key
end
defp schema_assocs(schema) do
for {key, type} <- schema, assoc?(type), do: key
end
defp assoc?({_, _}), do: true
defp assoc?(_), do: false
@doc false
def __schema_changeset__(schema, attrs, fields, assocs) do
changeset = Ecto.Changeset.cast(schema, attrs, fields)
Enum.reduce(assocs, changeset, fn assoc, changeset ->
Ecto.Changeset.cast_embed(changeset, assoc)
end)
end
end
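# Roughly, `defparser :data, %{x: :integer}` inside a module `MyParser`
# (name assumed for illustration) compiles to a module equivalent to the
# following, plus a `parse_data/1` function that casts the attrs and
# applies the changeset:
#
#     defmodule MyParser.Data do
#       use Ecto.Schema
#
#       @primary_key false
#       embedded_schema do
#         field :x, :integer
#       end
#
#       def changeset(%__MODULE__{} = schema, attrs) do
#         Defparser.__schema_changeset__(schema, attrs, [:x], [])
#       end
#     end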
defmodule Rummage.Ecto.CustomHooks.SimpleSearch do
@moduledoc """
`Rummage.Ecto.CustomHooks.SimpleSearch` is a custom search hook that comes shipped
with `Rummage.Ecto`.
Usage:
For a regular search:
This returns a `queryable` which upon running will give a list of `Parent`(s)
searched by ascending `field_1`
```elixir
alias Rummage.Ecto.CustomHooks.SimpleSearch
searched_queryable = SimpleSearch.run(Parent, %{"search" => %{"field_1" => "field_!"}})
```
For a case-insensitive search:
This returns a `queryable` which upon running will give a list of `Parent`(s)
searched by ascending case insensitive `field_1`.
Keep in mind that `case_insensitive` can only be called for `text` fields
```elixir
alias Rummage.Ecto.CustomHooks.SimpleSearch
searched_queryable = SimpleSearch.run(Parent, %{"search" => %{"field_1.ci" => "field_!"}})
```
This module can be used by overriding the default search module. This can be done
in the following ways:
In the `Rummage.Ecto` call:
```elixir
Rummage.Ecto.rummage(queryable, rummage, search: Rummage.Ecto.CustomHooks.SimpleSearch)
```
OR
Globally for all models in `config.exs`:
```elixir
config :rummage_ecto,
Rummage.Ecto,
default_search: Rummage.Ecto.CustomHooks.SimpleSearch
```
"""
import Ecto.Query
@behaviour Rummage.Ecto.Hook
@doc """
Builds a search queryable on top of the given `queryable` from the rummage parameters
from the given `rummage` struct.
## Examples
When rummage struct passed doesn't have the key "search", it simply returns the
queryable itself:
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> SimpleSearch.run(Parent, %{})
Parent
When the queryable passed is not just a struct:
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> SimpleSearch.run(queryable, %{})
#Ecto.Query<from p in "parents">
When rummage `struct` passed has the key `"search"`, but with a value of `%{}`, `""`
or `[]` it simply returns the `queryable` itself:
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> SimpleSearch.run(Parent, %{"search" => %{}})
Parent
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> SimpleSearch.run(Parent, %{"search" => ""})
Parent
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> SimpleSearch.run(Parent, %{"search" => []})
Parent
When rummage struct passed has the key "search", with "field" and "term"
it returns a searched version of the queryable passed in as the argument:
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1" => "field_!"}}
%{"search" => %{"field_1" => "field_!"}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> SimpleSearch.run(queryable, rummage)
#Ecto.Query<from p in "parents", where: like(p.field_1, ^"%field_!%")>
When rummage struct passed has case-insensitive search, it returns
a searched version of the queryable with case_insensitive arguments:
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> import Ecto.Query
iex> rummage = %{"search" => %{"field_1.ci" => "field_!"}}
%{"search" => %{"field_1.ci" => "field_!"}}
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> SimpleSearch.run(queryable, rummage)
#Ecto.Query<from p in "parents", where: ilike(p.field_1, ^"%field_!%")>
"""
@spec run(Ecto.Query.t, map) :: Ecto.Query.t
def run(queryable, rummage) do
search_params = Map.get(rummage, "search")
case search_params do
a when a in [nil, [], {}, ""] -> queryable
_ -> handle_search(queryable, search_params)
end
end
@doc """
Implementation of `before_hook` for `Rummage.Ecto.CustomHooks.SimpleSearch`. This just returns back `rummage` at this point.
It doesn't matter what `queryable` or `opts` are, it just returns back `rummage`.
## Examples
iex> alias Rummage.Ecto.CustomHooks.SimpleSearch
iex> SimpleSearch.before_hook(Parent, %{}, %{})
%{}
"""
@spec before_hook(Ecto.Query.t, map, map) :: map
def before_hook(_queryable, rummage, _opts), do: rummage
defp handle_search(queryable, search_params) do
search_params
|> Map.to_list
|> Enum.reduce(queryable, &search_queryable(&1, &2))
end
defp search_queryable(param, queryable) do
field = param
|> elem(0)
case Regex.match?(~r/\w\.ci$/, field) do
true ->
field = field
|> String.split(".")
|> Enum.drop(-1)
|> Enum.join(".")
|> String.to_atom
term = elem(param, 1)
queryable
|> where([b],
ilike(field(b, ^field), ^"%#{String.replace(term, "%", "\\%")}%"))
_ ->
field = String.to_atom(field)
term = elem(param, 1)
queryable
|> where([b],
like(field(b, ^field), ^"%#{String.replace(term, "%", "\\%")}%"))
end
end
end
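# Multiple search fields compose with AND semantics, since each field adds
# another `where` clause (sketch):
#
#     SimpleSearch.run(Parent, %{"search" => %{"field_1" => "a", "field_2.ci" => "b"}})
#     #=> #Ecto.Query<from p in Parent, where: like(p.field_1, ^"%a%"),
#     #     where: ilike(p.field_2, ^"%b%")>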
defmodule CpuInfo do
@moduledoc """
**CpuInfo:** get CPU information, including a type, number of processors, number of physical cores and logical threads of a processor, and status of simultaneous multi-threads (hyper-threading).
"""
@latest_versions %{gcc: 9, "g++": 9, clang: 9, "clang++": 9}
defp os_type do
case :os.type() do
{:unix, :linux} -> :linux
{:unix, :darwin} -> :macos
{:unix, :freebsd} -> :freebsd
{:win32, _} -> :windows
_ -> :unknown
end
end
@doc """
Show all profile information on CPU and the system. The results are a map that contains the following keys:
* **compiler:** its corresponding value is a map that contains the following keys:
* **apple_clang:** (only on macOS) its corresponding value is a list of maps whose values contain the information of the Apple Clang compiler that is on `/usr/bin`;
* **apple_clang++:** (only on macOS) its corresponding value is a list of maps whose values contain the information of the Apple Clang++ compiler that is on `/usr/bin`;
* **cc_env:** its corresponding value is a list of a map whose values contain the information of the C compiler that the environment variable `CC` points;
* **cflags_env:** this is the value of the environment variable `CFLAGS`;
* **clang:** its corresponding value is a list of maps whose values contain the information of the Clang compilers that are executable along `PATH`;
* **clang++:** its corresponding value is a list of maps whose values contain the information of the Clang++ compilers that are executable along `PATH`;
* **cxx_env** its corresponding value is a list of a map whose values contain the information of the C++ compiler that the environment variable `CXX` points;
* **cxxflags_env:** this is the value of the environment variable `CXXFLAGS`;
* **gcc:** its corresponding value is a list of maps whose values contain the information of the GCC compilers that are executable along `PATH`;
* **g++:** its corresponding value is a list of maps whose values contain the information of the G++ compilers that are executable along `PATH`;
* **ldflags_env:** this is the value of the environment variable `LDFLAGS`
* each compiler entry contains the following keys:
* **bin:** path to the executable;
* **type:** :clang, :gcc, :apple_clang, :unknown, or :undefined;
* **version:** the version number of the compiler;
* **versions:** the full version string reported by the compiler
* **cpu:** its corresponding value is a map that contains the following keys:
* **cpu_model:** a string of the CPU model;
* **cpu_models:** a list of strings corresponding to each thread (in the case of Linux);
* **cpu_type:** according to :erlang.system_info(:system_architecture);
* **hyper_threading:** :enabled or :disabled;
* **num_of_cores_of_a_processor:** the number of cores of a processor;
* **num_of_processors:** the number of processors;
* **num_of_threads_of_a_processor:** the number of threads of a processor;
* **total_num_of_cores:** total number of cores;
* **total_num_of_threads:** total number of threads;
* **cuda:** its corresponding value is a map that contains the following keys:
* **bin:** path to executables of CUDA;
* **cuda:** existence of CUDA (true or false)
* **include:** path to include files of CUDA;
* **lib:** path to libraries of CUDA;
* **nvcc:** path to the executable of nvcc;
* **version:** CUDA version number
* **elixir:** its corresponding value is a map that contains the following keys:
* **version:** Elixir version
* **erlang:** its corresponding value is a map that contains the following keys:
* **otp_version:** OTP version
* **kernel:** its corresponding value is a map that contains the following keys:
* **kernel_release:** the release version of the kernel;
* **kernel_version:** the full version string of the kernel;
* **os_type:** type of OS (:macos, :linux, :windows, :freebsd or :unknown);
* **system_version:** the os or distribution name
* **metal:** its corresponding value is a map that contains the following keys:
* **metal:** existence of Metal (true or false)
"""
def all_profile do
os_type = os_type()
cpu_type = %{cpu: cpu_type_sub(os_type)}
kernel_type = %{kernel: kernel_type_sub(os_type)}
cuda_info = %{cuda: cuda(os_type)}
metal_info = %{metal: metal(os_type)}
elixir_version = %{
elixir: %{
version: System.version()
},
erlang: %{
otp_version: :erlang.system_info(:otp_release) |> List.to_string() |> String.to_integer()
}
}
compilers =
%{gcc: cc(:gcc)}
|> Map.merge(%{"g++": cc(:"g++")})
|> Map.merge(%{clang: cc(:clang)})
|> Map.merge(%{"clang++": cc(:"clang++")})
|> Map.merge(%{cc_env: cc_env("CC")})
|> Map.merge(%{cxx_env: cc_env("CXX")})
|> Map.merge(%{cflags_env: flags_env("CFLAGS")})
|> Map.merge(%{cxxflags_env: flags_env("CXXFLAGS")})
|> Map.merge(%{ldflags_env: flags_env("LDFLAGS")})
compilers =
if os_type == :macos do
compilers
|> Map.merge(%{apple_clang: cc(:apple_clang)})
|> Map.merge(%{"apple_clang++": cc(:"apple_clang++")})
else
compilers
end
compilers = %{compiler: compilers}
Map.merge(cpu_type, cuda_info)
|> Map.merge(metal_info)
|> Map.merge(kernel_type)
|> Map.merge(elixir_version)
|> Map.merge(compilers)
end
defp confirm_executable(command) do
if is_nil(System.find_executable(command)) do
raise RuntimeError, message: "#{command} isn't found."
end
end
defp kernel_type_sub(:unknown) do
%{
kernel_release: :unknown,
kernel_version: :unknown,
system_version: :unknown,
os_type: :unknown
}
end
defp kernel_type_sub(:windows) do
%{
kernel_release: :unknown,
kernel_version: :unknown,
system_version: :unknown,
os_type: :windows
}
end
defp kernel_type_sub(:linux) do
os_info =
File.read!("/etc/os-release")
|> String.split("\n")
|> Enum.reverse()
|> tl
|> Enum.reverse()
|> Enum.map(&String.split(&1, "="))
|> Enum.map(fn [k, v] -> {k, v |> String.trim("\"")} end)
|> Map.new()
kernel_release =
case File.read("/proc/sys/kernel/osrelease") do
{:ok, result} -> String.trim(result)
_ -> nil
end
system_version = Map.get(os_info, "PRETTY_NAME")
kernel_version =
case File.read("/proc/sys/kernel/version") do
{:ok, result} -> String.trim(result)
_ -> nil
end
%{
kernel_release: kernel_release,
kernel_version: kernel_version,
system_version: system_version,
os_type: :linux
}
end
defp kernel_type_sub(:freebsd) do
confirm_executable("uname")
kernel_release =
case System.cmd("uname", ["-r"]) do
{result, 0} -> result |> String.trim()
_ -> raise RuntimeError, message: "uname don't work."
end
system_version =
case System.cmd("uname", ["-r"]) do
{result, 0} -> result |> String.trim()
_ -> ""
end
kernel_version =
case System.cmd("uname", ["-r"]) do
{result, 0} -> result |> String.trim()
_ -> raise RuntimeError, message: "uname don't work."
end
%{
kernel_release: kernel_release,
kernel_version: kernel_version,
system_version: system_version,
os_type: :freebsd
}
end
defp kernel_type_sub(:macos) do
confirm_executable("uname")
confirm_executable("system_profiler")
kernel_release =
try do
case System.cmd("uname", ["-r"]) do
{result, 0} -> result |> String.trim()
_ -> :os.version() |> Tuple.to_list() |> Enum.join(".")
end
rescue
_e in ErlangError -> nil
end
%{
kernel_release: kernel_release
}
|> Map.merge(
try do
case System.cmd("system_profiler", ["SPSoftwareDataType"]) do
{result, 0} -> result |> detect_system_and_kernel_version()
_ -> nil
end
rescue
_e in ErlangError -> nil
end
)
end
defp cpu_type_sub(:unknown) do
cpu_type =
:erlang.system_info(:system_architecture) |> List.to_string() |> String.split("-") |> hd
%{
cpu_type: cpu_type,
cpu_model: :unknown,
cpu_models: :unknown,
num_of_processors: :unknown,
num_of_cores_of_a_processor: :unknown,
total_num_of_cores: :unknown,
num_of_threads_of_a_processor: :unknown,
total_num_of_threads: System.schedulers_online(),
hyper_threading: :unknown
}
end
defp cpu_type_sub(:windows) do
cpu_type =
:erlang.system_info(:system_architecture) |> List.to_string() |> String.split("-") |> hd
%{
cpu_type: cpu_type,
cpu_model: :unknown,
cpu_models: :unknown,
num_of_processors: :unknown,
num_of_cores_of_a_processor: :unknown,
total_num_of_cores: :unknown,
num_of_threads_of_a_processor: :unknown,
total_num_of_threads: System.schedulers_online(),
hyper_threading: :unknown
}
end
defp cpu_type_sub(:linux) do
cpu_type =
:erlang.system_info(:system_architecture) |> List.to_string() |> String.split("-") |> hd
info =
File.read!("/proc/cpuinfo")
|> String.split("\n\n")
# drop the last (empty) item
|> Enum.reverse()
|> tl()
|> Enum.reverse()
|> Enum.map(fn cpuinfo ->
String.split(cpuinfo, "\n")
|> Enum.map(fn item ->
[k | v] = String.split(item, ~r"\t+: ")
{k, v}
end)
|> Map.new()
end)
cpu_models = Enum.map(info, &Map.get(&1, "model name")) |> List.flatten()
cpu_model = hd(cpu_models)
num_of_processors =
Enum.map(info, &Map.get(&1, "physical id"))
|> Enum.uniq()
|> Enum.count()
t1 =
Enum.map(info, &Map.get(&1, "processor"))
|> Enum.uniq()
|> Enum.reject(&is_nil(&1))
|> length
t =
Enum.map(info, &Map.get(&1, "cpu cores"))
|> Enum.uniq()
|> Enum.reject(&is_nil(&1))
|> Enum.map(&(&1 |> hd |> String.to_integer()))
|> Enum.sum()
total_num_of_cores = if t == 0, do: t1, else: t
num_of_cores_of_a_processor = div(total_num_of_cores, num_of_processors)
total_num_of_threads =
Enum.map(info, &Map.get(&1, "processor"))
|> Enum.count()
num_of_threads_of_a_processor = div(total_num_of_threads, num_of_processors)
ht =
if total_num_of_cores < total_num_of_threads do
:enabled
else
:disabled
end
%{
cpu_type: cpu_type,
cpu_model: cpu_model,
cpu_models: cpu_models,
num_of_processors: num_of_processors,
num_of_cores_of_a_processor: num_of_cores_of_a_processor,
total_num_of_cores: total_num_of_cores,
num_of_threads_of_a_processor: num_of_threads_of_a_processor,
total_num_of_threads: total_num_of_threads,
hyper_threading: ht
}
end
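# For reference, each /proc/cpuinfo stanza parses into a map whose values
# are single-element lists (illustrative excerpt):
#
#     %{"processor" => ["0"], "physical id" => ["0"], "cpu cores" => ["4"],
#       "model name" => ["Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz"]}
#
# Hyper-threading is then reported as :enabled whenever the total thread
# count exceeds the total core count.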
defp cpu_type_sub(:freebsd) do
confirm_executable("uname")
confirm_executable("sysctl")
cpu_type =
case System.cmd("uname", ["-m"]) do
{result, 0} -> result |> String.trim()
_ -> raise RuntimeError, message: "uname don't work."
end
cpu_model =
case System.cmd("sysctl", ["-n", "hw.model"]) do
{result, 0} -> result |> String.trim()
_ -> raise RuntimeError, message: "sysctl don't work."
end
cpu_models = [cpu_model]
total_num_of_cores =
case System.cmd("sysctl", ["-n", "kern.smp.cores"]) do
{result, 0} -> result |> String.trim() |> String.to_integer()
_ -> raise RuntimeError, message: "sysctl don't work."
end
total_num_of_threads =
case System.cmd("sysctl", ["-n", "kern.smp.cpus"]) do
{result, 0} -> result |> String.trim() |> String.to_integer()
_ -> raise RuntimeError, message: "sysctl don't work."
end
ht =
case System.cmd("sysctl", ["-n", "machdep.hyperthreading_allowed"]) do
{"1\n", 0} -> :enabled
{"0\n", 0} -> :disabled
_ -> raise RuntimeError, message: "sysctl don't work."
end
%{
cpu_type: cpu_type,
cpu_model: cpu_model,
cpu_models: cpu_models,
num_of_processors: :unknown,
num_of_cores_of_a_processor: :unknown,
total_num_of_cores: total_num_of_cores,
num_of_threads_of_a_processor: :unknown,
total_num_of_threads: total_num_of_threads,
hyper_threading: ht
}
end
defp cpu_type_sub(:macos) do
confirm_executable("uname")
confirm_executable("system_profiler")
cpu_type =
try do
case System.cmd("uname", ["-m"]) do
{result, 0} -> result |> String.trim()
_ -> nil
end
rescue
_e in ErlangError -> nil
end
%{
cpu_type: cpu_type
}
|> Map.merge(
try do
case System.cmd("system_profiler", ["SPHardwareDataType"]) do
{result, 0} -> result |> parse_macos
_ -> nil
end
rescue
_e in ErlangError -> nil
end
)
end
defp detect_system_and_kernel_version(message) do
trimmed_message = message |> split_trim
%{
kernel_version:
trimmed_message
|> Enum.filter(&String.match?(&1, ~r/Kernel Version/))
|> hd
|> String.split()
|> Enum.slice(2..-1)
|> Enum.join(" "),
system_version:
trimmed_message
|> Enum.filter(&String.match?(&1, ~r/System Version/))
|> hd
|> String.split()
|> Enum.slice(2..-1)
|> Enum.join(" ")
}
end
defp parse_macos(message) do
trimmed_message = message |> split_trim
cpu_model =
Enum.filter(trimmed_message, &String.match?(&1, ~r/(Processor Name|Chip)/))
|> hd
|> String.split()
|> Enum.slice(2..-1)
|> Enum.join(" ")
cpu_models = [cpu_model]
num_of_processors =
trimmed_message
|> Enum.filter(&String.match?(&1, ~r/Number of Processors/))
|> case do
[match] -> match_to_integer(match)
[] -> 1
end
num_of_cores =
trimmed_message
|> Enum.filter(&String.match?(&1, ~r/Total Number of Cores/))
|> hd
{total_num_of_cores, num_of_pcores, num_of_ecores} =
cond do
String.match?(num_of_cores, ~r/performance .* efficiency/) ->
parse_cores(num_of_cores)
true ->
{match_to_integer(num_of_cores), 0, 0}
end
num_of_cores_of_a_processor = div(total_num_of_cores, num_of_processors)
m_ht = Enum.filter(trimmed_message, &String.match?(&1, ~r/Hyper-Threading Technology/))
ht =
if length(m_ht) > 0 and String.match?(hd(m_ht), ~r/Enabled/) do
:enabled
else
:disabled
end
total_num_of_threads =
total_num_of_cores *
case ht do
:enabled -> 2
:disabled -> 1
end
num_of_threads_of_a_processor = div(total_num_of_threads, num_of_processors)
%{
os_type: :macos,
cpu_model: cpu_model,
cpu_models: cpu_models,
num_of_processors: num_of_processors,
num_of_cores_of_a_processor: num_of_cores_of_a_processor,
total_num_of_cores: total_num_of_cores,
num_of_threads_of_a_processor: num_of_threads_of_a_processor,
total_num_of_threads: total_num_of_threads,
hyper_threading: ht,
num_of_pcores: num_of_pcores,
num_of_ecores: num_of_ecores
}
end
defp parse_cores(num_of_cores) do
map_cores =
Regex.named_captures(
~r/Total Number of Cores: (?<total_num_of_cores>[0-9]+) \((?<num_of_pcores>[0-9]+) performance and (?<num_of_ecores>[0-9]+) efficiency\)/,
num_of_cores
)
{
map_cores |> Map.get("total_num_of_cores", 1) |> String.to_integer(),
map_cores |> Map.get("num_of_pcores", 0) |> String.to_integer(),
map_cores |> Map.get("num_of_ecores", 0) |> String.to_integer()
}
end
defp split_trim(message) do
message |> String.split("\n") |> Enum.map(&String.trim(&1))
end
defp match_to_integer(message) do
Regex.run(~r/[0-9]+/, message) |> hd |> String.to_integer()
end
def flags_env(env) do
flags = System.get_env(env)
if is_nil(flags) do
""
else
flags
end
end
def cc_env(env) do
System.get_env(env)
|> cc_env_sub()
end
defp cc_env_sub(nil) do
[]
end
defp cc_env_sub(cc) do
exe = System.find_executable(cc)
cond do
is_nil(exe) ->
# return a list to match the shape of the other branches
[
%{
bin: cc,
type: :undefined
}
]
String.match?(exe, ~r/clang\+\+/) ->
cc_sub([exe], :"clang++")
String.match?(exe, ~r/clang/) ->
cc_sub([exe], :clang)
String.match?(exe, ~r/g\+\+/) ->
cc_sub([exe], :"g++")
String.match?(exe, ~r/gcc/) ->
cc_sub([exe], :gcc)
true ->
[
%{
bin: exe,
type: :unknown
}
]
end
end
def cc(:apple_clang) do
exe = "/usr/bin/clang"
[System.find_executable(exe)]
|> cc_sub(:apple_clang)
end
def cc(:"apple_clang++") do
exe = "/usr/bin/clang++"
[System.find_executable(exe)]
|> cc_sub(:"apple_clang++")
end
def cc(type) do
exe = Atom.to_string(type)
latest_version = Map.get(@latest_versions, type)
list_executable_versions(exe, 1, latest_version)
|> cc_sub(type)
end
defp cc_sub(exes, type) do
Enum.map(
exes,
&(%{bin: &1}
|> Map.merge(
execute_to_get_version(&1)
|> parse_versions(type)
|> parse_version_number()
))
)
end
defp list_executable_versions(exe, from, to) do
([System.find_executable(exe)] ++
Enum.map(from..to, &System.find_executable(exe <> "-" <> Integer.to_string(&1))))
|> Enum.filter(&(&1 != nil))
end
defp execute_to_get_version(exe) do
System.cmd(exe, ["--version"], stderr_to_stdout: true)
|> elem(0)
end
defp parse_versions(result, :gcc) do
if String.match?(result, ~r/Copyright \(C\) [0-9]+ Free Software Foundation, Inc\./) do
versions = String.split(result, "\n") |> Enum.at(0)
%{type: :gcc, versions: versions}
else
parse_versions(result, :apple_clang)
end
end
defp parse_versions(result, :"g++") do
if String.match?(result, ~r/Copyright \(C\) [0-9]+ Free Software Foundation, Inc\./) do
versions = String.split(result, "\n") |> Enum.at(0)
%{type: :"g++", versions: versions}
else
parse_versions(result, :"apple_clang++")
end
end
defp parse_versions(result, :clang) do
if String.match?(result, ~r/Apple/) do
parse_versions(result, :apple_clang)
else
versions = String.split(result, "\n") |> Enum.at(0)
%{type: :clang, versions: versions}
end
end
defp parse_versions(result, :"clang++") do
if String.match?(result, ~r/Apple/) do
parse_versions(result, :"apple_clang++")
else
versions = String.split(result, "\n") |> Enum.at(0)
%{type: :"clang++", versions: versions}
end
end
defp parse_versions(result, :apple_clang) do
%{type: :apple_clang}
|> Map.merge(
Regex.named_captures(~r/(?<versions>Apple .* version [0-9.]+ .*)\n/, result)
|> key_string_to_atom()
)
end
defp parse_versions(result, :"apple_clang++") do
%{type: :"apple_clang++"}
|> Map.merge(
Regex.named_captures(~r/(?<versions>Apple .* version [0-9.]+ .*)\n/, result)
|> key_string_to_atom()
)
end
defp key_string_to_atom(map) do
if is_nil(map) do
%{versions: ""}
else
Map.keys(map)
|> Enum.map(
&{
String.to_atom(&1),
Map.get(map, &1)
}
)
|> Map.new()
end
end
defp parse_version_number(map) do
Map.merge(
map,
Regex.named_captures(~r/(?<version>[0-9]+\.[0-9.]+)/, Map.get(map, :versions))
|> key_string_to_atom()
)
end
defp cuda(:linux) do
case File.read("/usr/local/cuda/version.txt") do
{:ok, cuda_version} ->
%{cuda: true}
|> Map.merge(parse_cuda_version(cuda_version))
|> Map.merge(%{
bin: find_path("/usr/local/cuda/bin"),
include: find_path("/usr/local/cuda/include"),
lib: find_path("/usr/local/cuda/lib64"),
nvcc: System.find_executable("/usr/local/cuda/bin/nvcc")
})
{:error, _reason} ->
%{cuda: false}
end
end
defp cuda(_) do
%{cuda: false}
end
defp parse_cuda_version(cuda_version) do
Regex.named_captures(~r/CUDA Version (?<version>[0-9.]+)/, cuda_version)
|> key_string_to_atom()
end
defp find_path(path) do
if File.exists?(path) do
path
else
nil
end
end
defp metal(:macos) do
confirm_executable("system_profiler")
try do
case System.cmd("system_profiler", ["SPDisplaysDataType"]) do
{result, 0} -> result |> detect_metal_supported()
_ -> %{metal: false}
end
rescue
_e in ErlangError -> %{metal: false}
end
end
defp metal(_) do
%{metal: false}
end
defp detect_metal_supported(message) do
trimmed_message = message |> split_trim
%{
metal:
trimmed_message
|> Enum.map(&String.match?(&1, ~r/Metal( Family)?: Supported/))
|> Enum.reduce(false, fn x, acc -> x or acc end)
}
end
end
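# Example (output is machine-specific; the values below are illustrative):
#
#     CpuInfo.all_profile()
#     #=> %{
#     #     cpu: %{cpu_type: "x86_64", num_of_processors: 1,
#     #            total_num_of_cores: 4, total_num_of_threads: 8,
#     #            hyper_threading: :enabled, ...},
#     #     kernel: %{os_type: :linux, ...},
#     #     elixir: %{version: "1.12.3"},
#     #     erlang: %{otp_version: 24},
#     #     compiler: %{gcc: [...], clang: [...], ...},
#     #     cuda: %{cuda: false},
#     #     metal: %{metal: false}
#     #   }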
defmodule ExAliyun.Client.RPC do
@moduledoc """
Aliyun RPC client.
### Usage
Please go to Aliyun api explorer to find how to access each service:
https://api.aliyun.com/new#/?product=Dysmsapi&api=QuerySendDetails¶ms={}&tab=DEMO&lang=RUBY
Below is an example for `SendSms`:
```elixir
alias ExAliyun.Client.RPC
client = %RPC{
endpoint: "https://dysmsapi.aliyuncs.com",
access_key_id: "<access_key_id>",
access_key_secret: "<access_key_secret>",
api_version: "2017-05-25",
}
params = %{
"RegionId" => "cn-hangzhou",
"PhoneNumbers" => "1865086****",
"SignName" => "SignName",
"TemplateCode" => "SMS_11111",
"TemplateParam" => "{\"code\":123123}"
}
RPC.request(client, "SendSms", params)
```
"""
@enforce_keys [:endpoint, :api_version, :access_key_id, :access_key_secret]
defstruct [:endpoint, :api_version, :access_key_id, :access_key_secret]
@type t :: %__MODULE__{
endpoint: binary(),
api_version: binary(),
access_key_id: binary(),
access_key_secret: binary(),
}
@spec request(__MODULE__.t(), binary(), map(), binary()) :: {:ok, map()} | {:error, map()}
def request(client, action, params, method \\ "GET") do
qs =
default_params(client)
|> Map.put(:Action, action)
|> Map.merge(params)
|> normalize
signature = sign(client, qs, method)
qs = "Signature=#{signature}&#{qs}"
endpoint = String.trim_trailing(client.endpoint, "/") <> "/"
request = case method do
"GET" ->
%HTTPoison.Request{
method: :get,
url: "#{endpoint}?#{qs}"
}
"POST" ->
%HTTPoison.Request{
method: :post,
url: endpoint,
body: qs,
headers: [{"Content-Type", "application/x-www-form-urlencoded"}]
}
end
with {:ok, resp} <- HTTPoison.request(request),
{:ok, body} <- verify_status(resp),
{:ok, json_data} <- Jason.decode(body) do
{:ok, json_data}
else
{:error, error} -> {:error, error}
end
end
defp verify_status(%{status_code: 200, body: body}) do
{:ok, body}
end
# Aliyun reports request errors as a JSON body with status 400, so pass
# the body through for the caller to decode.
defp verify_status(%{status_code: 400, body: body}) do
{:ok, body}
end
defp verify_status(%{status_code: status_code}) do
{:error, %{status_code: status_code}}
end
def normalize(params) do
params
|> Map.keys
|> Enum.sort
|> Enum.reduce(%{}, fn k, acc -> Map.put(acc, k, params[k]) end)
|> URI.encode_query
end
defp sign(client, qs, method) do
str = pop_encode(qs)
str = "#{method}&%2F&#{str}"
key = "#{client.access_key_secret}&"
# :crypto.hmac/3 was removed in OTP 24; :crypto.mac/4 is its replacement
:crypto.mac(:hmac, :sha, key, str)
|> Base.encode64
|> URI.encode_www_form
end
defp pop_encode(str) do
str
|> URI.encode_www_form
|> String.replace("+", "%20")
|> String.replace("*", "%2A")
|> String.replace("%7E", "~")
end
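# Signing sketch (illustrative values): given the canonicalized query string
#
#     "AccessKeyId=testid&Action=SendSms&Format=JSON&..."
#
# the string-to-sign becomes
#
#     "GET&%2F&AccessKeyId%3Dtestid%26Action%3DSendSms%26Format%3DJSON%26..."
#
# which is HMAC-SHA1 signed with "<access_key_secret>&" (note the trailing
# "&"), Base64-encoded, and finally URL-encoded before being prepended to
# the query string as the Signature parameter.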
defp default_params(client) do
%{
AccessKeyId: client.access_key_id,
Version: client.api_version,
Format: "JSON",
SignatureMethod: "HMAC-SHA1",
SignatureVersion: "1.0",
SignatureNonce: random_nonce(),
Timestamp: timestamp(),
}
end
defp timestamp do
DateTime.utc_now()
|> Map.put(:microsecond, {0, 0})
|> DateTime.to_iso8601
end
defp random_nonce(len \\ 10) do
:crypto.strong_rand_bytes(len)
|> Base.url_encode64
|> binary_part(0, len)
end
end
defmodule XDR.DoubleFloat do
@moduledoc """
This module manages the `Double-Precision Floating-Point` type based on the RFC4506 XDR Standard.
"""
@behaviour XDR.Declaration
alias XDR.Error.DoubleFloat, as: DoubleFloatError
defstruct [:float]
defguard valid_float?(value) when is_float(value) or is_integer(value)
@typedoc """
`XDR.DoubleFloat` struct type specification.
"""
@type t :: %XDR.DoubleFloat{float: integer | float | binary}
@doc """
Create a new `XDR.DoubleFloat` structure from the `float` passed.
"""
@spec new(float :: float | integer | binary) :: t
def new(float), do: %XDR.DoubleFloat{float: float}
@impl XDR.Declaration
@doc """
Encode a `XDR.DoubleFloat` structure into a XDR format.
"""
@spec encode_xdr(double_float :: t) :: {:ok, binary()} | {:error, :not_number}
def encode_xdr(%XDR.DoubleFloat{float: float}) when not valid_float?(float),
do: {:error, :not_number}
def encode_xdr(%XDR.DoubleFloat{float: float}), do: {:ok, <<float::big-signed-float-size(64)>>}
@impl XDR.Declaration
@doc """
Encode a `XDR.DoubleFloat` structure into a XDR format.
If the `double_float` is not valid, an exception is raised.
"""
@spec encode_xdr!(double_float :: t) :: binary()
def encode_xdr!(double_float) do
case encode_xdr(double_float) do
{:ok, binary} -> binary
{:error, reason} -> raise(DoubleFloatError, reason)
end
end
@impl XDR.Declaration
@doc """
Decode the Double-Precision Floating-Point in XDR format to a `XDR.DoubleFloat` structure.
"""
@spec decode_xdr(bytes :: binary, double_float :: t) ::
{:ok, {t, binary()}} | {:error, :not_binary}
def decode_xdr(bytes, double_float \\ nil)
def decode_xdr(bytes, _double_float) when not is_binary(bytes), do: {:error, :not_binary}
def decode_xdr(bytes, _double_float) do
<<float::big-signed-float-size(64), rest::binary>> = bytes
decoded_float = new(float)
{:ok, {decoded_float, rest}}
end
@impl XDR.Declaration
@doc """
Decode the Double-Precision Floating-Point in XDR format to a `XDR.DoubleFloat` structure.
If the binaries are not valid, an exception is raised.
"""
@spec decode_xdr!(bytes :: binary, double_float :: t) :: {t, binary()}
def decode_xdr!(bytes, double_float \\ nil)
def decode_xdr!(bytes, double_float) do
case decode_xdr(bytes, double_float) do
{:ok, result} -> result
{:error, reason} -> raise(DoubleFloatError, reason)
end
end
end
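# Round-trip sketch:
#
#     3.5
#     |> XDR.DoubleFloat.new()
#     |> XDR.DoubleFloat.encode_xdr!()
#     #=> <<64, 12, 0, 0, 0, 0, 0, 0>>
#
#     XDR.DoubleFloat.decode_xdr!(<<64, 12, 0, 0, 0, 0, 0, 0>>)
#     #=> {%XDR.DoubleFloat{float: 3.5}, ""}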
defmodule Maze do
@doc """
Creates non-overlapping tiles representing the maze.
## Examples
iex> Maze.tiles([[4,8]], %{width: 2, height: 1, hall_width: 1})
[
%{tile: "#", x: 1, y: 1},
%{tile: "#", x: 2, y: 1},
%{tile: "#", x: 3, y: 1},
%{tile: "#", x: 4, y: 1},
%{tile: "#", x: 5, y: 1},
%{tile: "#", x: 1, y: 2},
%{tile: " ", x: 2, y: 2},
%{tile: " ", x: 3, y: 2},
%{tile: " ", x: 4, y: 2},
%{tile: "#", x: 5, y: 2},
%{tile: "#", x: 1, y: 3},
%{tile: "#", x: 2, y: 3},
%{tile: "#", x: 3, y: 3},
%{tile: "#", x: 4, y: 3},
%{tile: "#", x: 5, y: 3}
]
iex> Maze.tiles([[4,8]], %{width: 2, height: 1, hall_width: 2})
[
%{tile: "#", x: 1, y: 1},
%{tile: "#", x: 2, y: 1},
%{tile: "#", x: 3, y: 1},
%{tile: "#", x: 4, y: 1},
%{tile: "#", x: 5, y: 1},
%{tile: "#", x: 6, y: 1},
%{tile: "#", x: 7, y: 1},
%{tile: "#", x: 1, y: 2},
%{tile: " ", x: 2, y: 2},
%{tile: " ", x: 3, y: 2},
%{tile: " ", x: 4, y: 2},
%{tile: " ", x: 5, y: 2},
%{tile: " ", x: 6, y: 2},
%{tile: "#", x: 7, y: 2},
%{tile: "#", x: 1, y: 3},
%{tile: " ", x: 2, y: 3},
%{tile: " ", x: 3, y: 3},
%{tile: " ", x: 4, y: 3},
%{tile: " ", x: 5, y: 3},
%{tile: " ", x: 6, y: 3},
%{tile: "#", x: 7, y: 3},
%{tile: "#", x: 1, y: 4},
%{tile: "#", x: 2, y: 4},
%{tile: "#", x: 3, y: 4},
%{tile: "#", x: 4, y: 4},
%{tile: "#", x: 5, y: 4},
%{tile: "#", x: 6, y: 4},
%{tile: "#", x: 7, y: 4}
]
iex> Maze.tiles([[4, 10, 2]], %{width: 3, height: 2, hall_width: 2}) |> Enum.group_by(&{&1.y})
%{
{1} => [
%{tile: "#", x: 1, y: 1},
%{tile: "#", x: 2, y: 1},
%{tile: "#", x: 3, y: 1},
%{tile: "#", x: 4, y: 1},
%{tile: "#", x: 5, y: 1},
%{tile: "#", x: 6, y: 1},
%{tile: "#", x: 7, y: 1},
%{tile: "#", x: 8, y: 1},
%{tile: "#", x: 9, y: 1},
%{tile: "#", x: 10, y: 1}
],
{2} => [
%{tile: "#", x: 1, y: 2},
%{tile: " ", x: 2, y: 2},
%{tile: " ", x: 3, y: 2},
%{tile: " ", x: 4, y: 2},
%{tile: " ", x: 5, y: 2},
%{tile: " ", x: 6, y: 2},
%{tile: "#", x: 7, y: 2},
%{tile: " ", x: 8, y: 2},
%{tile: " ", x: 9, y: 2},
%{tile: "#", x: 10, y: 2}
],
{3} => [
%{tile: "#", x: 1, y: 3},
%{tile: " ", x: 2, y: 3},
%{tile: " ", x: 3, y: 3},
%{tile: " ", x: 4, y: 3},
%{tile: " ", x: 5, y: 3},
%{tile: " ", x: 6, y: 3},
%{tile: "#", x: 7, y: 3},
%{tile: " ", x: 8, y: 3},
%{tile: " ", x: 9, y: 3},
%{tile: "#", x: 10, y: 3}
],
{4} => [
%{tile: "#", x: 1, y: 4},
%{tile: "#", x: 2, y: 4},
%{tile: "#", x: 3, y: 4},
%{tile: "#", x: 4, y: 4},
%{tile: " ", x: 5, y: 4},
%{tile: " ", x: 6, y: 4},
%{tile: "#", x: 7, y: 4},
%{tile: " ", x: 8, y: 4},
%{tile: " ", x: 9, y: 4},
%{tile: "#", x: 10, y: 4}
]
}
"""
def tiles(maze, args) do
MazeTransformer.all_rooms("layer2", maze, args)
|> Enum.map(fn {loc, room} ->
{loc, Room.layout(room, args.hall_width)}
end)
|> Enum.map(fn {loc, room_def} ->
room_def
|> Enum.with_index(0)
|> Enum.map(fn {row, add_y} ->
row
|> String.graphemes()
|> Enum.with_index(0)
|> Enum.map(fn {tile, add_x} ->
%{x: loc.x + add_x, y: loc.y + add_y, tile: tile}
end)
end)
end)
|> List.flatten()
# FIXME:
# We resolve double walls by moving rooms up and left
# over pre-existing walls, but this causes tiles to go
# missing. Reverse sorting on "#" vs " ", followed by
# removing duplicate x,y tiles fixes it.
# There must be a bug elsewhere.
|> Enum.sort_by(& &1.tile)
|> Enum.reverse()
|> Enum.uniq_by(&{&1.y, &1.x})
|> Enum.sort_by(&{&1.y, &1.x})
end
def to_s(maze, args) do
text =
tiles(maze, args)
|> Enum.map(fn tile ->
case tile.x do
1 ->
"\n" <> tile.tile
_ ->
tile.tile
end
end)
|> Enum.join()
IO.puts(text)
text
end
end
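# Rendering sketch: Maze.to_s([[4, 8]], %{width: 2, height: 1, hall_width: 1})
# prints (and returns) the first doctest's tiles as text, after a leading
# newline:
#
#     #####
#     #   #
#     #####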
defmodule DistAgent.Behaviour do
@moduledoc """
Behaviour module for distributed agents.
The 6 callbacks are classified into the following 2 dimensions:
- how the callback is used, i.e., for pure manipulation of state or for side effect
- when the callback is used, i.e., type of event that triggers the callback
The following table summarizes the classification:
| | for pure state manipulation | for side effect |
| ----------------------------------------- | --------------------------- | ------------------- |
| used when `DistAgent.query/5` is called | `c:handle_query/4` | `c:after_query/5` |
| used when `DistAgent.command/5` is called | `c:handle_command/4` | `c:after_command/6` |
| used when a low-resolution timer fires | `c:handle_timeout/4` | `c:after_timeout/5` |
"""
alias DistAgent.OnTick
alias DistAgent.Quota.Name, as: QName
@type agent_key :: String.t
@type data :: any
@type command :: any
@type query :: any
@type ret :: any
@type milliseconds_since_epoch :: pos_integer
@doc """
Invoked to handle an incoming read-only query.
Implementations of this callback must be pure.
The callback takes the following arguments:
- 1st argument is the name of the quota in which this distributed agent belongs.
- 2nd argument is the key of the agent.
- 3rd argument is the state of the agent.
- 4th argument is the `query` passed to the `DistAgent.query/5`.
Return value of this callback is delivered to the caller process of `DistAgent.query/5`.
"""
@callback handle_query(quota_name :: QName.t, agent_key, data, query) :: ret
@doc """
Invoked to handle an incoming command.
Implementations of this callback must be pure.
The callback takes the following arguments:
- 1st argument is the name of the quota in which this distributed agent belongs.
- 2nd argument is the key of the agent.
- 3rd argument is the state of the agent.
This is `nil` if the agent is right after its activation.
- 4th argument is the `command` passed to the `DistAgent.command/5`.
Return value of this callback must be a 3-tuple.
- 1st element is returned to the caller of `DistAgent.command/5`.
- 2nd element is the state after the command is applied; if this value is `nil` then the distributed agent is deactivated.
- 3rd element specifies what happens at the subsequent ticks.
You may specify either `t:DistAgent.OnTick.t/0` or `:keep`, where `:keep` respects the previously set `t:DistAgent.OnTick.t/0`.
"""
@callback handle_command(quota_name :: QName.t, agent_key, nil | data, command) :: {ret, nil | data, OnTick.t | :keep}
@doc """
Invoked when a low-resolution timer fires.
Implementations of this callback must be pure.
The callback takes the following arguments:
- 1st argument is the name of the quota in which this distributed agent belongs.
- 2nd argument is the key of the agent.
- 3rd argument is the state of the agent.
- 4th argument is the time (milliseconds since UNIX epoch) in UTC time zone at which the tick starts.
Note that this value is not very accurate as it's obtained by a separate process (`DistAgent.TickSender`);
it only indicates that current time is at least as large as this value.
Return value of this callback must be a 2-tuple.
- 1st element is the state after the timeout; if this value is `nil` then the distributed agent is deactivated.
- 2nd element specifies what happens at the subsequent ticks.
See `t:DistAgent.OnTick.t/0` for more details.
"""
@callback handle_timeout(QName.t, agent_key, data, milliseconds_since_epoch) :: {nil | data, OnTick.t}
@doc """
Invoked after `c:handle_query/4`.
Implementations of this callback can have side effects.
Note that this callback should avoid long running operations to keep distributed agents responsive;
delegate such operations to other processes instead.
The callback takes the following arguments:
- 1st argument is the name of the quota in which this distributed agent belongs.
- 2nd argument is the key of the agent.
- 3rd argument is the state of the agent.
- 4th argument is the `query` passed to the `DistAgent.query/5`.
- 5th argument is the return value to the client process, computed by `c:handle_query/4`.
Return value of this callback is neglected.
"""
@callback after_query(quota_name :: QName.t, agent_key, data, query, ret) :: any
@doc """
Invoked after `c:handle_command/4`.
Implementations of this callback can have side effects.
Note that this callback should avoid long running operations to keep distributed agents responsive;
delegate such operations to other processes instead.
The callback takes the following arguments:
- 1st argument is the name of the quota in which this distributed agent belongs.
- 2nd argument is the key of the agent.
- 3rd argument is the state of the agent before the command is applied.
This is `nil` if the agent is right after its activation.
- 4th argument is the `command` passed to the `DistAgent.command/5`.
- 5th argument is the return value to the client process (1st element of return value of `c:handle_command/4`).
- 6th argument is the state of the distributed agent after the command is applied (2nd element of return value of `c:handle_command/4`).
Return value of this callback is neglected.
"""
@callback after_command(quota_name :: QName.t, agent_key, data_before :: nil | data, command :: command, ret :: ret, data_after :: nil | data) :: any
@doc """
Invoked after `c:handle_timeout/4`.
Implementations of this callback can have side effects.
Note that this callback should avoid long running operations to keep distributed agents responsive;
delegate such operations to other processes instead.
The callback takes the following arguments:
- 1st argument is the name of the quota in which this distributed agent belongs.
- 2nd argument is the key of the agent.
- 3rd argument is the state of the agent before the timeout.
- 4th argument is the same timestamp passed to `c:handle_timeout/4`.
- 5th argument is the state of the distributed agent after the timeout (1st element of return value of `c:handle_timeout/4`).
Return value of this callback is neglected.
"""
@callback after_timeout(quota_name :: QName.t, agent_key, data_before :: data, now_millis :: milliseconds_since_epoch, data_after :: nil | data) :: any
end
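# A minimal (hypothetical) counter agent implementing this behaviour;
# `handle_timeout/4` and the remaining `after_*` callbacks are elided, as
# their return values depend on DistAgent.OnTick.t:
#
#     defmodule MyApp.CounterAgent do
#       @behaviour DistAgent.Behaviour
#
#       @impl true
#       def handle_query(_quota, _key, count, :get), do: count
#
#       @impl true
#       def handle_command(_quota, _key, nil, :increment), do: {:ok, 1, :keep}
#       def handle_command(_quota, _key, count, :increment), do: {:ok, count + 1, :keep}
#
#       @impl true
#       def after_query(_quota, _key, _count, _query, _ret), do: :ok
#
#       # ...
#     end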
defmodule Flect.Compiler.Syntax.Parser do
@moduledoc """
Contains the parser for Flect source code documents.
"""
@typep location() :: Flect.Compiler.Syntax.Location.t()
@typep token() :: Flect.Compiler.Syntax.Token.t()
@typep ast_node() :: Flect.Compiler.Syntax.Node.t()
@typep state() :: {[token()], location()}
@typep return_n() :: {ast_node(), state()}
@typep return_m() :: {[ast_node()], state()}
@typep return_mt() :: {[ast_node()], [token()], state()}
@doc """
Parses the given list of tokens into a list of
`Flect.Compiler.Syntax.Node`s representing the module declarations
(and everything inside those) of the source code document. Returns the
resulting list or throws a `Flect.Compiler.Syntax.SyntaxError` if the
source code is malformed.
`tokens` must be a list of `Flect.Compiler.Syntax.Token`s. `file` must
be a binary containing the file name (used to report syntax errors).
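## Example

A hedged sketch; `Flect.Compiler.Syntax.Lexer.lex/2` is assumed here to be
the companion lexer producing the token list, so adjust the call to
whatever actually tokenizes your source:

    tokens = Flect.Compiler.Syntax.Lexer.lex("priv mod foo { }", "foo.fl")
    [mod_decl] = Flect.Compiler.Syntax.Parser.parse_modules(tokens, "foo.fl")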
"""
@spec parse_modules([Flect.Compiler.Syntax.Token.t()], String.t()) :: [Flect.Compiler.Syntax.Node.t()]
def parse_modules(tokens, file) do
loc = if t = Enum.first(tokens), do: t.location(), else: Flect.Compiler.Syntax.Location[file: file]
do_parse_modules({tokens, loc})
end
@spec do_parse_modules(state(), [ast_node()]) :: [ast_node()]
defp do_parse_modules(state, mods // []) do
case expect_token(state, [:pub, :priv], "module declaration", true) do
:eof -> Enum.reverse(mods)
{_, token, state} ->
{mod, state} = parse_mod(state, token)
do_parse_modules(state, [mod | mods])
end
end
@doc """
Parses the given list of tokens into a list of
`Flect.Compiler.Syntax.Node`s representing each semicolon-terminated
expression in the input. Returns the resulting list or throws a
`Flect.Compiler.Syntax.SyntaxError` if the source code is malformed.
`tokens` must be a list of `Flect.Compiler.Syntax.Token`s. `file` must
be a binary containing the file name (used to report syntax errors).
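## Example

A hedged sketch, under the same assumption about the lexer; note that
every expression in the input must be semicolon-terminated:

    tokens = Flect.Compiler.Syntax.Lexer.lex("1 + 2; f(3);", "repl.fl")
    exprs = Flect.Compiler.Syntax.Parser.parse_expressions(tokens, "repl.fl")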
"""
@spec parse_expressions([Flect.Compiler.Syntax.Token.t()], String.t()) :: [Flect.Compiler.Syntax.Node.t()]
def parse_expressions(tokens, file) do
loc = if t = Enum.first(tokens), do: t.location(), else: Flect.Compiler.Syntax.Location[file: file]
do_parse_expressions({tokens, loc})
end
@spec do_parse_expressions(state(), [ast_node()]) :: [ast_node()]
defp do_parse_expressions(state, exprs // []) do
case next_token(state, true) do
:eof -> Enum.reverse(exprs)
{:semicolon, _, state} -> do_parse_expressions(state, exprs)
_ ->
{expr, state} = parse_expr(state)
{_, _, state} = expect_token(state, :semicolon, "expression-terminating semicolon")
do_parse_expressions(state, [expr | exprs])
end
end
@spec parse_simple_name(state()) :: return_n()
defp parse_simple_name(state) do
{_, tok, state} = expect_token(state, :identifier, "identifier")
{new_node(:simple_name, tok.location(), [name: tok]), state}
end
@spec parse_qualified_name(state(), boolean()) :: return_n()
defp parse_qualified_name(state, global // true) do
{tcol, loc, state} = case next_token(state) do
{:colon_colon, tok, state} when global -> {[separator: tok], tok.location(), state}
_ -> {[], nil, state}
end
{names, toks, state} = parse_qualified_name_list(state, [])
if !loc, do: loc = hd(names).location()
names = lc name inlist names, do: {:name, name}
toks = lc tok inlist toks, do: {:separator, tok}
{new_node(:qualified_name, loc, tcol ++ toks, names), state}
end
@spec parse_qualified_name_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_qualified_name_list(state, names, tokens // []) do
{name, state} = parse_simple_name(state)
case next_token(state) do
{:colon_colon, tok, state} -> parse_qualified_name_list(state, [name | names], [tok | tokens])
_ -> {Enum.reverse([name | names]), Enum.reverse(tokens), state}
end
end
@spec parse_mod(state(), token()) :: return_n()
defp parse_mod(state, visibility) do
{_, tok_mod, state} = expect_token(state, :mod, "'mod' keyword")
{name, state} = parse_qualified_name(state, false)
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{decls, state} = parse_decls(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [visibility: visibility,
mod_keyword: tok_mod,
opening_brace: tok_open,
closing_brace: tok_close]
{new_node(:module_declaration, tok_mod.location(), tokens, [{:name, name} | decls]), state}
end
@spec parse_decls(state(), [{:declaration, ast_node()}]) :: return_m()
defp parse_decls(state, decls // []) do
case next_token(state) do
{v, _, _} when v in [:use, :test] ->
{decl, state} = case v do
:use -> parse_use_decl(state)
:test -> parse_test_decl(state)
end
parse_decls(state, [{:declaration, decl} | decls])
{v, token, state} when v in [:pub, :priv] ->
{decl, state} = case expect_token(state, [:fn, :struct, :union, :enum, :type, :trait,
:impl, :glob, :tls, :macro], "declaration") do
{:fn, _, _} -> parse_fn_decl(state, token)
{:struct, _, _} -> parse_struct_decl(state, token)
{:union, _, _} -> parse_union_decl(state, token)
{:enum, _, _} -> parse_enum_decl(state, token)
{:type, _, _} -> parse_type_decl(state, token)
{:trait, _, _} -> parse_trait_decl(state, token)
{:impl, _, _} -> parse_impl_decl(state, token)
{:glob, _, _} -> parse_glob_decl(state, token)
{:tls, _, _} -> parse_tls_decl(state, token)
{:macro, _, _} -> parse_macro_decl(state, token)
end
parse_decls(state, [{:declaration, decl} | decls])
_ -> {Enum.reverse(decls), state}
end
end
@spec parse_use_decl(state()) :: return_n()
defp parse_use_decl(state) do
{_, tok_use, state} = expect_token(state, :use, "use declaration")
{ext, state} = case next_token(state) do
{:ext, tok, state} -> {[ext_keyword: tok], state}
_ -> {[], state}
end
{name, state} = parse_qualified_name(state, false)
{_, tok_semi, state} = expect_token(state, :semicolon, "semicolon")
{new_node(:use_declaration, tok_use.location(),
[{:use_keyword, tok_use} | ext] ++ [semicolon: tok_semi], [name: name]), state}
end
@spec parse_fn_decl(state(), token() | nil, boolean()) :: return_n()
defp parse_fn_decl(state, visibility, body // true) do
{_, tok_fn, state} = expect_token(state, :fn, "function declaration")
{ext, state} = case next_token(state) do
{:ext, tok, state} ->
{_, str, state} = expect_token(state, :string, "function ABI string")
{[ext_keyword: tok, abi: str], state}
_ -> {[], state}
end
{name, state} = parse_simple_name(state)
{ty_par, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_par, state} = parse_type_parameters(state)
{[type_parameters: ty_par], state}
_ -> {[], state}
end
{params, state} = parse_function_parameters(state)
{_, tok_arrow, state} = expect_token(state, :minus_angle_close, "return type arrow")
{ret_type, state} = parse_return_type(state)
{tail_tok, tail_node, state} = if body do
{block, state} = parse_block(state)
{[], [body: block], state}
else
{_, tok_semicolon, state} = expect_token(state, :semicolon, "semicolon")
{[semicolon: tok_semicolon], [], state}
end
vis = if visibility, do: [visibility_keyword: visibility], else: []
tokens = vis ++ [fn_keyword: tok_fn] ++ ext ++ [arrow: tok_arrow] ++ tail_tok
{new_node(:function_declaration, tok_fn.location(), tokens,
[{:name, name} | ty_par] ++ [parameters: params, return_type: ret_type] ++ tail_node), state}
end
@spec parse_function_parameters(state()) :: return_n()
defp parse_function_parameters(state) do
{_, tok_open, state} = expect_token(state, :paren_open, "opening parenthesis")
{params, toks, state} = parse_function_parameter_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
params = lc param inlist params, do: {:parameter, param}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:function_parameters, tok_open.location(),
[{:opening_parenthesis, tok_open} | toks] ++ [closing_parenthesis: tok_close], params), state}
end
@spec parse_function_parameter_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_function_parameter_list(state, params, tokens // []) do
case next_token(state) do
{:paren_close, _, _} -> {Enum.reverse(params), Enum.reverse(tokens), state}
_ ->
if params == [] do
{param, state} = parse_function_parameter(state)
parse_function_parameter_list(state, [param | params], tokens)
else
{_, tok, state} = expect_token(state, :comma, "comma")
{param, state} = parse_function_parameter(state)
parse_function_parameter_list(state, [param | params], [tok | tokens])
end
end
end
@spec parse_function_parameter(state()) :: return_n()
defp parse_function_parameter(state) do
{mut, state} = case next_token(state) do
{:mut, mut, state} -> {[mut_keyword: mut], state}
_ -> {[], state}
end
{ref, state} = case next_token(state) do
{:ref, ref, state} -> {[ref_keyword: ref], state}
_ -> {[], state}
end
{name, state} = parse_simple_name(state)
{_, tok_colon, state} = expect_token(state, :colon, "colon")
{type, state} = parse_type(state)
{new_node(:function_parameter, type.location(), mut ++ ref ++ [colon: tok_colon], [name: name, type: type]), state}
end
defp parse_struct_decl(state, visibility) do
{_, tok_struct, state} = expect_token(state, :struct, "structure declaration")
{name, state} = parse_simple_name(state)
{ty_par, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_par, state} = parse_type_parameters(state)
{[type_parameters: ty_par], state}
_ -> {[], state}
end
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{fields, state} = parse_fields(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [visibility_keyword: visibility,
struct_keyword: tok_struct,
opening_brace: tok_open,
closing_brace: tok_close]
fields = lc field inlist fields, do: {:field, field}
{new_node(:struct_declaration, tok_struct.location(), tokens, [{:name, name} | ty_par] ++ fields), state}
end
@spec parse_fields(state(), [ast_node()]) :: return_m()
defp parse_fields(state, fields // []) do
case next_token(state) do
{v, token, state} when v in [:pub, :priv] ->
{field, state} = parse_field(state, token)
parse_fields(state, [field | fields])
_ -> {Enum.reverse(fields), state}
end
end
@spec parse_field(state(), token()) :: return_n()
defp parse_field(state, visibility) do
{name, state} = parse_simple_name(state)
{_, tok_colon, state} = expect_token(state, :colon, "colon")
{type, state} = parse_type(state)
{_, tok_semicolon, state} = expect_token(state, :semicolon, "semicolon")
tokens = [visibility_keyword: visibility,
colon: tok_colon,
semicolon: tok_semicolon]
{new_node(:field_declaration, name.location(), tokens, [name: name, type: type]), state}
end
@spec parse_union_decl(state(), token()) :: return_n()
defp parse_union_decl(state, visibility) do
{_, tok_union, state} = expect_token(state, :union, "union declaration")
{name, state} = parse_simple_name(state)
{ty_par, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_par, state} = parse_type_parameters(state)
{[type_parameters: ty_par], state}
_ -> {[], state}
end
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{cases, state} = parse_cases(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [visibility_keyword: visibility,
union_keyword: tok_union,
opening_brace: tok_open,
closing_brace: tok_close]
cases = lc c inlist cases, do: {:case, c}
{new_node(:union_declaration, tok_union.location(), tokens, [{:name, name} | ty_par] ++ cases), state}
end
@spec parse_cases(state(), [ast_node()]) :: return_m()
defp parse_cases(state, cases // []) do
case next_token(state) do
{:identifier, _, _} ->
{c, state} = parse_case(state)
parse_cases(state, [c | cases])
_ -> {Enum.reverse(cases), state}
end
end
@spec parse_case(state()) :: return_n()
defp parse_case(state) do
{name, state} = parse_simple_name(state)
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{fields, state} = parse_fields(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [opening_brace: tok_open,
closing_brace: tok_close]
fields = lc field inlist fields, do: {:field, field}
{new_node(:case_declaration, name.location(), tokens, [{:name, name} | fields]), state}
end
@spec parse_enum_decl(state(), token()) :: return_n()
defp parse_enum_decl(state, visibility) do
{_, tok_enum, state} = expect_token(state, :enum, "enumeration declaration")
{name, state} = parse_simple_name(state)
{_, tok_colon, state} = expect_token(state, :colon, "colon")
{type, state} = parse_nominal_type(state, false)
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{values, state} = parse_values(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [visibility_keyword: visibility,
enum_keyword: tok_enum,
colon: tok_colon,
opening_brace: tok_open,
closing_brace: tok_close]
values = lc value inlist values, do: {:value, value}
{new_node(:enum_declaration, tok_enum.location(), tokens, [name: name, backing_type: type] ++ values), state}
end
@spec parse_values(state(), [ast_node()]) :: return_m()
defp parse_values(state, values // []) do
case next_token(state) do
{:identifier, _, _} ->
{value, state} = parse_value(state)
parse_values(state, [value | values])
_ -> {Enum.reverse(values), state}
end
end
@spec parse_value(state()) :: return_n()
defp parse_value(state) do
{name, state} = parse_simple_name(state)
{_, tok_equals, state} = expect_token(state, :assign, "equals sign")
{expr, state} = parse_expr(state)
{_, tok_semicolon, state} = expect_token(state, :semicolon, "semicolon")
tokens = [equals: tok_equals,
semicolon: tok_semicolon]
{new_node(:value_declaration, name.location(), tokens, [name: name, value: expr]), state}
end
@spec parse_type_decl(state(), token()) :: return_n()
defp parse_type_decl(state, visibility) do
{_, tok_type, state} = expect_token(state, :type, "type declaration")
{name, state} = parse_simple_name(state)
{ty_par, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_par, state} = parse_type_parameters(state)
{[type_parameters: ty_par], state}
_ -> {[], state}
end
{_, tok_eq, state} = expect_token(state, :assign, "equals sign")
{type, state} = parse_type(state)
{_, tok_semicolon, state} = expect_token(state, :semicolon, "semicolon")
tokens = [visibility_keyword: visibility,
type_keyword: tok_type,
equals: tok_eq,
semicolon: tok_semicolon]
{new_node(:type_declaration, tok_type.location(), tokens, [name: name, type: type] ++ ty_par), state}
end
@spec parse_trait_decl(state(), token()) :: return_n()
defp parse_trait_decl(state, visibility) do
{_, tok_trait, state} = expect_token(state, :trait, "trait declaration")
{name, state} = parse_simple_name(state)
{ty_par, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_par, state} = parse_type_parameters(state)
{[type_parameters: ty_par], state}
_ -> {[], state}
end
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{fns, state} = parse_trait_functions(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [visibility_keyword: visibility,
trait_keyword: tok_trait,
opening_brace: tok_open,
closing_brace: tok_close]
fns = lc fun inlist fns, do: {:function, fun}
{new_node(:trait_declaration, tok_trait.location(), tokens, [{:name, name} | ty_par ++ fns]), state}
end
@spec parse_trait_functions(state(), [ast_node()]) :: return_m()
defp parse_trait_functions(state, fns // []) do
case next_token(state) do
{:fn, _, _} ->
{fun, state} = parse_fn_decl(state, nil, false)
parse_trait_functions(state, [fun | fns])
_ -> {Enum.reverse(fns), state}
end
end
@spec parse_impl_decl(state(), token()) :: return_n()
defp parse_impl_decl(state, visibility) do
{_, tok_impl, state} = expect_token(state, :impl, "implementation declaration")
{ty_par, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_par, state} = parse_type_parameters(state)
{[type_parameters: ty_par], state}
_ -> {[], state}
end
{trait, state} = parse_nominal_type(state)
{_, tok_for, state} = expect_token(state, :for, "'for' keyword")
{type, state} = parse_type(state)
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{fns, state} = parse_impl_functions(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [visibility_keyword: visibility,
impl_keyword: tok_impl,
for_keyword: tok_for,
opening_brace: tok_open,
closing_brace: tok_close]
fns = lc fun inlist fns, do: {:function, fun}
{new_node(:impl_declaration, tok_impl.location(), tokens, ty_par ++ [trait: trait, type: type] ++ fns), state}
end
@spec parse_impl_functions(state(), [ast_node()]) :: return_m()
defp parse_impl_functions(state, fns // []) do
case next_token(state) do
{:fn, _, _} ->
{fun, state} = parse_fn_decl(state, nil)
parse_impl_functions(state, [fun | fns])
_ -> {Enum.reverse(fns), state}
end
end
@spec parse_glob_decl(state(), token()) :: return_n()
defp parse_glob_decl(state, visibility) do
{_, tok_glob, state} = expect_token(state, :glob, "global variable declaration")
{ext, state} = case next_token(state) do
{:ext, tok, state} ->
{_, str, state} = expect_token(state, :string, "variable ABI string")
{[ext_keyword: tok, abi: str], state}
_ -> {[], state}
end
{mut, state} = case next_token(state) do
{:mut, tok, state} -> {[mut_keyword: tok], state}
_ -> {[], state}
end
{name, state} = parse_simple_name(state)
{_, tok_colon, state} = expect_token(state, :colon, "colon")
{type, state} = parse_type(state)
{expr, tok_eq, state} = case next_token(state) do
{:assign, tok, state} ->
{expr, state} = parse_expr(state)
{[initializer: expr], [equals: tok], state}
_ -> {[], [], state}
end
{_, tok_semicolon, state} = expect_token(state, :semicolon, "semicolon")
tokens = [visibility_keyword: visibility, glob_keyword: tok_glob] ++ ext ++ mut
tokens = tokens ++ [colon: tok_colon] ++ tok_eq ++ [semicolon: tok_semicolon]
{new_node(:global_declaration, tok_glob.location(), tokens, [{:name, name}, {:type, type} | expr]), state}
end
@spec parse_tls_decl(state(), token()) :: return_n()
defp parse_tls_decl(state, visibility) do
{_, tok_tls, state} = expect_token(state, :tls, "TLS variable declaration")
{ext, state} = case next_token(state) do
{:ext, tok, state} ->
{_, str, state} = expect_token(state, :string, "variable ABI string")
{[ext_keyword: tok, abi: str], state}
_ -> {[], state}
end
{mut, state} = case next_token(state) do
{:mut, tok, state} -> {[mut_keyword: tok], state}
_ -> {[], state}
end
{name, state} = parse_simple_name(state)
{_, tok_colon, state} = expect_token(state, :colon, "colon")
{type, state} = parse_type(state)
{expr, tok_eq, state} = case next_token(state) do
{:assign, tok, state} ->
{expr, state} = parse_expr(state)
{[initializer: expr], [equals: tok], state}
_ -> {[], [], state}
end
{_, tok_semicolon, state} = expect_token(state, :semicolon, "semicolon")
tokens = [visibility_keyword: visibility, tls_keyword: tok_tls] ++ ext ++ mut
tokens = tokens ++ [colon: tok_colon] ++ tok_eq ++ [semicolon: tok_semicolon]
{new_node(:tls_declaration, tok_tls.location(), tokens, [{:name, name}, {:type, type} | expr]), state}
end
@spec parse_macro_decl(state(), token()) :: return_n()
defp parse_macro_decl(state, visibility) do
{_, tok_macro, state} = expect_token(state, :macro, "macro declaration")
{name, state} = parse_simple_name(state)
{params, state} = parse_macro_parameters(state)
{block, state} = parse_block(state)
tokens = [visibility_keyword: visibility,
macro_keyword: tok_macro]
{new_node(:macro_declaration, tok_macro.location(), tokens, [name: name, parameters: params, body: block]), state}
end
@spec parse_macro_parameters(state()) :: return_n()
defp parse_macro_parameters(state) do
{_, tok_open, state} = expect_token(state, :paren_open, "opening parenthesis")
{params, toks, state} = parse_macro_parameter_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
params = lc param inlist params, do: {:parameter, param}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:macro_parameters, tok_open.location(),
[{:opening_parenthesis, tok_open} | toks] ++ [closing_parenthesis: tok_close], params), state}
end
@spec parse_macro_parameter_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_macro_parameter_list(state, params, tokens // []) do
case next_token(state) do
{:paren_close, _, _} -> {Enum.reverse(params), Enum.reverse(tokens), state}
_ ->
if params == [] do
{param, state} = parse_simple_name(state)
parse_macro_parameter_list(state, [param | params], tokens)
else
{_, tok, state} = expect_token(state, :comma, "comma")
{param, state} = parse_simple_name(state)
parse_macro_parameter_list(state, [param | params], [tok | tokens])
end
end
end
@spec parse_test_decl(state()) :: return_n()
defp parse_test_decl(state) do
{_, tok_test, state} = expect_token(state, :test, "test declaration")
{_, name_str, state} = expect_token(state, :string, "test name string")
{block, state} = parse_block(state)
tokens = [test_keyword: tok_test,
test_name: name_str]
{new_node(:test_declaration, tok_test.location(), tokens, [body: block]), state}
end
@spec parse_type_parameters(state()) :: return_n()
defp parse_type_parameters(state) do
{_, tok_open, state} = expect_token(state, :bracket_open, "opening bracket")
{params, toks, state} = parse_type_parameter_list(state, [])
{_, tok_close, state} = expect_token(state, :bracket_close, "closing bracket")
params = lc param inlist params, do: {:parameter, param}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:type_parameters, tok_open.location(),
[{:opening_bracket, tok_open} | toks] ++ [closing_bracket: tok_close], params), state}
end
@spec parse_type_parameter_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_type_parameter_list(state, params, tokens // []) do
{param, state} = parse_type_parameter(state)
case next_token(state) do
{:comma, tok, state} -> parse_type_parameter_list(state, [param | params], [tok | tokens])
_ -> {Enum.reverse([param | params]), Enum.reverse(tokens), state}
end
end
@spec parse_type_parameter(state()) :: return_n()
defp parse_type_parameter(state) do
{name, state} = parse_simple_name(state)
{bounds, state} = case next_token(state) do
{:colon, _, _} ->
{bounds, state} = parse_type_parameter_bounds(state)
{[bounds: bounds], state}
_ -> {[], state}
end
{new_node(:type_parameter, name.location(), [], [{:name, name} | bounds]), state}
end
@spec parse_type_parameter_bounds(state()) :: return_n()
defp parse_type_parameter_bounds(state) do
{_, tok_colon, state} = expect_token(state, :colon, "colon")
{bounds, toks, state} = parse_type_parameter_bounds_list(state, [])
bounds = lc bound inlist bounds, do: {:bound, bound}
toks = lc tok inlist toks, do: {:ampersand, tok}
{new_node(:type_parameter_bounds, tok_colon.location(), [colon: tok_colon] ++ toks, bounds), state}
end
@spec parse_type_parameter_bounds_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_type_parameter_bounds_list(state, bounds, tokens // []) do
{bound, state} = parse_nominal_type(state)
case next_token(state) do
{:ampersand, tok, state} -> parse_type_parameter_bounds_list(state, [bound | bounds], [tok | tokens])
_ -> {Enum.reverse([bound | bounds]), Enum.reverse(tokens), state}
end
end
@spec parse_type(state()) :: return_n()
defp parse_type(state) do
case expect_token(state, [:identifier, :paren_open, :fn, :bracket_open,
:at, :star, :ampersand], "type signature") do
{:identifier, _, _} -> parse_nominal_type(state)
{:paren_open, _, _} -> parse_tuple_type(state)
{:fn, _, _} -> parse_function_type(state)
{:bracket_open, _, _} -> parse_vector_type(state)
_ -> parse_pointer_type(state)
end
end
@spec parse_nominal_type(state(), boolean()) :: return_n()
defp parse_nominal_type(state, generic // true) do
{name, state} = parse_qualified_name(state)
{ty_args, state} = case next_token(state) do
{:bracket_open, _, _} when generic ->
{ty_args, state} = parse_type_arguments(state)
{[arguments: ty_args], state}
_ -> {[], state}
end
{new_node(:nominal_type, name.location(), [], [{:name, name} | ty_args]), state}
end
@spec parse_type_arguments(state()) :: return_n()
defp parse_type_arguments(state) do
{_, tok_open, state} = expect_token(state, :bracket_open, "opening bracket")
{types, toks, state} = parse_type_list(state, [])
{_, tok_close, state} = expect_token(state, :bracket_close, "closing bracket")
types = lc type inlist types, do: {:argument, type}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:type_arguments, tok_open.location(),
[{:opening_bracket, tok_open} | toks] ++ [closing_bracket: tok_close], types), state}
end
@spec parse_type_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_type_list(state, types, tokens // []) do
{type, state} = parse_type(state)
case next_token(state) do
{:comma, tok, state} -> parse_type_list(state, [type | types], [tok | tokens])
_ -> {Enum.reverse([type | types]), Enum.reverse(tokens), state}
end
end
@spec parse_tuple_type(state()) :: return_n()
defp parse_tuple_type(state) do
{_, tok_open, state} = expect_token(state, :paren_open, "opening parenthesis")
{type, state} = parse_type(state)
{_, comma, state} = expect_token(state, :comma, "comma")
{types, toks, state} = parse_type_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
types = lc typ inlist [type | types], do: {:element, typ}
toks = lc tok inlist [comma | toks], do: {:comma, tok}
{new_node(:tuple_type, tok_open.location(),
[{:opening_parenthesis, tok_open} | toks] ++ [closing_parenthesis: tok_close], types), state}
end
@spec parse_function_type(state()) :: return_n()
defp parse_function_type(state) do
{_, fn_tok, state} = expect_token(state, :fn, "'fn' keyword")
case expect_token(state, [:at, :paren_open, :ext], "function type parameter list") do
{:at, _, _} -> parse_closure_pointer_type(state, fn_tok)
_ -> parse_function_pointer_type(state, fn_tok)
end
end
@spec parse_function_pointer_type(state(), token()) :: return_n()
defp parse_function_pointer_type(state, fn_kw) do
{ext_abi, state} = case next_token(state) do
{:ext, ext, state} ->
case expect_token(state, :string, "function ABI string") do
{_, abi, state} -> {[ext_keyword: ext, abi: abi], state}
end
_ -> {[], state}
end
{params, state} = parse_function_type_parameters(state)
{_, tok_arrow, state} = expect_token(state, :minus_angle_close, "return type arrow")
{return_type, state} = parse_return_type(state)
{new_node(:function_pointer_type, fn_kw.location(),
[fn_keyword: fn_kw] ++ ext_abi ++ [arrow: tok_arrow], [parameters: params, return_type: return_type]), state}
end
@spec parse_closure_pointer_type(state(), token()) :: return_n()
defp parse_closure_pointer_type(state, fn_kw) do
{_, tok_closure, state} = expect_token(state, :at, "'@' symbol")
{params, state} = parse_function_type_parameters(state)
{_, tok_arrow, state} = expect_token(state, :minus_angle_close, "return type arrow")
{return_type, state} = parse_return_type(state)
{new_node(:closure_pointer_type, fn_kw.location(),
[fn_keyword: fn_kw, at: tok_closure, arrow: tok_arrow], [parameters: params, return_type: return_type]), state}
end
@spec parse_function_type_parameters(state()) :: return_n()
defp parse_function_type_parameters(state) do
{_, tok_open, state} = expect_token(state, :paren_open, "opening parenthesis")
{type_params, toks, state} = parse_function_type_parameter_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
types = lc type inlist type_params, do: {:parameter, type}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:function_type_parameters, tok_open.location(),
[{:opening_parenthesis, tok_open} | toks] ++ [closing_parenthesis: tok_close], types), state}
end
@spec parse_function_type_parameter_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_function_type_parameter_list(state, params, tokens // []) do
case next_token(state) do
{:paren_close, _, _} -> {Enum.reverse(params), Enum.reverse(tokens), state}
_ ->
if params == [] do
{param, state} = parse_function_type_parameter(state)
parse_function_type_parameter_list(state, [param | params], tokens)
else
{_, tok, state} = expect_token(state, :comma, "comma")
{param, state} = parse_function_type_parameter(state)
parse_function_type_parameter_list(state, [param | params], [tok | tokens])
end
end
end
@spec parse_function_type_parameter(state()) :: return_n()
defp parse_function_type_parameter(state) do
{mut, state} = case next_token(state) do
{:mut, mut, state} -> {[mut_keyword: mut], state}
_ -> {[], state}
end
{ref, state} = case next_token(state) do
{:ref, ref, state} -> {[ref_keyword: ref], state}
_ -> {[], state}
end
{type, state} = parse_type(state)
{new_node(:function_type_parameter, type.location(), mut ++ ref, [type: type]), state}
end
@spec parse_return_type(state()) :: return_n()
defp parse_return_type(state) do
case next_token(state) do
{:exclamation, tok_exclam, state} -> {new_node(:bottom_type, tok_exclam.location(), [exclamation: tok_exclam], []), state}
_ -> parse_type(state)
end
end
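# Parses `[T .. N]` as a fixed-size vector type. When `array` is true (i.e.
# the type appears as the pointee of a pointer type), a plain `[T]` is also
# accepted and becomes an array type.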
@spec parse_vector_type(state(), boolean()) :: return_n()
defp parse_vector_type(state, array // false) do
{_, tok_open, state} = expect_token(state, :bracket_open, "opening bracket")
{type, state} = parse_type(state)
{a_type, etok, state} = case next_token(state) do
{:period_period, etok, state} ->
{_, tok_int, state} = expect_token(state, :integer, "vector size integer")
{:vector_type, [ellipsis: etok, size: tok_int], state}
{_, tok, _} ->
if !array do
val = if String.printable?(tok.value()), do: tok.value(), else: "<non-printable token>"
raise_error(tok.location(), "Expected type/size-separating ellipsis, but found: #{val}")
end
{:array_type, [], state}
end
{_, tok_close, state} = expect_token(state, :bracket_close, "closing bracket")
tokens = [{:opening_bracket, tok_open} | etok] ++ [closing_bracket: tok_close]
{new_node(a_type, tok_open.location(), tokens, [element: type]), state}
end
@spec parse_pointer_type(state()) :: return_n()
defp parse_pointer_type(state) do
{p_type, tok, state} = case expect_token(state, [:at, :star, :ampersand], "'@', '*', or '&'") do
{:at, tok, state} -> {:managed_pointer_type, [at: tok], state}
{:star, tok, state} -> {:unsafe_pointer_type, [star: tok], state}
{:ampersand, tok, state} -> {:general_pointer_type, [ampersand: tok], state}
end
{mut_imm, state} = case next_token(state) do
{:mut, mut, state} -> {[mut_keyword: mut], state}
{:imm, imm, state} -> {[imm_keyword: imm], state}
_ -> {[], state}
end
{type, state} = case next_token(state) do
# Kind of a hack, but it works.
{:bracket_open, _, _} -> parse_vector_type(state, true)
_ -> parse_type(state)
end
loc = elem(hd(tok), 1).location()
{new_node(p_type, loc, tok ++ mut_imm, [pointee: type]), state}
end
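# Expression parsing below uses one function per precedence level. From
# loosest to tightest binding: assignment, cast (`as`), logical or/and,
# bitwise or/xor/and, relational, pipeline, shift, additive, multiplicative,
# and finally unary/postfix/primary expressions. Each level consumes at most
# one operator, so chains such as `a + b + c` require explicit parentheses.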
@spec parse_expr(state()) :: return_n()
defp parse_expr(state) do
parse_assign_expr(state)
end
@spec parse_assign_expr(state) :: return_n()
defp parse_assign_expr(state) do
tup = {expr1, state} = parse_cast_expr(state)
case next_token(state) do
{:assign, tok, state} ->
{expr2, state} = parse_assign_expr(state)
{new_node(:assign_expr, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_cast_expr(state()) :: return_n()
defp parse_cast_expr(state) do
tup = {expr, state} = parse_logical_or_expr(state)
case next_token(state) do
{:as, tok, state} ->
{type, state} = parse_type(state)
{new_node(:cast_expr, tok.location(), [as_keyword: tok], [lhs: expr, rhs: type]), state}
_ -> tup
end
end
@spec parse_logical_or_expr(state) :: return_n()
defp parse_logical_or_expr(state) do
tup = {expr1, state} = parse_logical_and_expr(state)
case next_token(state) do
{:pipe_pipe, tok, state} ->
{expr2, state} = parse_logical_and_expr(state)
{new_node(:logical_or_expr, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_logical_and_expr(state()) :: return_n()
defp parse_logical_and_expr(state) do
tup = {expr1, state} = parse_bitwise_or_expr(state)
case next_token(state) do
{:ampersand_ampersand, tok, state} ->
{expr2, state} = parse_bitwise_or_expr(state)
{new_node(:logical_and_expr, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_bitwise_or_expr(state()) :: return_n()
defp parse_bitwise_or_expr(state) do
tup = {expr1, state} = parse_bitwise_xor_expr(state)
case next_token(state) do
{:pipe, tok, state} ->
{expr2, state} = parse_bitwise_xor_expr(state)
{new_node(:bitwise_or_expr, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_bitwise_xor_expr(state()) :: return_n()
defp parse_bitwise_xor_expr(state) do
tup = {expr1, state} = parse_bitwise_and_expr(state)
case next_token(state) do
{:caret, tok, state} ->
{expr2, state} = parse_bitwise_and_expr(state)
{new_node(:bitwise_xor_expr, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_bitwise_and_expr(state()) :: return_n()
defp parse_bitwise_and_expr(state) do
tup = {expr1, state} = parse_relational_expr(state)
case next_token(state) do
{:ampersand, tok, state} ->
{expr2, state} = parse_relational_expr(state)
{new_node(:bitwise_and_expr, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_relational_expr(state()) :: return_n()
defp parse_relational_expr(state) do
tup = {expr1, state} = parse_pipeline_expr(state)
case next_token(state) do
{type, tok, state} when type in [:assign_assign,
:assign_assign_assign,
:exclamation_assign,
:exclamation_assign_assign,
:angle_open,
:angle_open_assign,
:angle_close,
:angle_close_assign] ->
{expr2, state} = parse_pipeline_expr(state)
ast_type = case type do
:assign_assign -> :relational_equal_expr
:assign_assign_assign -> :relational_identical_expr
:exclamation_assign -> :relational_not_equal_expr
:exclamation_assign_assign -> :relational_not_identical_expr
:angle_open -> :relational_lower_expr
:angle_open_assign -> :relational_lower_equal_expr
:angle_close -> :relational_greater_expr
:angle_close_assign -> :relational_greater_equal_expr
end
{new_node(ast_type, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_pipeline_expr(state()) :: return_n()
defp parse_pipeline_expr(state) do
tup = {expr1, state} = parse_shift_expr(state)
case next_token(state) do
{type, tok, state} when type in [:angle_open_pipe, :pipe_angle_close] ->
{expr2, state} = parse_shift_expr(state)
ast_type = case type do
:angle_open_pipe -> :left_pipeline_expr
:pipe_angle_close -> :right_pipeline_expr
end
{new_node(ast_type, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_shift_expr(state()) :: return_n()
defp parse_shift_expr(state) do
tup = {expr1, state} = parse_additive_expr(state)
case next_token(state) do
{type, tok, state} when type in [:angle_open_angle_open, :angle_close_angle_close] ->
{expr2, state} = parse_additive_expr(state)
ast_type = case type do
:angle_open_angle_open -> :left_shift_expr
:angle_close_angle_close -> :right_shift_expr
end
{new_node(ast_type, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_additive_expr(state()) :: return_n()
defp parse_additive_expr(state) do
tup = {expr1, state} = parse_multiplicative_expr(state)
case next_token(state) do
{type, tok, state} when type in [:plus, :minus] ->
{expr2, state} = parse_multiplicative_expr(state)
ast_type = case type do
:plus -> :add_expr
:minus -> :subtract_expr
end
{new_node(ast_type, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
@spec parse_multiplicative_expr(state()) :: return_n()
defp parse_multiplicative_expr(state) do
tup = {expr1, state} = parse_unary_expr(state)
case next_token(state) do
{type, tok, state} when type in [:star, :slash, :percent] ->
{expr2, state} = parse_unary_expr(state)
ast_type = case type do
:star -> :multiply_expr
:slash -> :divide_expr
:percent -> :remainder_expr
end
{new_node(ast_type, tok.location(), [operator: tok], [lhs: expr1, rhs: expr2]), state}
_ -> tup
end
end
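# Parses prefix operators (`*`, `+`, `-`, `!`, `~`), boxing (`@`), address-of
# (`&`), label addresses (`&&`), and parenthesized/tuple expressions, falling
# back to primary expressions with any postfix forms applied afterwards.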
@spec parse_unary_expr(state()) :: return_n()
defp parse_unary_expr(state) do
case next_token(state) do
{type, tok, state} when type in [:star,
:plus,
:minus,
:exclamation,
:tilde] ->
{expr, state} = parse_unary_expr(state)
ast_type = case type do
:star -> :dereference_expr
:plus -> :plus_expr
:minus -> :negate_expr
:exclamation -> :logical_not_expr
:tilde -> :complement_expr
end
{new_node(ast_type, tok.location(), [operator: tok], [operand: expr]), state}
{:at, tok, state} ->
{mut_imm, state} = case next_token(state) do
{:mut, mut, state} -> {[mut_keyword: mut], state}
{:imm, imm, state} -> {[imm_keyword: imm], state}
_ -> {[], state}
end
{expr, state} = case next_token(state) do
# Kind of a hack, but it works.
{:bracket_open, _, _} -> parse_vector_expr(state, true)
_ -> parse_unary_expr(state)
end
{new_node(:box_expr, tok.location(), [operator: tok] ++ mut_imm, [operand: expr]), state}
{:ampersand, tok, state} ->
{imm, state} = case next_token(state) do
{:imm, tok, state} -> {[imm_keyword: tok], state}
_ -> {[], state}
end
{expr, state} = parse_unary_expr(state)
{new_node(:address_expr, tok.location(), [operator: tok] ++ imm, [operand: expr]), state}
{:ampersand_ampersand, tok, state} ->
{ident, state} = parse_simple_name(state)
{new_node(:label_address_expr, tok.location(), [operator: tok], [operand: ident]), state}
{:paren_open, _, _} ->
{expr, state} = parse_parenthesized_expr(state)
parse_post_expr(state, expr)
_ ->
{expr, state} = parse_primary_expr(state)
parse_post_expr(state, expr)
end
end
@spec parse_parenthesized_expr(state()) :: return_n()
defp parse_parenthesized_expr(state) do
{_, tok_open, state} = expect_token(state, :paren_open, "opening parenthesis")
case next_token(state) do
# It's a unit literal, i.e. ().
{:paren_close, tok, state} -> {new_node(:unit_expr, tok_open.location(),
[opening_parenthesis: tok_open,
closing_parenthesis: tok], []), state}
_ ->
{expr, state} = parse_expr(state)
case next_token(state) do
# It's a tuple, i.e. (e1, ...).
{:comma, tok, state} -> parse_tuple_expr(state, expr, tok_open, tok)
# It's a simple parenthesized expression, i.e. (e).
_ ->
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
{new_node(:parenthesized_expr, tok_open.location(),
[opening_parenthesis: tok_open, closing_parenthesis: tok_close], [expression: expr]), state}
end
end
end
@spec parse_tuple_expr(state(), ast_node(), token(), token()) :: return_n()
defp parse_tuple_expr(state, first, paren, comma) do
{exprs, toks, state} = parse_tuple_expr_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
exprs = lc expr inlist [first | exprs], do: {:element, expr}
toks = lc tok inlist [comma | toks], do: {:comma, tok}
{new_node(:tuple_expr, paren.location(),
[{:opening_parenthesis, paren} | toks] ++ [closing_parenthesis: tok_close], exprs), state}
end
@spec parse_tuple_expr_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_tuple_expr_list(state, exprs, tokens // []) do
{expr, state} = parse_expr(state)
case next_token(state) do
{:comma, tok, state} -> parse_tuple_expr_list(state, [expr | exprs], [tok | tokens])
_ -> {Enum.reverse([expr | exprs]), Enum.reverse(tokens), state}
end
end
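# Folds postfix forms onto an already-parsed expression: `.field` accesses,
# `->method` accesses, `callee!(args)` macro calls, and `callee(args)` calls,
# recursing until no postfix token follows.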
@spec parse_post_expr(state(), ast_node()) :: return_n()
defp parse_post_expr(state, expr) do
case next_token(state) do
{:period, tok, state} ->
{name, state} = parse_simple_name(state)
parse_post_expr(state, new_node(:field_expr, tok.location(), [operator: tok], [lhs: expr, rhs: name]))
{:minus_angle_close, tok, state} ->
{name, state} = parse_simple_name(state)
parse_post_expr(state, new_node(:method_expr, tok.location(), [operator: tok], [lhs: expr, rhs: name]))
{:exclamation, tok_excl, state} ->
{_, tok_open, state} = expect_token(state, :paren_open, "opening_parenthesis")
{args, toks, state} = parse_call_argument_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
args = lc arg inlist args, do: {:argument, arg}
toks = lc tok inlist toks, do: {:comma, tok}
tokens = [{:exclamation, tok_excl}, {:opening_parenthesis, tok_open} | toks] ++ [closing_parenthesis: tok_close]
parse_post_expr(state, new_node(:macro_call_expr, tok_open.location(), tokens, [{:callee, expr} | args]))
{:paren_open, tok_open, state} ->
{args, toks, state} = parse_call_argument_list(state, [])
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
args = lc arg inlist args, do: {:argument, arg}
toks = lc tok inlist toks, do: {:comma, tok}
tokens = [{:opening_parenthesis, tok_open} | toks] ++ [closing_parenthesis: tok_close]
parse_post_expr(state, new_node(:call_expr, tok_open.location(), tokens, [{:callee, expr} | args]))
_ -> {expr, state}
end
end
@spec parse_call_argument_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_call_argument_list(state, args, tokens // []) do
case next_token(state) do
{:paren_close, _, _} -> {Enum.reverse(args), Enum.reverse(tokens), state}
_ ->
if args == [] do
{arg, state} = parse_call_argument(state)
parse_call_argument_list(state, [arg | args], tokens)
else
{_, tok, state} = expect_token(state, :comma, "comma")
{arg, state} = parse_call_argument(state)
parse_call_argument_list(state, [arg | args], [tok | tokens])
end
end
end
@spec parse_call_argument(state()) :: return_n()
defp parse_call_argument(state) do
{mut_ref, state} = case next_token(state) do
{:mut, mut, state} ->
{_, ref, state} = expect_token(state, :ref, "'ref' keyword")
{[mut_keyword: mut, ref_keyword: ref], state}
_ -> {[], state}
end
{expr, state} = parse_expr(state)
{new_node(:call_argument, expr.location(), mut_ref, [expression: expr]), state}
end
@spec parse_primary_expr(state()) :: return_n()
defp parse_primary_expr(state) do
case next_token(state) do
{:if, _, _} -> parse_if_expr(state)
{:cond, _, _} -> parse_cond_expr(state)
{:match, _, _} -> parse_match_expr(state)
{:loop, _, _} -> parse_loop_expr(state)
{:while, _, _} -> parse_while_expr(state)
{:for, _, _} -> parse_for_expr(state)
{:break, _, _} -> parse_break_expr(state)
{:goto, _, _} -> parse_goto_expr(state)
{:return, _, _} -> parse_return_expr(state)
{:asm, _, _} -> parse_asm_expr(state)
{:new, _, _} -> parse_new_expr(state)
{:assert, _, _} -> parse_assert_expr(state)
{:meta, _, _} -> parse_meta_expr(state)
{:macro, _, _} -> parse_macro_expr(state)
{:quote, _, _} -> parse_quote_expr(state)
{:unquote, _, _} -> parse_unquote_expr(state)
{t, _, _} when t in [:safe, :unsafe] -> parse_safety_expr(state)
{:brace_open, _, _} -> parse_block(state)
{t, tok, state} when t in [:string, :character,
:true, :false,
:null,
:float, :integer,
:f32, :f64,
:i8, :u8,
:i16, :u16,
:i32, :u32,
:i64, :u64,
:i, :u] ->
ast_type = cond do
t == :string -> :string_expr
t == :character -> :character_expr
t in [:true, :false] -> :boolean_expr
t == :null -> :null_expr
t in [:float,
:f32, :f64] -> :float_expr
t in [:integer,
:i8, :u8,
:i16, :u16,
:i32, :u32,
:i64, :u64,
:i, :u] -> :integer_expr
end
{new_node(ast_type, tok.location(), [literal: tok], []), state}
{:bracket_open, _, _} -> parse_vector_expr(state)
{:identifier, _, istate} ->
case next_token(istate) do
# If the identifier is followed by a colon, it's a labelled block.
{:colon, _, _} -> parse_labelled_block(state)
# Otherwise, it's just a regular identifier (possibly qualified).
_ -> parse_identifier_expr(state)
end
# An identifier accessing the global scope.
{:colon_colon, _, _} -> parse_identifier_expr(state)
{_, tok, _} ->
val = if String.printable?(tok.value()), do: tok.value(), else: "<non-printable token>"
raise_error(tok.location(), "Expected primary expression, but found: #{val}")
end
end
@spec parse_if_expr(state()) :: return_n()
defp parse_if_expr(state) do
{_, tok_if, state} = expect_token(state, :if, "'if' keyword")
{cond_expr, state} = parse_expr(state)
{then_block, state} = parse_block(state)
{else_block, else_tok, state} = case next_token(state) do
{:else, tok, state} ->
{block, state} = case next_token(state) do
{:if, _, _} -> parse_if_expr(state)
_ -> parse_block(state)
end
{[false_block: block], [else_keyword: tok], state}
_ -> {[], [], state}
end
{new_node(:if_expr, tok_if.location(), [{:if_keyword, tok_if} | else_tok],
[{:condition, cond_expr}, {:true_block, then_block} | else_block]), state}
end
@spec parse_cond_expr(state()) :: return_n()
defp parse_cond_expr(state) do
{_, tok_cond, state} = expect_token(state, :cond, "'cond' keyword")
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{branches, state} = parse_cond_branch_list(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
branches = lc branch inlist branches, do: {:branch, branch}
tokens = [cond_keyword: tok_cond,
opening_brace: tok_open,
closing_brace: tok_close]
{new_node(:cond_expr, tok_cond.location(), tokens, branches), state}
end
@spec parse_cond_branch_list(state(), [ast_node()]) :: return_m()
defp parse_cond_branch_list(state, branches // []) do
{branch, state} = parse_cond_branch(state)
case next_token(state) do
{:brace_close, _, _} -> {Enum.reverse([branch | branches]), state}
_ -> parse_cond_branch_list(state, [branch | branches])
end
end
@spec parse_cond_branch(state()) :: return_n()
defp parse_cond_branch(state) do
{expr, state} = parse_expr(state)
{block, state} = parse_block(state)
{new_node(:cond_expr_branch, expr.location(), [], [condition: expr, body: block]), state}
end
@spec parse_match_expr(state()) :: return_n()
defp parse_match_expr(state) do
{_, tok_match, state} = expect_token(state, :match, "'match' keyword")
{expr, state} = parse_expr(state)
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{branches, state} = parse_match_branch_list(state)
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
tokens = [match_keyword: tok_match,
opening_brace: tok_open,
closing_brace: tok_close]
branches = lc branch inlist branches, do: {:branch, branch}
{new_node(:match_expr, tok_match.location(), tokens, [operand: expr] ++ branches), state}
end
@spec parse_match_branch_list(state(), [ast_node()]) :: return_m()
defp parse_match_branch_list(state, branches // []) do
{branch, state} = parse_match_branch(state)
case next_token(state) do
{:brace_close, _, _} -> {Enum.reverse([branch | branches]), state}
_ -> parse_match_branch_list(state, [branch | branches])
end
end
@spec parse_match_branch(state()) :: return_n()
defp parse_match_branch(state) do
{patterns, toks, state} = parse_match_branch_pattern_list(state, [])
{block, state} = parse_block(state)
patterns = lc pattern inlist patterns, do: {:pattern, pattern}
toks = lc tok inlist toks, do: {:pipe, tok}
{new_node(:match_expr_branch, elem(hd(patterns), 1).location(), toks, patterns ++ [body: block]), state}
end
@spec parse_match_branch_pattern_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_match_branch_pattern_list(state, patterns, tokens // []) do
{pattern, state} = parse_match_branch_pattern(state)
case next_token(state) do
{:pipe, tok, state} -> parse_match_branch_pattern_list(state, [pattern | patterns], [tok | tokens])
_ -> {Enum.reverse([pattern | patterns]), Enum.reverse(tokens), state}
end
end
@spec parse_match_branch_pattern(state()) :: return_n()
defp parse_match_branch_pattern(state) do
# TODO: Parse pattern.
{guard_tok, guard_node, state} = case next_token(state) do
{:if, tok, state} ->
{expr, state} = parse_expr(state)
{[if_keyword: tok], [guard: expr], state}
_ -> {[], [], state}
end
{new_node(:match_expr_pattern, Flect.Compiler.Syntax.Location[], guard_tok, guard_node), state}
end
@spec parse_loop_expr(state()) :: return_n()
defp parse_loop_expr(state) do
{_, tok_loop, state} = expect_token(state, :loop, "'loop' keyword")
{body, state} = case next_token(state) do
{:brace_open, _, _} ->
{block, state} = parse_block(state)
{[body: block], state}
_ -> {[], state}
end
{new_node(:loop_expr, tok_loop.location(), [loop_keyword: tok_loop], body), state}
end
@spec parse_while_expr(state()) :: return_n()
defp parse_while_expr(state) do
{_, tok_while, state} = expect_token(state, :while, "'while' keyword")
{cond_expr, state} = parse_expr(state)
{block, state} = parse_block(state)
{new_node(:while_expr, tok_while.location(), [while_keyword: tok_while], [condition: cond_expr, body: block]), state}
end
@spec parse_for_expr(state()) :: return_n()
defp parse_for_expr(state) do
{_, tok_for, state} = expect_token(state, :for, "'for' keyword")
# TODO: Parse pattern.
{_, tok_in, state} = expect_token(state, :in, "'in' keyword")
{expr, state} = parse_expr(state)
{body, state} = parse_block(state)
{new_node(:for_expr, tok_for.location(), [for_keyword: tok_for, in_keyword: tok_in], [expression: expr, body: body]), state}
end
@spec parse_break_expr(state()) :: return_n()
defp parse_break_expr(state) do
{_, tok_break, state} = expect_token(state, :break, "'break' keyword")
{new_node(:break_expr, tok_break.location(), [break_keyword: tok_break], []), state}
end
@spec parse_goto_expr(state()) :: return_n()
defp parse_goto_expr(state) do
{_, tok_goto, state} = expect_token(state, :goto, "'goto' keyword")
{target, star, state} = case next_token(state) do
{:star, tok_star, state} ->
{expr, state} = parse_expr(state)
{expr, [star: tok_star], state}
_ ->
{name, state} = parse_simple_name(state)
{name, [], state}
end
{new_node(:goto_expr, tok_goto.location(), [{:goto_keyword, tok_goto} | star], [target: target]), state}
end
@spec parse_return_expr(state()) :: return_n()
defp parse_return_expr(state) do
{_, tok_return, state} = expect_token(state, :return, "'return' keyword")
{expr, state} = parse_expr(state)
{new_node(:return_expr, tok_return.location(), [return_keyword: tok_return], [expression: expr]), state}
end
@spec parse_asm_expr(state()) :: return_n()
defp parse_asm_expr(state) do
{_, tok_asm, state} = expect_token(state, :asm, "'asm' keyword")
{tok_qual, state} = case next_token(state) do
{:bracket_open, tok_open, state} ->
{_, tok_qual, state} = expect_token(state, :string, "qualifier string")
{_, tok_close, state} = expect_token(state, :bracket_close, "closing bracket")
{[opening_bracket: tok_open, qualifier: tok_qual, closing_bracket: tok_close], state}
_ -> {[], state}
end
{_, tok_code, state} = expect_token(state, :string, "assembly code string")
{operands, state} = case next_token(state) do
{:colon, _, _} ->
# First one is the outputs.
{oper1, state} = parse_asm_operand(state)
{oper2, state} = case next_token(state) do
{:colon, _, _} ->
# Second one is the inputs.
{oper2, state} = parse_asm_operand(state)
{oper3, state} = case next_token(state) do
{:colon, _, _} ->
# Third one is the clobbers.
{oper3, state} = parse_asm_clobber(state)
{[third_operand: oper3], state}
_ -> {[], state}
end
{[second_operand: oper2] ++ oper3, state}
_ -> {[], state}
end
{[first_operand: oper1] ++ oper2, state}
_ -> {[], state}
end
{new_node(:asm_expr, tok_asm.location(), [{:asm_keyword, tok_asm} | tok_qual] ++ [code: tok_code], operands), state}
end
@spec parse_asm_operand(state()) :: return_n()
defp parse_asm_operand(state) do
{_, tok_col, state} = expect_token(state, :colon, "colon")
{entries, toks, state} = parse_asm_operand_entry_list(state, [])
entries = lc entry inlist entries, do: {:entry, entry}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:asm_expr_operand, tok_col.location(), [{:colon, tok_col} | toks], entries), state}
end
@spec parse_asm_operand_entry_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_asm_operand_entry_list(state, entries, tokens // []) do
{entry, state} = parse_asm_operand_entry(state)
case next_token(state) do
{:comma, tok, state} -> parse_asm_operand_entry_list(state, [entry | entries], [tok | tokens])
_ -> {Enum.reverse([entry | entries]), Enum.reverse(tokens), state}
end
end
@spec parse_asm_operand_entry(state()) :: return_n()
defp parse_asm_operand_entry(state) do
{_, tok_open, state} = expect_token(state, :paren_open, "opening_parenthesis")
{_, tok_ident, state} = expect_token(state, :identifier, "symbolic name")
{_, tok_comma1, state} = expect_token(state, :comma, "comma")
{_, tok_str, state} = expect_token(state, :string, "constraint string")
{_, tok_comma2, state} = expect_token(state, :comma, "comma")
{expr, state} = parse_expr(state)
{_, tok_close, state} = expect_token(state, :paren_close, "closing parenthesis")
tokens = [opening_parenthesis: tok_open,
symbolic_name: tok_ident,
comma: tok_comma1,
constraint: tok_str,
comma: tok_comma2,
closing_parenthesis: tok_close]
{new_node(:asm_expr_operand_entry, tok_str.location(), tokens, [expression: expr]), state}
end
@spec parse_asm_clobber(state()) :: return_n()
defp parse_asm_clobber(state) do
{_, tok_col, state} = expect_token(state, :colon, "colon")
{entries, toks, state} = parse_asm_clobber_entry_list(state, [])
entries = lc entry inlist entries, do: {:entry, entry}
toks = lc tok inlist toks, do: {:comma, tok}
{new_node(:asm_expr_clobber, tok_col.location(), [{:colon, tok_col} | toks], entries), state}
end
@spec parse_asm_clobber_entry_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_asm_clobber_entry_list(state, entries, tokens // []) do
{entry, state} = parse_asm_clobber_entry(state)
case next_token(state) do
{:comma, tok, state} -> parse_asm_clobber_entry_list(state, [entry | entries], [tok | tokens])
_ -> {Enum.reverse([entry | entries]), Enum.reverse(tokens), state}
end
end
@spec parse_asm_clobber_entry(state()) :: return_n()
defp parse_asm_clobber_entry(state) do
{_, tok_str, state} = expect_token(state, :string, "clobber string")
{new_node(:asm_expr_clobber_entry, tok_str.location(), [clobber: tok_str], []), state}
end
@spec parse_new_expr(state()) :: return_n()
defp parse_new_expr(state) do
{_, tok_new, state} = expect_token(state, :new, "'new' keyword")
{type, state} = parse_nominal_type(state)
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{pairs, toks, state} = parse_field_value_pair_list(state, [])
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
pairs = lc pair inlist pairs, do: {:pair, pair}
toks = lc tok inlist toks, do: {:comma, tok}
tokens = [new_keyword: tok_new,
opening_brace: tok_open,
closing_brace: tok_close] ++ toks
{new_node(:new_expr, tok_new.location(), tokens, [type: type] ++ pairs), state}
end
@spec parse_field_value_pair_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_field_value_pair_list(state, pairs, tokens // []) do
case next_token(state) do
{:brace_close, _, _} -> {Enum.reverse(pairs), Enum.reverse(tokens), state}
_ ->
if pairs == [] do
{pair, state} = parse_field_value_pair(state)
parse_field_value_pair_list(state, [pair | pairs], tokens)
else
{_, tok, state} = expect_token(state, :comma, "comma")
{pair, state} = parse_field_value_pair(state)
parse_field_value_pair_list(state, [pair | pairs], [tok | tokens])
end
end
end
@spec parse_field_value_pair(state()) :: return_n()
defp parse_field_value_pair(state) do
{name, state} = parse_simple_name(state)
{_, tok_eq, state} = expect_token(state, :assign, "equals sign")
{expr, state} = parse_expr(state)
{new_node(:field_value_pair, name.location(), [equals: tok_eq], [name: name, value: expr]), state}
end
@spec parse_assert_expr(state()) :: return_n()
defp parse_assert_expr(state) do
{_, tok_assert, state} = expect_token(state, :assert, "'assert' keyword")
{expr, state} = parse_expr(state)
{msg, state} = case next_token(state) do
{:comma, tok, state} ->
{_, itok, state} = expect_token(state, :string, "message string")
{[comma: tok, message: itok], state}
_ -> {[], state}
end
{new_node(:assert_expr, tok_assert.location(), [assert_keyword: tok_assert] ++ msg, [condition: expr]), state}
end
@spec parse_meta_expr(state()) :: return_n()
defp parse_meta_expr(state) do
{_, tok_meta, state} = expect_token(state, :meta, "'meta' keyword")
{t, tok_type, state} = expect_token(state, [:type, :fn, :trait, :glob, :tls, :macro],
"'type', 'fn', 'trait', 'glob', 'tls', or 'macro' keyword")
{operand, state} = case t do
:type -> parse_type(state)
:fn -> parse_qualified_name(state)
:trait -> parse_qualified_name(state)
:glob -> parse_qualified_name(state)
:tls -> parse_qualified_name(state)
:macro -> parse_qualified_name(state)
end
{new_node(:meta_expr, tok_meta.location(), [meta_keyword: tok_meta, query_keyword: tok_type], [operand: operand]), state}
end
@spec parse_macro_expr(state()) :: return_n()
defp parse_macro_expr(state) do
{_, tok_macro, state} = expect_token(state, :macro, "'macro' keyword")
{_, tok_query, state} = expect_token(state, :string, "macro query string")
{new_node(:macro_expr, tok_macro.location(), [macro_keyword: tok_macro, query: tok_query], []), state}
end
@spec parse_quote_expr(state()) :: return_n()
defp parse_quote_expr(state) do
{_, tok_quote, state} = expect_token(state, :quote, "'quote' keyword")
{expr, state} = parse_expr(state)
{new_node(:quote_expr, tok_quote.location(), [quote_keyword: tok_quote], [expression: expr]), state}
end
@spec parse_unquote_expr(state()) :: return_n()
defp parse_unquote_expr(state) do
{_, tok_unquote, state} = expect_token(state, :unquote, "'unquote' keyword")
{expr, state} = parse_expr(state)
{new_node(:unquote_expr, tok_unquote.location(), [unquote_keyword: tok_unquote], [expression: expr]), state}
end
@spec parse_safety_expr(state()) :: return_n()
defp parse_safety_expr(state) do
{t, tok_safety, state} = expect_token(state, [:safe, :unsafe], "'safe' or 'unsafe' keyword")
{block, state} = parse_block(state)
{type, tok} = case t do
:safe -> {:safe_expr, [safe_keyword: tok_safety]}
:unsafe -> {:unsafe_expr, [unsafe_keyword: tok_safety]}
end
{new_node(type, tok_safety.location(), tok, [body: block]), state}
end
@spec parse_block(state()) :: return_n()
defp parse_block(state) do
{_, tok_open, state} = expect_token(state, :brace_open, "opening brace")
{exprs, toks, state} = parse_stmt_expr_list(state, [])
{_, tok_close, state} = expect_token(state, :brace_close, "closing brace")
exprs = lc expr inlist exprs, do: {:expression, expr}
toks = lc tok inlist toks, do: {:semicolon, tok}
{new_node(:block_expr, tok_open.location(),
[{:opening_brace, tok_open} | toks] ++ [closing_brace: tok_close], exprs), state}
end
@spec parse_stmt_expr_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_stmt_expr_list(state, exprs, tokens // []) do
case next_token(state) do
{:brace_close, _, _} -> {Enum.reverse(exprs), Enum.reverse(tokens), state}
_ ->
{expr, state} = parse_stmt_expr(state)
{_, tok, state} = expect_token(state, :semicolon, "semicolon")
parse_stmt_expr_list(state, [expr | exprs], [tok | tokens])
end
end
@spec parse_stmt_expr(state()) :: return_n()
defp parse_stmt_expr(state) do
case next_token(state) do
{:let, _, _} -> parse_let_expr(state)
_ -> parse_expr(state)
end
end
@spec parse_let_expr(state()) :: return_n()
defp parse_let_expr(state) do
{_, tok_let, state} = expect_token(state, :let, "'let' keyword")
{mut, state} = case next_token(state) do
{:mut, tok, state} -> {[mut_keyword: tok], state}
_ -> {[], state}
end
# TODO: Parse pattern.
{type, ty_tok, state} = case next_token(state) do
{:colon, tok, state} ->
{type, state} = parse_type(state)
{[type: type], [colon: tok], state}
_ -> {[], [], state}
end
{_, eq, state} = expect_token(state, :assign, "equals sign")
{expr, state} = parse_expr(state)
{new_node(:let_expr, tok_let.location(), [{:let_keyword, tok_let} | mut] ++ ty_tok ++ [equals: eq],
type ++ [expression: expr]), state}
end
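  # Judging from the tokens consumed above, this clause accepts source shaped
  # like (illustrative; the type name is hypothetical):
  #
  #   let mut x: int = compute();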
@spec parse_vector_expr(state(), boolean()) :: return_n()
defp parse_vector_expr(state, array // false) do
{_, tok_open, state} = expect_token(state, :bracket_open, "opening bracket")
{exprs, toks, state} = parse_vector_expr_list(state, [])
{type, etok, state} = case next_token(state) do
{:period_period, etok, state} ->
{_, tok_int, state} = expect_token(state, :integer, "vector size integer")
{:vector_expr, [ellipsis: etok, size: tok_int], state}
{_, tok, _} ->
if !array do
val = if String.printable?(tok.value()), do: tok.value(), else: "<non-printable token>"
raise_error(tok.location(), "Expected elements/size-separating ellipsis, but found: #{val}")
end
{:array_expr, [], state}
end
{_, tok_close, state} = expect_token(state, :bracket_close, "closing bracket")
exprs = lc expr inlist exprs, do: {:expression, expr}
toks = lc tok inlist toks, do: {:comma, tok}
tokens = [{:opening_bracket, tok_open} | toks] ++ etok ++ [closing_bracket: tok_close]
{new_node(type, tok_open.location(), tokens, exprs), state}
end
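  # Judging from the tokens consumed above, this clause accepts (illustrative):
  #
  #   [1, 2, 3]    as an array expression (when array is true)
  #   [0 .. 16]    as a fixed-size vector expression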
@spec parse_vector_expr_list(state(), [ast_node()], [token()]) :: return_mt()
defp parse_vector_expr_list(state, exprs, tokens // []) do
case next_token(state) do
{t, _, _} when t in [:period_period, :bracket_close] -> {Enum.reverse(exprs), Enum.reverse(tokens), state}
_ ->
if exprs == [] do
{expr, state} = parse_expr(state)
parse_vector_expr_list(state, [expr | exprs], tokens)
else
{_, tok, state} = expect_token(state, :comma, "comma")
{expr, state} = parse_expr(state)
parse_vector_expr_list(state, [expr | exprs], [tok | tokens])
end
end
end
@spec parse_labelled_block(state()) :: return_n()
defp parse_labelled_block(state) do
{label, state} = parse_simple_name(state)
{_, tok_col, state} = expect_token(state, :colon, "colon")
{block, state} = parse_block(state)
{new_node(:labelled_block_expr, label.location(), [colon: tok_col], [label: label, block: block]), state}
end
@spec parse_identifier_expr(state()) :: return_n()
defp parse_identifier_expr(state) do
{name, state} = parse_qualified_name(state)
{ty_args, state} = case next_token(state) do
{:bracket_open, _, _} ->
{ty_args, state} = parse_type_arguments(state)
{[arguments: ty_args], state}
_ -> {[], state}
end
{new_node(:identifier_expr, name.location(), [], [{:name, name} | ty_args]), state}
end
@spec next_token(state(), boolean()) :: {atom(), token(), state()} | :eof
defp next_token({tokens, loc}, eof // false) do
case tokens do
[h | t] ->
case h.type() do
# TODO: Attach comments to AST nodes.
a when a in [:line_comment, :block_comment] -> next_token({t, h.location()}, eof)
a -> {a, h, {t, h.location()}}
end
[] when eof -> :eof
_ -> raise_error(loc, "Unexpected end of token stream")
end
end
@spec expect_token(state(), atom() | [atom(), ...], String.t(), boolean()) :: {atom(), token(), state()} | :eof
defp expect_token(state, type, str, eof // false) do
case next_token(state, eof) do
tup = {t, tok, {_, l}} ->
ok = cond do
is_list(type) -> Enum.member?(type, t)
is_atom(type) -> t == type
end
if !ok do
val = if String.printable?(tok.value()), do: tok.value(), else: "<non-printable token>"
raise_error(l, "Expected #{str}, but found: #{val}")
end
tup
# We only get :eof if eof is true.
:eof -> :eof
end
end
@spec new_node(atom(), location(), [{atom(), token()}], [{atom(), ast_node()}]) :: ast_node()
defp new_node(type, loc, tokens, children // []) do
Flect.Compiler.Syntax.Node[type: type,
location: loc,
tokens: tokens,
children: children]
end
@spec raise_error(location(), String.t()) :: no_return()
defp raise_error(loc, msg) do
raise(Flect.Compiler.Syntax.SyntaxError[error: msg, location: loc])
end
end
|
lib/compiler/syntax/parser.ex
| 0.851629 | 0.611411 |
parser.ex
|
starcoder
|
defmodule Plymio.Ast.Vorm.Error do
@moduledoc false
require Plymio.Option.Utility, as: POU
use Plymio.Ast.Vorm.Attribute
@pav_struct_kvs_aliases [
{@pav_key_message, [:m, :msg]},
{@pav_key_value, [:v]},
{@pav_key_error, [:e]}
]
@pav_struct_dict_aliases @pav_struct_kvs_aliases
|> POU.opts_create_aliases_dict
@pav_error_defstruct @pav_struct_kvs_aliases
|> Enum.map(fn {k,_v} -> {k, nil} end)
def opts_canon_keys!(opts, dict \\ @pav_struct_dict_aliases) do
opts |> POU.opts_canon_keys!(dict)
end
defexception @pav_error_defstruct
@type opts :: Keyword.t
@type t :: %__MODULE__{}
@type kv :: {any,any}
@type error :: any
@spec new(opts) :: {:ok, t} | {:error, error}
def new(opts \\ [])
def new([]) do
{:ok, %__MODULE__{}}
end
def new(opts) do
with {:ok, %__MODULE__{} = pase} <- new() do
pase |> update(opts)
else
{:error, _} = result -> result
end
end
def new!(opts \\ []) do
opts
|> new()
|> case do
{:ok, %__MODULE__{} = state} -> state
{:error, error} -> raise error
end
end
def new_result(opts \\ []) do
opts
|> new
|> case do
{:ok, %__MODULE__{} = pase} -> {:error, pase}
      {:error, error} ->
        case error |> Exception.exception? do
          true -> raise error
          false -> raise ArgumentError, message: "expected an exception; got: #{inspect error}"
        end
end
end
@spec update_field(t, kv) :: {:ok, t} | {:error, error}
defp update_field(state, kv)
defp update_field(%__MODULE__{} = state, {k,v})
when k in [
@pav_key_message,
] do
cond do
is_binary(v) -> state |> Map.put(k, v)
is_atom(v) -> state |> Map.put(k, v |> to_string)
true -> {:error, %ArgumentError{message: "expected valid #{inspect k}; got #{inspect v}"}}
end
end
defp update_field(%__MODULE__{} = state, {k,v})
when k in [
@pav_key_value,
@pav_key_error,
] do
state |> struct!([{k, v}])
end
@spec update(t, opts) :: {:ok, t} | {:error, error}
def update(state, opts \\ [])
def update(%__MODULE__{} = state, []) do
{:ok, state}
end
def update(%__MODULE__{} = state, opts) when is_list(opts) do
opts
|> POU.opts_canon_keys!(@pav_struct_dict_aliases)
|> Enum.reduce_while(state, fn {k,v}, s ->
s
|> update_field({k,v})
|> case do
%__MODULE__{} = s -> {:cont, s}
{:ok, %__MODULE__{} = s} -> {:cont, s}
{:error, error} -> {:halt, error}
end
end)
|> case do
{:error, _} = result -> result
value -> {:ok, value}
end
end
def update!(%__MODULE__{} = state, opts \\ []) do
state
|> update(opts)
|> case do
{:ok, %__MODULE__{} = state} -> state
{:error, error} -> raise error
end
end
def message(%__MODULE__{} = pase) do
pase
|> Map.from_struct
|> format_error_message
end
def format_error_message(opts \\ [])
def format_error_message(opts) when is_map(opts) do
opts |> Map.to_list |> format_error_message
end
def format_error_message(opts) when is_list(opts) do
[
message: nil,
value: nil,
error: nil,
]
|> Enum.map(fn {k,_v} -> {k, opts |> Keyword.get(k)} end)
|> Enum.reject(fn
{_k, nil} -> true
_ -> false
end)
|> Enum.reduce([], fn
{@pav_key_message, message}, messages -> ["#{message}" | messages]
{@pav_key_value, value}, messages -> ["got: #{inspect value}" | messages]
{@pav_key_error, error}, messages ->
message = cond do
Exception.exception?(error) -> "reason: #{Exception.message(error)}"
is_binary(error) -> error
true -> "reason: #{inspect error}"
end
[message | messages]
end)
|> Enum.reject(&is_nil/1)
|> Enum.reverse
|> Enum.join("; ")
end
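  # Illustrative usage; the :m and :v keys are aliases declared in
  # @pav_struct_kvs_aliases above:
  #
  #   {:ok, error} = __MODULE__.new(m: "invalid form", v: 42)
  #   Exception.message(error)
  #   #=> "invalid form; got: 42"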
end
|
lib/ast/vorm/error.ex
| 0.764012 | 0.442034 |
error.ex
|
starcoder
|
defmodule EctoAsStateMachine.State do
@moduledoc """
State callbacks
"""
alias Ecto.Changeset
  @spec update(%{event: keyword(), model: map(), states: [atom()], initial: String.t() | atom(), column: atom()}) :: Ecto.Changeset.t()
def update(%{event: event, model: model, states: states, initial: initial, column: column}) do
model
    |> Changeset.change(%{column => "#{event[:to]}"})
|> run_callback(event[:callback])
|> validate_state_transition(%{
event: event,
column: column,
model: valid_model(model),
states: states,
initial: initial
})
end
def update(%{} = config) do
update(Map.put_new(config, :column, :state))
end
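  # Illustrative sketch; the event keyword list and the state names here are
  # hypothetical, but follow the shapes matched by the clauses above:
  #
  #   EctoAsStateMachine.State.update(%{
  #     event: [to: :confirmed, from: [:unconfirmed], callback: nil],
  #     model: user,
  #     states: [:unconfirmed, :confirmed],
  #     initial: :unconfirmed
  #   })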
  @spec update!(%{repo: module(), event: keyword(), model: map(), states: [atom()],
    initial: String.t() | atom(), column: atom()}) :: term | {:error, term}
def update!(%{repo: repo} = config) do
value = update(config)
case value |> repo.update do
{:ok, new_model} -> new_model
e -> e
end
end
  @spec next_state(%{events: [keyword()], model: map(), states: [atom()], initial: String.t() | atom(), column: atom()}) :: Ecto.Changeset.t() | map()
def next_state(%{events: events, model: model} = config) do
event =
events
|> Enum.find(fn(e) -> can_event?(Map.put_new(config, :event, e)) end)
if event do
update(Map.put_new(config, :event, event))
else
model
end
end
  @spec can_event?(%{event: keyword(), model: map(), column: atom()}) :: boolean()
def can_event?(%{model: model, event: event, column: column} = config) do
:"#{state_with_initial(Map.get(model, column), config)}" in event[:from]
end
def can_event?(%{} = config) do
can_event?(Map.put_new(config, :column, :state))
end
  @spec is_state?(%{model: map(), state: atom(), column: atom()}) :: boolean()
def is_state?(%{model: model, state: state, column: column} = config) do
:"#{state_with_initial(Map.get(model, column), config)}" == state
end
def is_state?(%{} = config) do
is_state?(Map.put_new(config, :column, :state))
end
  @spec state_with_initial(String.t() | atom(), %{states: [atom()], initial: String.t() | atom()}) :: String.t() | atom()
def state_with_initial(state, %{initial: initial, states: states}) do
if :"#{state}" in states do
state
else
initial
end
end
defp validate_state_transition(changeset, %{event: event, model: model, column: column} = config) do
state = state_with_initial(Map.get(model, column), config)
if :"#{state}" in event[:from] do
changeset
else
changeset
|> Changeset.add_error("state",
"You can't move state from :#{state || "nil"} to :#{event[:to]}")
end
end
defp validate_state_transition(changeset, %{} = config) do
validate_state_transition(changeset, Map.put_new(config, :column, :state))
end
defp run_callback(model, callback) when is_function(callback, 1), do: callback.(model)
defp run_callback(model, _), do: model
defp valid_model(%{data: model}), do: model
defp valid_model(%{model: model}), do: model
defp valid_model(model), do: model
end
|
lib/ecto_as_state_machine/state.ex
| 0.764848 | 0.582907 |
state.ex
|
starcoder
|
defmodule SBoM do
@moduledoc """
Collect dependency information for use in a Software Bill-of-Materials (SBOM).
"""
alias SBoM.Purl
alias SBoM.Cpe
@doc """
  Builds an SBoM for the current Mix project. The result can be exported to
  CycloneDX XML format using the `SBoM.CycloneDX` module. Pass an environment
  of `nil` to include dependencies across all environments.
  Wrap the call to this function with `Mix.Project.in_project/3` or
  `Mix.Project.in_project/4` to select a Mix project by path.
"""
def components_for_project(environment \\ :prod) do
Mix.Project.get!()
{deps, not_ok} =
Mix.Dep.load_on_environment(env: environment)
|> Enum.split_with(&ok?/1)
case not_ok do
[] ->
components =
deps
|> Enum.map(&component_from_dep/1)
|> Enum.reject(&is_nil/1)
{:ok, components}
_ ->
{:error, :unresolved_dependency}
end
end
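  # Illustrative usage; the app name and path are hypothetical:
  #
  #   Mix.Project.in_project(:my_app, "path/to/app", fn _module ->
  #     SBoM.components_for_project(:prod)
  #   end)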
defp ok?(dep) do
Mix.Dep.ok?(dep) || Mix.Dep.compilable?(dep)
end
defp component_from_dep(%{opts: opts} = dep) do
case Map.new(opts) do
%{optional: true} ->
# If the dependency is optional at the top level, then we don't include
# it in the SBoM
nil
opts_map ->
component_from_dep(dep, opts_map)
end
end
defp component_from_dep(%{scm: Hex.SCM}, opts) do
%{hex: name, lock: lock, dest: dest} = opts
version = elem(lock, 2)
sha256 = elem(lock, 3)
hex_metadata_path = Path.expand("hex_metadata.config", dest)
metadata =
case :file.consult(hex_metadata_path) do
{:ok, metadata} -> metadata
_ -> []
end
{_, description} = List.keyfind(metadata, "description", 0, {"description", ""})
{_, licenses} = List.keyfind(metadata, "licenses", 0, {"licenses", []})
%{
type: "library",
name: name,
version: version,
purl: Purl.hex(name, version, opts[:repo]),
cpe: Cpe.hex(name, version, opts[:repo]),
hashes: %{
"SHA-256" => sha256
},
description: description,
licenses: licenses
}
end
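  # For reference, a Hex dependency yields a map shaped like this (all values
  # illustrative):
  #
  #   %{type: "library", name: "jason", version: "1.4.0",
  #     purl: "pkg:hex/jason@1.4.0", cpe: ..., hashes: %{"SHA-256" => ...},
  #     description: "...", licenses: ["Apache-2.0"]}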
defp component_from_dep(%{scm: Mix.SCM.Git, app: app}, opts) do
%{git: git, lock: lock, dest: _dest} = opts
version =
case opts[:tag] do
nil ->
elem(lock, 2)
tag ->
tag
end
%{
type: "library",
name: to_string(app),
version: version,
purl: Purl.git(to_string(app), git, version),
licenses: []
}
end
defp component_from_dep(_dep, _opts), do: nil
end
|
lib/sbom.ex
| 0.756717 | 0.45744 |
sbom.ex
|
starcoder
|
defmodule ExDhcp.Utils do
@moduledoc """
  Provides typespecs for common data types, plus binary/string conversion
  helpers for _ip_ and _mac_ addresses, for ease of use both within this
  library and by code that depends on it.
"""
@typedoc "Erlang-style _ip_ addresses."
@type ip4 :: :inet.ip4_address
@typedoc "_Mac_ addresses in the same style as the erlang _ip_ address."
@type mac :: {byte, byte, byte, byte, byte, byte}
@doc """
Represents an erlang-style ip4 value as a string, without going through
a list intermediate.
"""
@spec ip2str(ip4) :: binary
def ip2str(_ip_addr = {a, b, c, d}), do: "#{a}.#{b}.#{c}.#{d}"
@doc """
Converts an erlang-style _ip_ address (4-tuple of bytes)
to a binary stored _ip_ address (in the dhcp packet spec)
"""
@spec ip2bin(ip4) :: <<_::32>>
def ip2bin(_ip_addr = {a, b, c, d}), do: <<a, b, c, d>>
@doc """
Converts a binary stored _ip_ address (in the dhcp packet spec) to
an erlang-style _ip_ address. (4-tuple of bytes)
"""
@spec bin2ip(<<_::32>>) :: ip4
def bin2ip(_mac_addr = <<a, b, c, d>>), do: {a, b, c, d}
@doc """
Converts a binary stored _mac_ address (in the dhcp packet spec) to
an erlang-style _mac_ address. (6-tuple of bytes)
"""
  @spec bin2mac(<<_::48>>) :: mac
  def bin2mac(_mac_addr = <<a, b, c, d, e, f>>), do: {a, b, c, d, e, f}
@doc """
Converts an erlang-style _mac_ address (6-tuple of bytes) to a
binary stored _mac_ address (in the dhcp packet spec).
"""
  @spec mac2bin(mac) :: <<_::48>>
  def mac2bin(_mac_addr = {a, b, c, d, e, f}), do: <<a, b, c, d, e, f>>
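  # Round-trip sketch:
  #
  #   mac = {0, 11, 130, 1, 252, 66}
  #   mac |> ExDhcp.Utils.mac2bin() |> ExDhcp.Utils.bin2mac()
  #   #=> {0, 11, 130, 1, 252, 66}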
@doc """
Converts a _mac_ address 6-byte tuple to a string.
```elixir
iex> ExDhcp.Utils.mac2str({1, 2, 3, 16, 255, 254})
"01:02:03:10:FF:FE"
```
"""
def mac2str(mac_addr = {_, _, _, _, _, _}) do
mac_addr
|> Tuple.to_list
|> Enum.map(&padhex/1)
|> Enum.join(":")
end
@doc """
Converts a _mac_ address string into a raw binary value.
```elixir
iex> ExDhcp.Utils.str2mac("01:02:03:10:FF:FE")
{1, 2, 3, 16, 255, 254}
```
"""
def str2mac(_mac_addr = <<a::16, ":", b::16, ":", c::16, ":", d::16, ":", e::16, ":", f::16>>) do
[<<a::16>>, <<b::16>>, <<c::16>>, <<d::16>>, <<e::16>>, <<f::16>>]
|> Enum.map(&String.to_integer(&1, 16))
|> List.to_tuple
end
defp padhex(v) when v < 16, do: "0" <> Integer.to_string(v, 16)
defp padhex(v), do: Integer.to_string(v, 16)
@spec cidr2mask(cidr :: 0..32) :: ip4
@doc """
Creates a subnet mask from a _cidr_ value.
```elixir
iex> ExDhcp.Utils.cidr2mask(24)
{255, 255, 255, 0}
```
"""
def cidr2mask(_cidr_val = n) do
import Bitwise
<<a, b, c, d>> = <<-1 <<< (32 - n)::32>>
{a, b, c, d}
end
end
|
lib/ex_dhcp/utils.ex
| 0.830628 | 0.712207 |
utils.ex
|
starcoder
|
defmodule QueryBuilder do
require Ecto.Query
alias Ecto.Query
defmacro __using__(opts) do
quote do
require QueryBuilder.Schema
QueryBuilder.Schema.__using__(unquote(opts))
end
end
def new(ecto_query) do
%QueryBuilder.Query{ecto_query: ensure_query_has_binding(ecto_query)}
end
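  # Illustrative usage; `User` is a hypothetical Ecto schema that uses this
  # module:
  #
  #   User
  #   |> QueryBuilder.new()
  #   |> QueryBuilder.where(firstname: "John")
  #   |> QueryBuilder.preload(:articles)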
@doc ~S"""
Preloads the associations.
Bindings are automatically set if joins have been made, or if it is preferable to
join (i.e. one-to-one associations are preferable to include into the query result
rather than emitting separate DB queries).
Example:
```
QueryBuilder.preload(query, [role: :permissions, articles: [:stars, comments: :user]])
```
"""
def preload(%QueryBuilder.Query{} = query, assoc_fields) do
%{query | operations: [%{type: :preload, assocs: assoc_fields, args: []} | query.operations]}
end
def preload(ecto_query, assoc_fields) do
ecto_query = ensure_query_has_binding(ecto_query)
preload(%QueryBuilder.Query{ecto_query: ecto_query}, assoc_fields)
end
@doc ~S"""
An AND where query expression.
Example:
```
QueryBuilder.where(query, firstname: "John")
```
"""
def where(query, filters) do
where(query, [], filters)
end
@doc ~S"""
An AND where query expression.
  Associations are passed in the second argument; fields from these associations can then
be referenced by writing the field name, followed by the "@" character and the
association name, as an atom. For example: `:name@users`.
Example:
```
QueryBuilder.where(query, [role: :permissions], name@permissions: :write)
```
  OR clauses may be passed through the last argument, `or_filters`. For example:
```elixir
QueryBuilder.where(query, [], [firstname: "John"], or: [firstname: "Alice", lastname: "Doe"], or: [firstname: "Bob"])
```
"""
def where(query, assoc_fields, filters, or_filters \\ [])
def where(%QueryBuilder.Query{} = query, assoc_fields, filters, or_filters) do
%{query | operations: [%{type: :where, assocs: assoc_fields, args: [filters, or_filters]} | query.operations]}
end
def where(ecto_query, assoc_fields, filters, or_filters) do
ecto_query = ensure_query_has_binding(ecto_query)
where(%QueryBuilder.Query{ecto_query: ecto_query}, assoc_fields, filters, or_filters)
end
@doc ~S"""
Run `QueryBuilder.where/2` only if given condition is met.
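  Example (illustrative; `params` is a hypothetical request-params map):
  ```
  QueryBuilder.maybe_where(query, params["name"] != nil, name: params["name"])
  ```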
"""
def maybe_where(query, true, filters) do
where(query, [], filters)
end
def maybe_where(query, false, _), do: query
  @doc ~S"""
  Run `QueryBuilder.where/4` only if given condition is met.
  """
  def maybe_where(query, condition, assoc_fields, filters, or_filters \\ [])
def maybe_where(query, true, assoc_fields, filters, or_filters) do
where(query, assoc_fields, filters, or_filters)
end
def maybe_where(query, false, _, _, _), do: query
@doc ~S"""
An order by query expression.
Example:
```
QueryBuilder.order_by(query, asc: :lastname, asc: :firstname)
```
"""
def order_by(query, value) do
order_by(query, [], value)
end
@doc ~S"""
An order by query expression.
For more about the second argument, see `where/3`.
Example:
```
QueryBuilder.order_by(query, :articles, asc: :title@articles)
```
"""
def order_by(%QueryBuilder.Query{} = query, assoc_fields, value) do
%{query | operations: [%{type: :order_by, assocs: assoc_fields, args: [value]} | query.operations]}
end
def order_by(ecto_query, assoc_fields, value) do
ecto_query = ensure_query_has_binding(ecto_query)
order_by(%QueryBuilder.Query{ecto_query: ecto_query}, assoc_fields, value)
end
@doc ~S"""
A limit query expression.
  If multiple limit expressions are provided, the last expression is evaluated.
Example:
```
QueryBuilder.limit(query, 10)
```
"""
def limit(%QueryBuilder.Query{} = query, value) do
# Limit order must be maintained, similar to Ecto:
# - https://hexdocs.pm/ecto/Ecto.Query-macro-limit.html
%{query | operations: query.operations ++ [%{type: :limit, assocs: [], args: [value]}]}
end
def limit(ecto_query, value) do
limit(%QueryBuilder.Query{ecto_query: ecto_query}, value)
end
@doc ~S"""
  An offset query expression.
  If multiple offset expressions are provided, the last expression is evaluated.
Example:
```
QueryBuilder.offset(query, 10)
```
"""
def offset(%QueryBuilder.Query{} = query, value) do
# Offset order must be maintained, similar to Ecto:
# - https://hexdocs.pm/ecto/Ecto.Query.html#offset/3
%{query | operations: query.operations ++ [%{type: :offset, assocs: [], args: [value]}]}
end
def offset(ecto_query, value) do
offset(%QueryBuilder.Query{ecto_query: ecto_query}, value)
end
@doc ~S"""
A join query expression.
Example:
```
QueryBuilder.left_join(query, :articles, title@articles: "Foo", or: [title@articles: "Bar"])
```
"""
def left_join(query, assoc_fields, filters \\ [], or_filters \\ [])
def left_join(%QueryBuilder.Query{} = query, assoc_fields, filters, or_filters) do
%{query | operations: [%{type: :left_join, assocs: assoc_fields, join_filters: [List.wrap(filters), List.wrap(or_filters)]} | query.operations]}
end
def left_join(ecto_query, assoc_fields, filters, or_filters) do
ecto_query = ensure_query_has_binding(ecto_query)
left_join(%QueryBuilder.Query{ecto_query: ecto_query}, assoc_fields, filters, or_filters)
end
@doc ~S"""
Allows to pass a list of operations through a keyword list.
Example:
```
QueryBuilder.from_list(query, [
where: [name: "John", city: "Anytown"],
preload: [articles: :comments]
])
```
"""
def from_list(query, []), do: query
def from_list(query, [{operation, arguments} | tail]) do
arguments =
cond do
is_tuple(arguments) -> Tuple.to_list(arguments)
is_list(arguments) -> [arguments]
true -> List.wrap(arguments)
end
apply(__MODULE__, operation, [query | arguments])
|> from_list(tail)
end
defp ensure_query_has_binding(query) do
schema = QueryBuilder.Utils.root_schema(query)
    if Query.has_named_binding?(query, schema._binding()) do
      query
    else
      schema._query()
    end
end
end
|
lib/query_builder.ex
| 0.865665 | 0.815637 |
query_builder.ex
|
starcoder
|