# lib/monad/writer.ex
defmodule Monad.Writer do
  @moduledoc """
  The writer monad keeps track of a calculation and a "log".

  The log can be anything that conforms to the `Monoid` protocol.

  It's often useful to combine the writer monad with others. For example, you
  can use a `Monad.Maybe` as the `value` of the writer monad. This offers the
  benefit of having a log of a writer monad and the control flow of a maybe
  monad.
  """

  use Monad.Behaviour

  @opaque t :: %__MODULE__{value: term, log: Monoid.t | :nil_monoid}

  @doc false
  defstruct value: nil, log: :nil_monoid

  @doc """
  Wraps `value` into a writer monad.

      iex> writer 42
      %Monad.Writer{value: 42, log: :nil_monoid}
  """
  @spec writer(term) :: t
  def writer(value), do: %Monad.Writer{value: value}

  @doc """
  Wraps `value` and `log` into a writer monad.

      iex> writer 42, "The answer"
      %Monad.Writer{value: 42, log: "The answer"}
  """
  @spec writer(term, Monoid.t) :: t
  def writer(value, log), do: %Monad.Writer{value: value, log: log}

  @doc """
  Returns the value and log from a writer monad.

      iex> w = writer 42, "The answer"
      iex> runWriter w
      {42, "The answer"}
  """
  @spec runWriter(t) :: {term, Monoid.t}
  def runWriter(writer), do: {writer.value, writer.log}

  @doc """
  Callback implementation of `Monad.Behaviour.return/1`.

  Wraps `value` into a writer monad.

      iex> return 42
      %Monad.Writer{value: 42, log: :nil_monoid}
  """
  @spec return(term) :: t
  def return(value), do: writer value

  @doc """
  Callback implementation of `Monad.Behaviour.bind/2`.

  Unwraps the value from `writer` and applies it to `fun`. The log from
  `writer` and from the resulting writer monad are combined.

      iex> m = writer 4, ["Four"]
      iex> n = bind m, &writer(&1 * 2, ["Doubled"])
      iex> runWriter n
      {8, ["Four", "Doubled"]}
  """
  @spec bind(t, (term -> t)) :: t
  def bind(writer, fun) when is_function(fun, 1) do
    {val1, monoid1} = writer |> runWriter
    {val2, monoid2} = val1 |> fun.() |> runWriter

    writer val2, merge_monoids(monoid1, monoid2)
  end

  ## Helpers

  # `merge_monoids` is necessary because Elixir doesn't have a strong, static
  # type system. :nil_monoid basically acts like a universal "zero".
  defp merge_monoids(:nil_monoid, :nil_monoid), do: :nil_monoid
  defp merge_monoids(monoid, :nil_monoid), do: monoid
  defp merge_monoids(:nil_monoid, monoid), do: monoid
  defp merge_monoids(monoid1, monoid2), do: Monoid.mappend(monoid1, monoid2)
end
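A minimal usage sketch of chaining `bind/2`, assuming the `Monoid` protocol is implemented for lists as concatenation (the `step` function is illustrative):

```elixir
import Monad.Writer

# Each step returns a new writer whose log gets appended to the running log.
step = fn n -> writer(n + 1, ["incremented to #{n + 1}"]) end

writer(1, ["start"])
|> bind(step)
|> bind(step)
|> runWriter()
# => {3, ["start", "incremented to 2", "incremented to 3"]}
```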
# web/models/team.ex
defmodule CanvasAPI.Team do
  @moduledoc """
  A group of users in a Slack team.
  """

  use CanvasAPI.Web, :model

  alias CanvasAPI.ImageMap

  @type t :: %__MODULE__{}

  schema "teams" do
    field :domain, :string
    field :images, :map, default: %{}
    field :name, :string
    field :slack_id, :string

    many_to_many :accounts, CanvasAPI.Account, join_through: "users"
    has_many :canvases, CanvasAPI.Canvas
    has_many :users, CanvasAPI.User
    has_many :oauth_tokens, CanvasAPI.OAuthToken

    timestamps()
  end

  @doc """
  Builds a creation changeset based on the `struct` and `params`.
  """
  @spec create_changeset(%__MODULE__{}, map, Keyword.t) :: Ecto.Changeset.t
  def create_changeset(struct, params, type: :slack) do
    struct
    |> cast(params, [:domain, :name, :slack_id])
    |> validate_required([:domain, :name, :slack_id])
    |> prevent_domain_change
    |> unique_constraint(:domain)
    |> put_change(:images, ImageMap.image_map(params))
  end

  def create_changeset(struct, params, type: :personal) do
    struct
    |> cast(params, [])
    |> put_change(:name, "Notes")
  end

  @doc """
  Builds a changeset for updating a team (only domain, only personal).
  """
  @spec update_changeset(%__MODULE__{}, map) :: Ecto.Changeset.t
  def update_changeset(struct, params) do
    struct
    |> cast(params, [:domain])
    |> if_slack(&prevent_domain_change/1)
    |> validate_required([:domain])
    |> lowercase_domain
    |> validate_domain_format
    |> prefix_domain
    |> unique_constraint(:domain)
  end

  @doc """
  Fetches the OAuth token for the given team and provider.
  """
  def get_token(team, provider) do
    from(assoc(team, :oauth_tokens), where: [provider: ^provider])
    |> first
    |> Repo.one
    |> case do
      nil -> {:error, :token_not_found}
      token -> {:ok, token}
    end
  end

  defp if_slack(changeset, func) do
    if changeset.data.slack_id || get_change(changeset, :slack_id) do
      func.(changeset)
    else
      changeset
    end
  end

  defp prevent_domain_change(changeset) do
    if changeset.data.slack_id do
      changeset
      |> add_error(:domain, "can not be changed for Slack teams")
    else
      changeset
    end
  end

  defp prefix_domain(changeset) do
    domain = "~#{get_change(changeset, :domain)}"
    put_change(changeset, :domain, domain)
  end

  defp lowercase_domain(changeset) do
    case get_change(changeset, :domain) do
      "" <> domain -> put_change(changeset, :domain, String.downcase(domain))
      _ -> changeset
    end
  end

  defp validate_domain_format(changeset) do
    changeset
    |> validate_format(:domain, ~r/\A[a-z0-9][a-z0-9-]{0,34}[a-z0-9]\z/,
      message: """
      must be between 2 and 36 characters, contain only letters, \
      numbers, and dashes, and begin and end with a letter or \
      number\
      """)
  end
end
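A hedged sketch of how these changesets might be used (`Repo` and the attribute values are illustrative):

```elixir
attrs = %{"domain" => "acme", "name" => "Acme Inc", "slack_id" => "T12345"}

%CanvasAPI.Team{}
|> CanvasAPI.Team.create_changeset(attrs, type: :slack)
|> Repo.insert()

# Personal teams ignore the incoming params and are always named "Notes".
%CanvasAPI.Team{}
|> CanvasAPI.Team.create_changeset(%{}, type: :personal)
|> Repo.insert()
```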
# lib/asteroid/object_store/authenticated_session/riak.ex
defmodule Asteroid.ObjectStore.AuthenticatedSession.Riak do
  @moduledoc """
  Riak implementation of the `Asteroid.ObjectStore.AuthenticatedSession` behaviour

  ## Initializing a Riak bucket type

  ```console
  $ sudo riak-admin bucket-type create ephemeral_token '{"props":{"datatype":"map", "backend":"leveldb_mult"}}'
  ephemeral_token created

  $ sudo riak-admin bucket-type activate ephemeral_token
  ephemeral_token has been activated
  ```

  ## Options

  The options (`Asteroid.ObjectStore.AuthenticatedSession.opts()`) are:
  - `:bucket_type`: a `String.t()` for the bucket type that must be created beforehand in Riak.
  No defaults, **mandatory**
  - `:bucket_name`: a `String.t()` for the bucket name. Defaults to `"authenticated_session"`
  - `:purge_interval`: the `integer()` interval in seconds the purge process will be triggered,
  or `:no_purge` to disable purge. Defaults to `1800` (30 minutes)
  - `:rows`: the maximum number of results that a search will return. Defaults to `1_000_000`.
  Search is used by the purge process.

  ## Installation function

  The `install/1` function executes the following actions:
  - it installs a custom schema (`asteroid_object_store_authenticated_session_riak_schema`)
  - it creates a new index (`asteroid_object_store_authenticated_session_riak_index`) on the
  bucket (and not the bucket type - so as to avoid collisions)

  This is necessary to:
  1. Efficiently index expiration timestamp
  2. Disable indexing of raw authenticated session data

  ## Purge process

  The purge process uses the `Singleton` library. Therefore the purge process will be unique
  per cluster (and that's probably what you want if you use Riak).
  """

  require Logger

  @behaviour Asteroid.ObjectStore.AuthenticatedSession

  @impl true
  def install(opts) do
    bucket_type = opts[:bucket_type] || raise "Missing bucket type"
    bucket_name = opts[:bucket_name] || "authenticated_session"

    with :ok <-
           Riak.Search.Schema.create(
             schema_name(),
             (:code.priv_dir(:asteroid) ++ '/riak/object_store_authenticated_session_schema.xml')
             |> File.read!()
           ),
         :ok <- Riak.Search.Index.put(index_name(), schema_name()),
         :ok <- Riak.Search.Index.set({bucket_type, bucket_name}, index_name()) do
      Logger.info(
        "#{__MODULE__}: created authenticated session store `#{bucket_name}` " <>
          "of bucket type `#{bucket_type}`"
      )

      :ok
    else
      e ->
        Logger.error(
          "#{__MODULE__}: failed to create authenticated session store `#{bucket_name}` " <>
            "of bucket type `#{bucket_type}` (reason: #{inspect(e)})"
        )

        {:error, "#{inspect(e)}"}
    end
  catch
    :exit, e ->
      bucket_type = opts[:bucket_type] || raise "Missing bucket type"
      bucket_name = opts[:bucket_name] || "authenticated_session"

      Logger.error(
        "#{__MODULE__}: failed to create authenticated session store `#{bucket_name}` " <>
          "of bucket type `#{bucket_type}` (reason: #{inspect(e)})"
      )

      {:error, "#{inspect(e)}"}
  end

  @impl true
  def start_link(opts) do
    opts = Keyword.merge([purge_interval: 1800], opts)

    # we launch the process anyway because we need to return a process
    # but the singleton will do nothing if the value is `:no_purge`
    Singleton.start_child(__MODULE__.Purge, opts, __MODULE__)
  end

  @impl true
  def get(authenticated_session_id, opts) do
    bucket_type = opts[:bucket_type] || raise "Missing bucket type"
    bucket_name = opts[:bucket_name] || "authenticated_session"

    case Riak.find(bucket_type, bucket_name, authenticated_session_id) do
      res when not is_nil(res) ->
        authenticated_session =
          res
          |> Riak.CRDT.Map.get(:register, "authenticated_session_data_binary")
          |> Base.decode64!(padding: false)
          |> :erlang.binary_to_term()

        Logger.debug(
          "#{__MODULE__}: getting authenticated session `#{authenticated_session_id}`, " <>
            "value: `#{inspect(authenticated_session)}`"
        )

        {:ok, authenticated_session}

      nil ->
        Logger.debug(
          "#{__MODULE__}: getting authenticated session `#{authenticated_session_id}`, " <>
            "value: `nil`"
        )

        {:ok, nil}
    end
  catch
    :exit, e ->
      {:error, "#{inspect(e)}"}
  end

  @impl true
  def get_from_subject_id(subject_id, opts) do
    search("subject_id_register:\"#{String.replace(subject_id, "\"", "\\\"")}\"", opts)
  end

  @impl true
  def put(authenticated_session, opts) do
    bucket_type = opts[:bucket_type] || raise "Missing bucket type"
    bucket_name = opts[:bucket_name] || "authenticated_session"

    riak_map = Riak.CRDT.Map.new()

    authenticated_session_data_binary =
      authenticated_session
      |> :erlang.term_to_binary()
      |> Base.encode64(padding: false)
      |> Riak.CRDT.Register.new()

    riak_map =
      Riak.CRDT.Map.put(
        riak_map,
        "authenticated_session_data_binary",
        authenticated_session_data_binary
      )

    riak_map =
      if authenticated_session.subject_id != nil do
        Riak.CRDT.Map.put(
          riak_map,
          "subject_id",
          Riak.CRDT.Register.new(authenticated_session.subject_id)
        )
      else
        riak_map
      end

    riak_map =
      if authenticated_session.data["exp"] != nil do
        Riak.CRDT.Map.put(
          riak_map,
          "exp_int",
          Riak.CRDT.Register.new(to_string(authenticated_session.data["exp"]))
        )
      else
        Logger.warn(
          "Inserting authenticated session with no expiration: #{
            String.slice(authenticated_session.id, 1..5)
          }..."
        )

        riak_map
      end

    Riak.update(riak_map, bucket_type, bucket_name, authenticated_session.id)

    Logger.debug(
      "#{__MODULE__}: stored authenticated session `#{authenticated_session.id}`, " <>
        "value: `#{inspect(authenticated_session)}`"
    )

    :ok
  catch
    :exit, e ->
      {:error, "#{inspect(e)}"}
  end

  @impl true
  def delete(authenticated_session_id, opts) do
    bucket_type = opts[:bucket_type] || raise "Missing bucket type"
    bucket_name = opts[:bucket_name] || "authenticated_session"

    Riak.delete(bucket_type, bucket_name, authenticated_session_id)

    Logger.debug("#{__MODULE__}: deleted authenticated session `#{authenticated_session_id}`")

    :ok
  catch
    :exit, e ->
      {:error, "#{inspect(e)}"}
  end

  @doc """
  Searches in Riak-stored authenticated sessions

  This function is used internally and made available for user convenience. Authenticated
  sessions are stored in the following fields:

  | Field name                                  |  Indexed as   |
  |---------------------------------------------|:-------------:|
  | authenticated_session_data_binary_register  | *not indexed* |
  | subject_id                                  |    string     |
  | exp_int_register                            |      int      |

  Note that you are responsible for escaping values accordingly with Solr escaping.
  """
  @spec search(String.t(), Asteroid.ObjectStore.AuthenticatedSession.opts()) ::
          {:ok, [Asteroid.OIDC.AuthenticatedSession.id()]} | {:error, any()}
  def search(search_query, opts) do
    case Riak.Search.query(index_name(), search_query, rows: opts[:rows] || 1_000_000) do
      {:ok, {:search_results, result_list, _, _}} ->
        {:ok,
         for {_index_name, attribute_list} <- result_list do
           :proplists.get_value("_yz_rk", attribute_list)
         end}

      {:error, _} = error ->
        error
    end
  end

  @spec schema_name() :: String.t()
  defp schema_name(), do: "asteroid_object_store_authenticated_session_riak_schema"

  @doc false
  @spec index_name() :: String.t()
  def index_name(), do: "asteroid_object_store_authenticated_session_riak_index"
end
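A hedged usage sketch of the store lifecycle; `session` stands in for a session struct with `id`, `subject_id`, and `data` fields, and the option values are illustrative:

```elixir
alias Asteroid.ObjectStore.AuthenticatedSession.Riak, as: SessionStore

opts = [bucket_type: "ephemeral_token"]

:ok = SessionStore.install(opts)            # one-time schema and index setup
{:ok, _pid} = SessionStore.start_link(opts) # starts the purge singleton

:ok = SessionStore.put(session, opts)
{:ok, _session} = SessionStore.get(session.id, opts)
```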
# lib/ex_sieve/predicate.ex
defmodule ExSieve.Predicate do
  @moduledoc false

  import ExSieve.CustomPredicate, only: [custom_predicates: 0]

  @true_values [1, true, "1", "T", "t", "true", "TRUE"]

  @type predicate_spec ::
          {
            predicate_name :: atom(),
            allowed_types :: :all | [Ecto.Type.primitive()],
            allowed_values :: :all | [any()],
            all_any_combinators :: [:all | :any]
          }

  @builtin_predicates_specs [
    {:eq, :all, :all, [:any]},
    {:not_eq, :all, :all, [:all]},
    {:cont, [:string], :all, [:all, :any]},
    {:not_cont, [:string], :all, [:all, :any]},
    {:lt, :all, :all, []},
    {:lteq, :all, :all, []},
    {:gt, :all, :all, []},
    {:gteq, :all, :all, []},
    {:in, :all, :all, []},
    {:not_in, :all, :all, []},
    {:matches, [:string], :all, [:all, :any]},
    {:does_not_match, [:string], :all, [:all, :any]},
    {:start, [:string], :all, [:any]},
    {:not_start, [:string], :all, [:all]},
    {:end, [:string], :all, [:any]},
    {:not_end, [:string], :all, [:all]},
    {true, [:boolean], @true_values, []},
    {:not_true, [:boolean], @true_values, []},
    {false, [:boolean], @true_values, []},
    {:not_false, [:boolean], @true_values, []},
    {:present, [:string], @true_values, []},
    {:blank, [:string], @true_values, []},
    {:null, :all, @true_values, []},
    {:not_null, :all, @true_values, []}
  ]

  @basic_predicates Enum.map(@builtin_predicates_specs, &(&1 |> elem(0) |> Atom.to_string()))

  @all_any_predicates Enum.flat_map(@builtin_predicates_specs, fn {predicate, _, _, all_any} ->
                        Enum.map(all_any, &"#{predicate}_#{&1}")
                      end)

  @builtin_predicates Enum.sort_by(@basic_predicates ++ @all_any_predicates, &byte_size/1, &>=/2)

  @predicates @builtin_predicates ++
                (custom_predicates() |> Keyword.keys() |> Enum.map(&Atom.to_string/1))

  @predicate_aliases_map :ex_sieve
                         |> Application.get_env(:predicate_aliases, %{})
                         |> Map.new(fn {pred_alias, pred} ->
                           {to_string(pred_alias), to_string(pred)}
                         end)
                         |> Enum.reject(fn {pred_alias, _} -> pred_alias in @predicates end)
                         |> Enum.reject(fn {_, pred} -> pred not in @predicates end)
                         |> Map.new()

  @spec all() :: [String.t()]
  def all, do: @predicates

  @spec builtin() :: [String.t()]
  def builtin, do: @builtin_predicates

  @spec basic :: [String.t()]
  def basic, do: @basic_predicates

  @spec composite :: [String.t()]
  def composite, do: @all_any_predicates

  @spec specs :: [predicate_spec()]
  def specs, do: @builtin_predicates_specs

  @spec aliases_map :: %{optional(String.t()) => String.t()}
  def aliases_map, do: @predicate_aliases_map
end
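Since the alias map is read from application config into a module attribute at compile time, aliases would be configured like the following hedged sketch (alias names are illustrative; aliases that shadow a real predicate, or point at an unknown one, are filtered out by the rejects above):

```elixir
# config/config.exs
config :ex_sieve, predicate_aliases: %{like: :cont, not_like: :not_cont}
```

After recompiling, `ExSieve.Predicate.aliases_map()` would return `%{"like" => "cont", "not_like" => "not_cont"}`.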
# lib/ex_poker_eval/card.ex
defmodule ExPokerEval.Card do
  @moduledoc """
  Card manipulation functions
  """

  @suits ~w(H D S C)

  @doc """
  Gets an array of cards as a keyword of cards represented by value and suit.

  ## Examples
  ```
  iex>ExPokerEval.Card.parse_hand(~w(KD))
  {:ok, [[suit: "D", value: 13]]}

  iex>ExPokerEval.Card.parse_hand(~w(KD 8Z 5H))
  {:error, :invalid_card_in_hand}

  # A hand has a maximum of 5 cards and
  # cannot be empty
  iex>ExPokerEval.Card.parse_hand(~w(KD 8Z 5H KD 8Z 5H))
  {:error, :invalid_hand_size}

  iex>ExPokerEval.Card.parse_hand([])
  {:error, :invalid_hand_size}

  # Cards can't be repeated
  iex>ExPokerEval.Card.parse_hand(~w(KD 8Z 8Z 5H))
  {:error, :repeated_card}

  iex>ExPokerEval.Card.parse_hand(~w(2H 3D 5S 9C KD))
  {:ok, [[suit: "H", value: 2], [suit: "D", value: 3], [suit: "S", value: 5], [suit: "C", value: 9], [suit: "D", value: 13]]}

  iex>ExPokerEval.Card.parse_hand(~w(2♠ 3♦))
  {:ok, [[suit: "S", value: 2], [suit: "D", value: 3]]}
  ```
  """
  def parse_hand([]), do: {:error, :invalid_hand_size}
  def parse_hand(larger_hand) when length(larger_hand) > 5, do: {:error, :invalid_hand_size}

  def parse_hand(list) do
    with true <- length(Enum.uniq(list)) == length(list),
         parsed_cards <- Enum.map(list, &parse_card/1),
         {:invalid_cards, []} <- {:invalid_cards, Keyword.get_values(parsed_cards, :error)},
         cards <- Keyword.get_values(parsed_cards, :ok) do
      {:ok, cards}
    else
      false -> {:error, :repeated_card}
      {:invalid_cards, [_]} -> {:error, :invalid_card_in_hand}
    end
  end

  @doc """
  Parses a card out of a string

  ## Examples
  ```
  iex>ExPokerEval.Card.parse_card("KD")
  {:ok, [suit: "D", value: 13]}

  iex>ExPokerEval.Card.parse_card("1H")
  {:ok, [suit: "H", value: 14]}

  iex>ExPokerEval.Card.parse_card("9S")
  {:ok, [suit: "S", value: 9]}

  iex>ExPokerEval.Card.parse_card("5Z")
  {:error, :invalid_card}
  ```
  """
  def parse_card(bin) do
    with literal_suit <- String.last(bin),
         {:ok, suit} <- parse_suit(literal_suit),
         literal_value <- String.trim_trailing(bin, literal_suit),
         value <- sym_to_num(literal_value) do
      {:ok, [suit: suit, value: value]}
    else
      _ -> {:error, :invalid_card}
    end
  end

  @doc """
  Converts symbolic values into numeric ones.

  ## Examples
  ```
  iex>ExPokerEval.Card.sym_to_num("2")
  2

  iex>ExPokerEval.Card.sym_to_num("Q")
  12

  iex>ExPokerEval.Card.sym_to_num("A")
  14

  iex>ExPokerEval.Card.sym_to_num("89")
  {:error, :invalid_card_value}

  iex>ExPokerEval.Card.sym_to_num("7.6")
  {:error, :invalid_card_value}

  iex>ExPokerEval.Card.sym_to_num("")
  {:error, :invalid_card_value}
  ```
  """
  def sym_to_num("1"), do: 14
  def sym_to_num("A"), do: 14
  def sym_to_num("J"), do: 11
  def sym_to_num("Q"), do: 12
  def sym_to_num("K"), do: 13

  def sym_to_num(bin) do
    case Integer.parse(bin) do
      {num, ""} when num in 2..14 -> num
      _ -> {:error, :invalid_card_value}
    end
  end

  @doc """
  Converts cards values into their symbol

  ## Examples
  ```
  iex>ExPokerEval.Card.num_to_sym(4)
  4

  iex>ExPokerEval.Card.num_to_sym(14)
  "Ace"

  iex>ExPokerEval.Card.num_to_sym(1)
  "Ace"

  iex>ExPokerEval.Card.num_to_sym(11)
  "Jack"

  iex>ExPokerEval.Card.num_to_sym(12)
  "Queen"

  iex>ExPokerEval.Card.num_to_sym(13)
  "King"

  iex>ExPokerEval.Card.num_to_sym(23)
  {:error, :invalid_card_value}
  ```
  """
  def num_to_sym(invalid_value) when not invalid_value in 1..14, do: {:error, :invalid_card_value}
  def num_to_sym(ace) when ace == 1 or ace == 14, do: "Ace"
  def num_to_sym(11), do: "Jack"
  def num_to_sym(12), do: "Queen"
  def num_to_sym(13), do: "King"
  def num_to_sym(num), do: num

  @doc """
  Parses the suit from the card. Supports UTF-8 chars

  ## Examples
  ```
  iex>ExPokerEval.Card.parse_suit("F")
  {:error, :invalid_suit}

  iex>ExPokerEval.Card.parse_suit("S")
  {:ok, "S"}

  iex>ExPokerEval.Card.parse_suit("♠")
  {:ok, "S"}

  iex>ExPokerEval.Card.parse_suit("♥")
  {:ok, "H"}

  iex>ExPokerEval.Card.parse_suit("♦")
  {:ok, "D"}

  iex>ExPokerEval.Card.parse_suit("♣")
  {:ok, "C"}
  ```
  """
  def parse_suit(bin) when bin in @suits, do: {:ok, String.upcase(bin)}
  def parse_suit("♠"), do: {:ok, "S"}
  def parse_suit("♥"), do: {:ok, "H"}
  def parse_suit("♣"), do: {:ok, "C"}
  def parse_suit("♦"), do: {:ok, "D"}
  def parse_suit(_wrong_suite), do: {:error, :invalid_suit}
end
# lib/100/p9.ex
defmodule P9 do
  @moduledoc """
  # Definition

  - Level up = floor(defeated monster's level / 2)
  - The enemies are arranged in a circle
  - The first enemy can be chosen freely
  - The fighting order is decided one battle at a time: the party member with
    the minimum level (and, on ties, the minimum fight count) fights next
  - Choose the first opponent so that the highest number of fights any single
    party member has to do is as low as possible
  - Return that highest fight count

  # Examples

      iex> P9.solve(3, [6, 1, 5], [9, 2, 7])
      2

      iex> P9.solve(5, [6, 1, 5, 9, 2], [7, 7, 9, 4, 4])
      2
  """
  use Bitwise

  defmodule Heap do
    defstruct data: nil, size: 0, comparator: nil

    def new(comparator), do: %__MODULE__{comparator: comparator}

    def empty?(%__MODULE__{data: nil, size: 0}), do: true
    def empty?(%__MODULE__{}), do: false

    def size(%__MODULE__{size: size}), do: size

    def top(%__MODULE__{data: nil}), do: nil
    def top(%__MODULE__{data: {v, _}}), do: v

    def pop(%__MODULE__{data: nil, size: 0} = heap), do: heap

    def pop(%__MODULE__{data: {_v, queue}, size: n, comparator: comp} = heap),
      do: %{heap | data: dequeue(queue, comp), size: n - 1}

    def pop!(%__MODULE__{} = heap), do: {Heap.top(heap), Heap.pop(heap)}

    def push(%__MODULE__{data: h, size: n, comparator: comp} = heap, v),
      do: %{heap | data: meld(h, {v, nil}, comp), size: n + 1}

    defp meld(nil, v, _comp), do: v
    defp meld(v, nil, _comp), do: v

    defp meld({v0, q0} = left, {v1, q1} = right, comp) do
      if comp.(v0, v1) do
        {v0, enqueue(q0, right)}
      else
        {v1, enqueue(q1, left)}
      end
    end

    defp enqueue(q, v)
    defp enqueue(nil, v), do: [v]
    defp enqueue(q, v), do: [v | q]

    defp dequeue(nil, _), do: nil
    defp dequeue([], _), do: nil
    defp dequeue([q], _), do: q
    defp dequeue([q0, q1 | q], comp), do: meld(meld(q0, q1, comp), dequeue(q, comp), comp)

    defimpl Collectable do
      def into(heap) do
        {
          heap,
          fn
            heap, {:cont, v} -> Heap.push(heap, v)
            heap, :done -> heap
            _heap, :halt -> :ok
          end
        }
      end
    end

    defimpl Enumerable do
      def count(heap), do: {:ok, Heap.size(heap)}
      def member?(_, _), do: {:error, __MODULE__}
      def slice(_), do: {:error, __MODULE__}

      def reduce(_heap, {:halt, acc}, _fun), do: {:halted, acc}
      def reduce(heap, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(heap, &1, fun)}
      def reduce(%Heap{data: nil, size: 0}, {:cont, acc}, _fun), do: {:done, acc}

      def reduce(heap, {:cont, acc}, fun) do
        reduce(Heap.pop(heap), fun.(Heap.top(heap), acc), fun)
      end
    end
  end

  def main do
    n = IO.read(:line) |> String.trim() |> String.to_integer()
    an = IO.read(:line) |> String.trim() |> String.split(" ") |> Enum.map(&String.to_integer/1)
    bn = IO.read(:line) |> String.trim() |> String.split(" ") |> Enum.map(&String.to_integer/1)

    IO.puts solve(n, an, bn)
  end

  def solve(n, an, bn) do
    heap =
      fn {ll, lt}, {rl, rt} -> ll < rl || ll == rl && lt < rt end
      |> Heap.new()
      |> (&Enum.into(an, &1, fn lvl -> {lvl, 0} end)).()

    0..(n - 1)
    |> Enum.reduce(:infinity, fn i, min ->
      times = non_recursion(heap, Enum.split(bn, i))
      if min < times, do: min, else: times
    end)
  end

  def solve_task(n, an, bn) do
    heap =
      fn {ll, lt}, {rl, rt} -> ll < rl || ll == rl && lt < rt end
      |> Heap.new()
      |> (&Enum.into(an, &1, fn lvl -> {lvl, 0} end)).()

    for i <- 0..(n - 1) do
      Task.async(fn -> non_recursion(heap, Enum.split(bn, i)) end)
    end
    |> Enum.reduce(:infinity, fn task, min ->
      times = Task.await(task, 50_000)
      if min < times, do: min, else: times
    end)
  end

  def solve_spawn(n, an, bn) do
    heap =
      fn {ll, lt}, {rl, rt} -> ll < rl || ll == rl && lt < rt end
      |> Heap.new()
      |> (&Enum.into(an, &1, fn lvl -> {lvl, 0} end)).()

    current = self()

    for i <- 0..(n - 1) do
      spawn_link(fn -> send(current, {self(), non_recursion(heap, Enum.split(bn, i))}) end)
    end
    |> Enum.reduce(:infinity, fn pid, min ->
      receive do
        {^pid, times} -> if min < times, do: min, else: times
      end
    end)
  end

  def solve_recursion(n, an, bn) do
    heap =
      fn {ll, lt}, {rl, rt} -> ll < rl || ll == rl && lt < rt end
      |> Heap.new()
      |> (&Enum.into(an, &1, fn lvl -> {lvl, 0} end)).()

    for i <- 0..(n - 1) do
      {bn1, bn0} = Enum.split(bn, i)
      fight(heap, bn0, bn1)
    end
    |> Enum.min()
  end

  def solve_task_recursion(n, an, bn) do
    heap =
      fn {ll, lt}, {rl, rt} -> ll < rl || ll == rl && lt < rt end
      |> Heap.new()
      |> (&Enum.into(an, &1, fn lvl -> {lvl, 0} end)).()

    for i <- 0..(n - 1) do
      {bn1, bn0} = Enum.split(bn, i)
      Task.async(fn -> fight(heap, bn0, bn1) end)
    end
    |> Enum.map(fn task -> Task.await(task, 50_000) end)
    |> Enum.min()
  end

  def solve_spawn_recursion(n, an, bn) do
    heap =
      fn {ll, lt}, {rl, rt} -> ll < rl || ll == rl && lt < rt end
      |> Heap.new()
      |> (&Enum.into(an, &1, fn lvl -> {lvl, 0} end)).()

    current = self()

    for i <- 0..(n - 1) do
      {bn1, bn0} = Enum.split(bn, i)
      spawn_link(fn -> send(current, {self(), fight(heap, bn0, bn1)}) end)
    end
    |> Enum.map(fn pid ->
      receive do
        {^pid, times} -> times
      end
    end)
    |> Enum.min()
  end

  def non_recursion(heap, {bn1, bn0}) do
    [bn0, bn1]
    |> Enum.reduce(heap, fn list, heap ->
      Enum.reduce(list, heap, fn b, heap ->
        with {a, heap} <- Heap.pop!(heap) do
          Heap.push(heap, level_up(a, b))
        end
      end)
    end)
    |> Enum.max_by(fn {_lvl, time} -> time end)
    |> elem(1)
  end

  def fight(heap, bn, bn_next)

  def fight(heap, [], []),
    do: heap |> Enum.max_by(fn {_lvl, time} -> time end) |> elem(1)

  def fight(heap, [], bn), do: fight(heap, bn, [])

  def fight(heap, [b | tail], bn) do
    a = Heap.top(heap)

    heap
    |> Heap.pop()
    |> Heap.push(level_up(a, b))
    |> fight(tail, bn)
  end

  def level_up({lvl, times}, b), do: {lvl + floor(b / 2), times + 1}

  def bfight(heap, bn0, bn1)

  def bfight(heap, [], []),
    do: heap |> Enum.max_by(fn v -> v &&& 0xFFF end) |> Bitwise.&&&(0xFFF)

  def bfight(heap, [], bn), do: bfight(heap, bn, [])

  def bfight(heap, [b | tail], bn) do
    {a, heap} = Heap.pop!(heap)

    heap
    |> Heap.push(a + b + 1)
    |> bfight(tail, bn)
  end

  def fight_queue(heap, bn)

  def fight_queue(heap, {[], []}),
    do: heap |> Enum.max_by(fn v -> v &&& 0xFFF end) |> Bitwise.&&&(0xFFF)

  def fight_queue(heap, {_, _} = q) do
    {a, heap} = Heap.pop!(heap)
    {{_, b}, q} = :queue.out(q)

    heap
    |> Heap.push(a + b + 1)
    |> fight_queue(q)
  end

  def fight_queue(heap, bn0, bn1)
  def fight_queue(heap, [], bn), do: fight_queue(heap, bn)

  def fight_queue(heap, [b | tail], bn) do
    {a, heap} = Heap.pop!(heap)

    heap
    |> Heap.push(a + b + 1)
    |> fight_queue(tail, bn)
  end

  def bsolve(n, an, bn) do
    heap = an |> Enum.map(&(&1 <<< 12)) |> Enum.into(Heap.new(fn l, r -> l < r end))
    bn = bn |> Enum.map(&(div(&1, 2) <<< 12))

    for i <- 0..(n - 1) do
      {bn1, bn0} = Enum.split(bn, i)
      bfight(heap, bn0, bn1)
    end
    |> Enum.min()
  end

  def bsolve_spawn(n, an, bn) do
    heap = an |> Enum.map(&(&1 <<< 12)) |> Enum.into(Heap.new(fn l, r -> l < r end))
    bn = bn |> Enum.map(&(div(&1, 2) <<< 12))

    current = self()

    for i <- 0..(n - 1) do
      {bn1, bn0} = Enum.split(bn, i)
      spawn_link(fn -> send(current, {self(), bfight(heap, bn0, bn1)}) end)
    end
    |> Enum.reduce(:infinity, fn pid, min ->
      receive do
        {^pid, times} -> if min > times, do: times, else: min
      end
    end)
  end

  def split([], q), do: {[], q}
  def split([h | t], q), do: {t, :queue.in(h, q)}

  def bsolve_spawn_split(n, an, bn) do
    heap = an |> Enum.map(&(&1 <<< 12)) |> Enum.into(Heap.new(fn l, r -> l < r end))
    bn = bn |> Enum.map(&(div(&1, 2) <<< 12))

    current = self()

    for i <- 0..(n - 1) do
      spawn_link(fn ->
        {bn1, bn0} = Enum.split(bn, i)
        send(current, {self(), bfight(heap, bn0, bn1)})
      end)
    end
    |> Enum.reduce(:infinity, fn pid, min ->
      receive do
        {^pid, times} -> if min > times, do: times, else: min
      end
    end)
  end

  def bsolve_spawn_queue(n, an, bn) do
    heap = an |> Enum.map(&(&1 <<< 12)) |> Enum.into(Heap.new(fn l, r -> l < r end))
    bn = bn |> Enum.map(&(div(&1, 2) <<< 12))

    current = self()

    1..n
    |> Enum.reduce({:queue.new, {bn, :queue.new}}, fn _, {pids, {bn, bn_rotate}} ->
      pid = spawn_link(fn -> send(current, {self(), fight_queue(heap, bn, bn_rotate)}) end)
      {:queue.in(pid, pids), split(bn, bn_rotate)}
    end)
    |> (fn {q, _} -> :queue.to_list(q) end).()
    |> Enum.reduce(:infinity, fn pid, min ->
      receive do
        {^pid, times} -> if min > times, do: times, else: min
      end
    end)
  end
end

"""
defmodule Main do
  use Bitwise

  defmodule Heap do
    defstruct data: nil, size: 0, comparator: nil

    def new(comparator), do: %__MODULE__{comparator: comparator}

    def empty?(%__MODULE__{data: nil, size: 0}), do: true
    def empty?(%__MODULE__{}), do: false

    def size(%__MODULE__{size: size}), do: size

    def top(%__MODULE__{data: nil}), do: nil
    def top(%__MODULE__{data: {v, _}}), do: v

    def pop(%__MODULE__{data: nil, size: 0} = heap), do: heap

    def pop(%__MODULE__{data: {_v, queue}, size: n, comparator: comp} = heap),
      do: %{heap | data: dequeue(queue, comp), size: n - 1}

    def pop!(%__MODULE__{} = heap), do: {Heap.top(heap), Heap.pop(heap)}

    def push(%__MODULE__{data: h, size: n, comparator: comp} = heap, v),
      do: %{heap | data: meld(h, {v, nil}, comp), size: n + 1}

    defp meld(nil, v, _comp), do: v
    defp meld(v, nil, _comp), do: v

    defp meld({v0, q0} = left, {v1, q1} = right, comp) do
      if comp.(v0, v1), do: {v0, enqueue(q0, right)}, else: {v1, enqueue(q1, left)}
    end

    defp enqueue(q, v)
    defp enqueue(nil, v), do: [v]
    defp enqueue(q, v), do: [v | q]

    defp dequeue(nil, _), do: nil
    defp dequeue([], _), do: nil
    defp dequeue([q], _), do: q
    defp dequeue([q0, q1 | q], comp), do: meld(meld(q0, q1, comp), dequeue(q, comp), comp)

    defimpl Collectable do
      def into(heap) do
        {
          heap,
          fn
            heap, {:cont, v} -> Heap.push(heap, v)
            heap, :done -> heap
            _heap, :halt -> :ok
          end
        }
      end
    end

    defimpl Enumerable do
      def count(heap), do: {:ok, Heap.size(heap)}
      def member?(_, _), do: {:error, __MODULE__}
      def slice(_), do: {:error, __MODULE__}

      def reduce(_heap, {:halt, acc}, _fun), do: {:halted, acc}
      def reduce(heap, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(heap, &1, fun)}
      def reduce(%Heap{data: nil, size: 0}, {:cont, acc}, _fun), do: {:done, acc}

      def reduce(heap, {:cont, acc}, fun) do
        reduce(Heap.pop(heap), fun.(Heap.top(heap), acc), fun)
      end
    end
  end

  def main do
    n = IO.read(:line)
    an = IO.read(:line)
    bn = IO.read(:line)

    current = self()

    n = spawn_link(fn -> send(current, {self(), n |> String.trim() |> String.to_integer()}) end)

    an =
      spawn_link(fn ->
        send(current, {self(),
          an
          |> String.trim()
          |> String.split(" ")
          |> Enum.map(&(String.to_integer(&1) <<< 12))
          |> Enum.into(Heap.new(fn l, r -> l < r end))})
      end)

    bn =
      spawn_link(fn ->
        send(current, {self(),
          bn
          |> String.trim()
          |> String.split(" ")
          |> Enum.map(&(div(String.to_integer(&1), 2) <<< 12))})
      end)

    n =
      receive do
        {^n, v} -> v
      end

    heap =
      receive do
        {^an, v} -> v
      end

    bn =
      receive do
        {^bn, v} -> v
      end

    for i <- 0..(n - 1) do
      {bn1, bn0} = Enum.split(bn, i)
      fight(heap, bn0, bn1)
    end
    |> Enum.reduce(:infinity, fn times, min -> if min > times, do: times, else: min end)
    |> IO.puts
  end

  def fight(heap, bn0, bn1)

  def fight(heap, [], []), do: heap |> Enum.max_by(fn v -> v &&& 0xFFF end) |> Bitwise.&&&(0xFFF)

  def fight(heap, [], bn), do: fight(heap, bn, [])

  def fight(heap, [b | tail], bn) do
    {a, heap} = Heap.pop!(heap)

    heap
    |> Heap.push(a + b + 1)
    |> fight(tail, bn)
  end
end
"""
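A quick sketch of the pairing-heap API that all of the `solve*` variants rely on:

```elixir
alias P9.Heap

heap =
  Heap.new(fn a, b -> a < b end)  # min-heap comparator
  |> Heap.push(3)
  |> Heap.push(1)
  |> Heap.push(2)

Heap.top(heap)            # => 1
{min, rest} = Heap.pop!(heap)
# min == 1; because Heap implements Enumerable, Enum.to_list(rest)
# drains the remaining elements in comparator order: [2, 3]
```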
# lib/liquex.ex
defmodule Liquex do
  @moduledoc """
  Liquid template renderer for Elixir with 100% compatibility with the
  [Liquid](https://shopify.github.io/liquid/) gem by [Shopify](https://www.shopify.com/).

  ## Basic Usage

      iex> {:ok, template_ast} = Liquex.parse("Hello {{ name }}!")
      iex> {content, _context} = Liquex.render(template_ast, %{"name" => "World"})
      iex> content |> to_string()
      "Hello World!"

  ## Supported features

  Currently, all standard Liquid tags, filters, and types are fully supported. Liquex can
  be considered a byte for byte drop in replacement of the Liquid gem.

  ## Lazy variables

  Liquex allows resolver functions for variables that may require some extra work to
  generate. For example, Shopify has variables for things like available products. Pulling
  all products every time would be too expensive to do on every render. Instead, it would
  be better to lazily pull that information as needed.

  Instead of adding the product list to the context variable map, you can add a function to
  the variable map. If a function is accessed in the variable map, it is executed.

      products_resolver = fn _parent -> Product.all() end

      with {:ok, document} <- Liquex.parse("There are {{ products.size }} products"),
           {result, _} <- Liquex.render(document, %{products: products_resolver}) do
        result
      end

      iex> "There are 5 products"

  ## Indifferent access

  By default, Liquex accesses your maps and structs that may have atom or string (or other
  type) keys. Liquex will try a string key first. If that fails, it will fall back to using
  an atom key. This is similar to how Ruby on Rails handles many of its hashes.

  This allows you to pass in your structs without having to replace all your keys with
  string keys.

      iex> {:ok, template_ast} = Liquex.parse("Hello {{ name }}!")
      iex> {content, _context} = Liquex.render(template_ast, %{name: "World"})
      iex> content |> to_string()
      "Hello World!"

  ## Custom filters

  Liquex contains the full suite of standard Liquid filters, but you may find that there
  are still filters that you may want to add.

  Liquex supports adding your own custom filters to the render pipeline. When creating the
  context for the renderer, set the filter module to your own module.

      defmodule CustomFilter do
        # Import all the standard liquid filters
        use Liquex.Filter

        def scream(value, _), do: String.upcase(value) <> "!"
      end

      context = Liquex.Context.new(%{}, filter_module: CustomFilter)

      {:ok, template_ast} = Liquex.parse("{{'Hello World' | scream}}")
      {result, _} = Liquex.render(template_ast, context)

      result |> to_string()
      iex> "HELLO WORLD!"

  ## Custom tags

  One of the strong points for Liquex is that the tag parser can be extended to support
  non-standard tags. For example, Liquid used internally for the Shopify site includes a
  large range of tags that are not supported by the base Ruby gem. These tags could also be
  added to Liquex by extending the liquid parser.

      defmodule CustomTag do
        @moduledoc false

        @behaviour Liquex.Tag

        import NimbleParsec

        @impl true
        # Parse <<Custom Tag>>
        def parse() do
          text =
            lookahead_not(string(">>"))
            |> utf8_char([])
            |> times(min: 1)
            |> reduce({Kernel, :to_string, []})
            |> tag(:text)

          ignore(string("<<"))
          |> optional(text)
          |> ignore(string(">>"))
        end

        @impl true
        def render(contents, context) do
          {result, context} = Liquex.render(contents, context)
          {["Custom Tag: ", result], context}
        end
      end

      defmodule CustomParser do
        use Liquex.Parser, tags: [CustomTag]
      end

      iex> document = Liquex.parse!("<<Hello World!>>", CustomParser)
      iex> {result, _} = Liquex.render(document, context)
      iex> result |> to_string()
      "Custom Tag: Hello World!"

  ## Installation

  Add the package to your `mix.exs` file.

      def deps do
        [{:liquex, "~> 0.7"}]
      end
  """

  alias Liquex.Context

  @type document_t :: [
          {:control_flow, [...]}
          | {:iteration, [...]}
          | {:object, [...]}
          | {:text, iodata}
          | {:variable, [...]}
          | {{:custom_tag, module()}, any}
        ]

  @spec parse(String.t(), module) :: {:ok, document_t} | {:error, String.t(), pos_integer()}
  @doc """
  Parses a liquid `template` string using the given `parser`.

  Returns a Liquex AST document or the parser error
  """
  def parse(template, parser \\ Liquex.Parser.Base) do
    case parser.parse(template) do
      {:ok, content, _, _, _, _} -> {:ok, content}
      {:error, reason, _, _, {line, _}, _} -> {:error, reason, line}
    end
  end

  @spec parse!(String.t(), module) :: document_t
  @doc """
  Parses a liquid `template` string using the given `parser`.

  Returns a Liquex AST document or raises an exception.

  See also `parse/2`
  """
  def parse!(template, parser \\ Liquex.Parser.Base) do
    case parse(template, parser) do
      {:error, reason, line} ->
        raise Liquex.Error, message: "Liquid parser error: #{reason} - Line #{line}"

      {:ok, ast} ->
        ast
    end
  end

  @spec render(document_t, Context.t() | map) :: {iodata, Context.t()}
  @doc """
  Render a Liquex AST `document` with the given `context`
  """
  def render(document, context \\ %Context{})
  def render(document, %Context{} = context), do: Liquex.Render.render([], document, context)
  def render(document, %{} = context), do: render(document, Context.new(context))
end
# lib/ex_aws_boto.ex
defmodule ExAws.Boto do
  require ExAws.Boto.Util

  alias ExAws.Boto.Util, as: Util
  alias ExAws.Boto.Operation, as: Operation
  alias ExAws.Boto.Shape, as: Shape

  @doc """
  Loads a service JSON spec from botocore, and generates client modules and objects.

  Accepts a list of services and API versions to load.

  ## Examples

      iex> ExAws.Boto.load(iam: "2010-05-08")
      :ok

      iex> ExAws.IAM.Api.list_users |> ExAws.IAM.Client.request()
      {:ok, [ ... ]}
  """
  @spec load(Keyword.t()) :: Macro.t()
  defmacro load(slugs) do
    slugs
    |> Enum.map(fn {service_atom, api_version} -> "#{service_atom}/#{api_version}" end)
    |> load_slugs()

    quote do
      nil
    end
  end

  @doc """
  If you already have a JSON spec file, you can use this to load it directly.
  """
  def generate_client(
        %{
          "version" => _version,
          "metadata" => %{"serviceId" => service_id} = _metadata,
          "operations" => operations_map,
          "shapes" => shapes_map
        } = service_json
      ) do
    # Code.put_compiler_option(:tracers, [ExAws.Boto.Debug.CompileTracer])

    shapes_map
    |> Enum.each(fn {name, _spec} ->
      service_json |> Shape.from_service_json(name)
    end)

    operations_specs =
      operations_map
      |> Enum.map(fn {_name, spec} ->
        service_json |> Operation.from_service_json(spec)
      end)

    operations_specs
    |> Enum.each(fn op_spec ->
      op_spec
      |> Operation.generate_module()
      |> Code.compile_quoted(op_spec.name)
    end)

    api_mod = Util.module_name(service_id, nil)

    service_json
    |> generate_api_mod(operations_specs)
    |> Code.compile_quoted(api_mod |> inspect())
  end

  defp load_slugs(slugs) when is_list(slugs) do
    slugs |> Enum.each(&load_slug/1)
  end

  @deps_path Mix.Project.deps_path()

  defp load_slug(slug) when is_binary(slug) do
    base_dir = "#{@deps_path}/botocore/botocore/data/#{slug}"

    %{
      "metadata" => %{
        "serviceId" => service_id
      }
    } = service = "#{base_dir}/service-2.json" |> load_service_file

    if function_exported?(ExAws.Boto.Util.module_name(service_id), :__info__, 1) == false do
      paginators = "#{base_dir}/paginators-1.json" |> load_service_file
      examples = "#{base_dir}/examples-1.json" |> load_service_file
      waiters = "#{base_dir}/waiters-2.json" |> load_service_file

      service
      |> Map.put("pagination", Map.get(paginators, "pagination", %{}))
      |> Map.put("examples", Map.get(examples, "examples", %{}))
      |> Map.put("waiters", Map.get(waiters, "waiters", %{}))
      |> generate_client
    end

    :ok
  end

  defp generate_api_mod(
         %{
           "version" => _version,
           "metadata" => %{"serviceId" => service_id} = _metadata
         } = service_json,
         operations
       ) do
    api_mod = Util.module_name(service_id, nil)

    docs =
      service_json
      |> Map.get("documentation")
      |> ExAws.Boto.DocParser.doc_to_markdown()

    quote do
      defmodule unquote(api_mod) do
        @moduledoc unquote(docs)
        unquote_splicing(
          operations
          |> Enum.map(&Operation.generate_operation/1)
        )
      end
    end
  end

  defp load_service_file(full_path) do
    full_path
    |> File.read()
    |> case do
      {:ok, contents} -> Jason.decode!(contents)
      _ -> %{}
    end
  end
end
# apps/esperanto/lib/trybe/esperanto/parse.ex
defmodule Esperanto.Parser do
  @moduledoc """
  Parser interface
  """

  alias Esperanto.Walker

  @type tree() :: any()

  @doc """
  Parse given input

  Parameters:

  * `walker` - the parser is responsible for walking through the input
  * `tree` - current AST tree
  * `parent_id` - id of the parent node
  * `opts` - keyword list of options

  Returns: a tuple with the new tree and new walker
  """
  @callback parse(walker :: Walker.t(), tree :: tree(), parent_id :: integer(), opts :: keyword()) ::
              {tree(), Walker.t()}

  @doc """
  Check if the parser should handle the current walker input

  Parameters:

  * `walker` - the parser is responsible for walking through the input
  * `tree` - current AST tree
  * `parent_id` - id of the parent node
  * `opts` - keyword list of options

  Returns: true if the parser should be used. Note that if two parsers return
  true an `AmbigousSyntaxError` is raised
  """
  @callback should_parse(
              walker :: Walker.t(),
              tree :: tree(),
              parent_id :: integer(),
              opts :: keyword()
            ) :: boolean()

  @doc """
  Enchant the parser, returning a new tree

  Parameters:

  * `tree` - current AST tree
  * `node` - the node being created
  * `parent_id` - id of the parent node

  Returns: a new enchanted tree
  """
  @callback enchant_parser(tree(), NaryTree.Node.t(), integer()) :: tree()

  @optional_callbacks enchant_parser: 3

  def to_xml(tree, opts \\ []) do
    opts = Keyword.merge([format: :none], opts)

    tree
    |> NaryTree.to_map()
    |> do_to_xml()
    |> XmlBuilder.generate(opts)
  end

  @spec do_to_xml(nil | maybe_improper_list | map) :: {any, any, [...]}
  defp do_to_xml(tree_map) do
    children =
      case tree_map[:children] do
        nil -> []
        :empty -> []
        children -> Enum.map(children, fn child -> do_to_xml(child) end)
      end

    {content, attrs} = get_content_and_attr(tree_map[:content])

    tag = tree_map[:name]

    case tag do
      :empty ->
        children

      :p ->
        case String.trim(content, "\n") do
          "" -> children
          content -> {tag, attrs, [content] ++ children}
        end

      _ ->
        {tag, attrs, [content] ++ children}
    end
  end

  defp get_content_and_attr({:empty, attrs}), do: {"", attrs}
  defp get_content_and_attr({content, attrs}), do: {content, attrs}
  defp get_content_and_attr(:empty), do: {"", %{}}
  defp get_content_and_attr(content), do: {content, %{}}
end
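A minimal hedged skeleton of a module implementing this behaviour (the module name is hypothetical, and it deliberately claims nothing; a real parser would inspect the walker's input to decide whether to take over):

```elixir
defmodule MyApp.NoopParser do
  @moduledoc false
  @behaviour Esperanto.Parser

  # Never claims the input; a real implementation would pattern match on
  # the walker to detect the syntax it handles.
  @impl true
  def should_parse(_walker, _tree, _parent_id, _opts), do: false

  # Returns the tree and walker unchanged; a real implementation would
  # consume input and add a node under parent_id.
  @impl true
  def parse(walker, tree, _parent_id, _opts), do: {tree, walker}
end
```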
# lib/rummage_ecto/custom_hooks/simple_sort.ex
defmodule Rummage.Ecto.CustomHooks.SimpleSort do
  @moduledoc """
  `Rummage.Ecto.CustomHooks.SimpleSort` is a custom sort hook that comes
  shipped with `Rummage.Ecto`.

  Usage:
  For a regular sort:

  ```elixir
  alias Rummage.Ecto.CustomHooks.SimpleSort

  # This returns a queryable which upon running will give a list of `Parent`(s)
  # sorted by ascending field_1
  sorted_queryable = SimpleSort.run(Parent, %{"sort" => "field_1.asc"})
  ```

  For a case-insensitive sort:

  ```elixir
  alias Rummage.Ecto.CustomHooks.SimpleSort

  # This returns a queryable which upon running will give a list of `Parent`(s)
  # sorted by ascending case insensitive field_1
  # Keep in mind that case insensitive can only be called for text fields
  sorted_queryable = SimpleSort.run(Parent, %{"sort" => "field_1.asc.ci"})
  ```

  This module can be used by overriding the default sort module. This can be
  done in the following ways:

  In the `Ecto` module:
  ```elixir
  Rummage.Ecto.rummage(queryable, rummage, sort: Rummage.Ecto.CustomHooks.SimpleSort)
  ```

  OR

  Globally for all models in `config.exs` (NOT Recommended):
  ```elixir
  config :rummage_ecto,
    Rummage.Ecto,
    default_sort: Rummage.Ecto.CustomHooks.SimpleSort
  ```
  """

  import Ecto.Query

  @behaviour Rummage.Ecto.Hook

  @doc """
  Builds a sort `queryable` on top of the given `queryable` from the rummage
  parameters from the given `rummage` struct.

  ## Examples
  When rummage `struct` passed doesn't have the key `"sort"`, it simply
  returns the `queryable` itself:

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> SimpleSort.run(Parent, %{})
      Parent

  When the `queryable` passed is not just a `struct`:

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> queryable = from u in "parents"
      #Ecto.Query<from p in "parents">
      iex> SimpleSort.run(queryable, %{})
      #Ecto.Query<from p in "parents">

  When rummage `struct` passed has the key `"sort"`, but with a value of `{}`,
  `""` or `[]` it simply returns the `queryable` itself:

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> SimpleSort.run(Parent, %{"sort" => {}})
      Parent

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> SimpleSort.run(Parent, %{"sort" => ""})
      Parent

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> SimpleSort.run(Parent, %{"sort" => []})
      Parent

  When rummage `struct` passed has the key `"sort"`, with `field` and `order`
  it returns a sorted version of the `queryable` passed in as the argument:

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> rummage = %{"sort" => "field_1.asc"}
      %{"sort" => "field_1.asc"}
      iex> queryable = from u in "parents"
      #Ecto.Query<from p in "parents">
      iex> SimpleSort.run(queryable, rummage)
      #Ecto.Query<from p in "parents", order_by: [asc: p.field_1]>

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> rummage = %{"sort" => "field_1.desc"}
      %{"sort" => "field_1.desc"}
      iex> queryable = from u in "parents"
      #Ecto.Query<from p in "parents">
      iex> SimpleSort.run(queryable, rummage)
      #Ecto.Query<from p in "parents", order_by: [desc: p.field_1]>

  When no `order` is specified, it returns the `queryable` itself:

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> rummage = %{"sort" => "field_1"}
      %{"sort" => "field_1"}
      iex> queryable = from u in "parents"
      #Ecto.Query<from p in "parents">
      iex> SimpleSort.run(queryable, rummage)
      #Ecto.Query<from p in "parents", order_by: []>

  When rummage `struct` passed has `case-insensitive` sort, it returns a
  sorted version of the `queryable` with `case_insensitive` arguments:

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> rummage = %{"sort" => "field_1.asc.ci"}
      %{"sort" => "field_1.asc.ci"}
      iex> queryable = from u in "parents"
      #Ecto.Query<from p in "parents">
      iex> SimpleSort.run(queryable, rummage)
      #Ecto.Query<from p in "parents", order_by: [asc: fragment("lower(?)", ^:field_1)]>

      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> import Ecto.Query
      iex> rummage = %{"sort" => "field_1.desc.ci"}
      %{"sort" => "field_1.desc.ci"}
      iex> queryable = from u in "parents"
      #Ecto.Query<from p in "parents">
      iex> SimpleSort.run(queryable, rummage)
      #Ecto.Query<from p in "parents", order_by: [desc: fragment("lower(?)", ^:field_1)]>
  """
  @spec run(Ecto.Query.t, map) :: {Ecto.Query.t, map}
  def run(queryable, rummage) do
    sort_params = Map.get(rummage, "sort")

    case sort_params do
      a when a in [nil, [], {}, ""] ->
        queryable

      _ ->
        case Regex.match?(~r/\w.ci+$/, sort_params) do
          true ->
            sort_params =
              sort_params
              |> String.split(".")
              |> Enum.drop(-1)
              |> Enum.join(".")

            handle_ci_sort(queryable, sort_params)

          _ ->
            handle_sort(queryable, sort_params)
        end
    end
  end

  @doc """
  Implementation of `before_hook` for `Rummage.Ecto.CustomHooks.SimpleSort`.
  This just returns back `rummage` at this point. It doesn't matter what
  `queryable` or `opts` are, it just returns back `rummage`.

  ## Examples
      iex> alias Rummage.Ecto.CustomHooks.SimpleSort
      iex> SimpleSort.before_hook(Parent, %{}, %{})
      %{}
  """
  @spec before_hook(Ecto.Query.t, map, map) :: map
  def before_hook(_queryable, rummage, _opts), do: rummage

  defmacrop case_insensitive(field) do
    quote do
      fragment("lower(?)", unquote(field))
    end
  end

  defp handle_sort(queryable, sort_params),
    do: queryable |> order_by(^consolidate_order_params(sort_params))

  defp handle_ci_sort(queryable, sort_params) do
    order_param =
      sort_params
      |> consolidate_order_params
      |> Enum.at(0)

    queryable
    |> order_by([{^elem(order_param, 0), case_insensitive(^elem(order_param, 1))}])
  end

  defp consolidate_order_params(sort_params) do
    case Regex.match?(~r/\w.asc+$/, sort_params) or Regex.match?(~r/\w.desc+$/, sort_params) do
      true -> add_order_params([], sort_params)
      _ -> []
    end
  end

  defp add_order_params(order_params, unparsed_field) do
    parsed_field =
      unparsed_field
      |> String.split(".")
      |> Enum.drop(-1)
      |> Enum.join(".")
      |> String.to_atom

    order_type =
      unparsed_field
      |> String.split(".")
      |> Enum.at(-1)
      |> String.to_atom

    Keyword.put(order_params, order_type, parsed_field)
  end
end
# lib/membrane/rtp/silence_discarder.ex
defmodule Membrane.RTP.SilenceDiscarder do
  @moduledoc """
  Element responsible for dropping silent audio packets.

  For a packet to be discarded it needs to contain a `RTP.Header.Extension`
  struct with identifier equal to `vad_id` in its extensions list. The header
  extension will contain information about audio level (VAD extension is
  required). The element will only drop packets whose audio level is above
  the given silence threshold (muted audio is of value 127).

  `#{__MODULE__}` will drop as many silent packets as possible and on reaching
  the dropping limit it will send the current buffer, reset the dropped
  packets counter and emit `Membrane.RTP.PacketsDiscardedEvent` with the
  number of packets that have been dropped until that point. The event gets
  sent both on reaching the dropping limit and when a non-silent packet
  arrives.
  """

  use Membrane.Filter

  alias Membrane.RTP.{Header, PacketsDiscardedEvent}

  def_input_pad :input, caps: :any, demand_mode: :auto
  def_output_pad :output, caps: :any, demand_mode: :auto

  def_options max_consecutive_drops: [
                spec: non_neg_integer() | :infinity,
                default: 1000,
                description: """
                A number indicating how many consecutive silent packets can be
                dropped before a single packet will be passed and a dropped
                packet event will be emitted.

                Passing a single packet once in a while is necessary for
                elements such as a jitter buffer or encryptor, as they can
                update their ROCs based on sequence numbers, and when we drop
                too many packets we may roll the sequence numbers over.
                """
              ],
              silence_threshold: [
                spec: 1..127,
                default: 127,
                description: """
                Audio level threshold that will be compared against incoming
                packets. A packet will be dropped if its audio level is above
                or equal to the given threshold.
                """
              ],
              vad_id: [
                spec: 1..14,
                default: 1,
                description: """
                ID of a VAD extension.
                """
              ]

  @impl true
  def handle_init(opts) do
    {:ok, Map.from_struct(opts) |> Map.put(:dropped, 0)}
  end

  @impl true
  def handle_event(pad, other, ctx, state), do: super(pad, other, ctx, state)

  @impl true
  def handle_process(
        :input,
        buffer,
        _ctx,
        %{dropped: dropped, max_consecutive_drops: max_drops} = state
      )
      when dropped == max_drops do
    stop_dropping(buffer, state)
  end

  @impl true
  def handle_process(:input, buffer, _ctx, state) do
    buffer
    |> Header.Extension.find(state.vad_id)
    |> handle_vad(buffer, state)
  end

  defp handle_vad(nil, buffer, state), do: {{:ok, buffer: {:output, buffer}}, state}

  defp handle_vad(vad, buffer, state) do
    %{dropped: dropped, silence_threshold: silence_threshold} = state
    <<_v::1, audio_level::7>> = vad.data

    cond do
      audio_level >= silence_threshold -> {:ok, %{state | dropped: dropped + 1}}
      dropped > 0 -> stop_dropping(buffer, state)
      true -> {{:ok, buffer: {:output, buffer}}, state}
    end
  end

  defp stop_dropping(buffer, state) do
    {{:ok,
      event: {:output, %PacketsDiscardedEvent{discarded: state.dropped}},
      buffer: {:output, buffer}}, %{state | dropped: 0}}
  end
end
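Since `def_options` generates an options struct for the element, configuring it might look like the following hedged sketch (how the element is wired into a pipeline depends on the Membrane version in use, so only the struct itself is shown):

```elixir
# All values are illustrative; unset fields fall back to the def_options defaults.
discarder = %Membrane.RTP.SilenceDiscarder{
  max_consecutive_drops: 500,
  silence_threshold: 120,
  vad_id: 1
}
```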
# lib/pelemay/analyzer.ex
defmodule Analyzer do
  import SumMag

  @type asm :: %{args: list(any), operators: list(atom)}

  @moduledoc """
  Provides an optimizer for anonymous functions.
  """

  @doc """
  Check if expressions can be optimized.

  When the expression can be optimized, {:ok, map} is returned. The map has
  the following shape: %{args: _, operators: _}.
  """
  @spec supported?(Macro.t()) :: asm
  def supported?([{:fn, _, [{:->, _, [_arg, expr]}]}]) do
    supported_expr?(expr)
  end

  def supported?({:fn, _, [{:->, _, [_arg, expr]}]}) do
    supported_expr?(expr)
  end

  # Anonymous functions by &
  def supported?([{:&, _, other}]) do
    other |> hd |> supported_expr?
  end

  def supported?({:&, _, other}) do
    other |> hd |> supported_expr?
  end

  def supported?(other), do: {:error, other}

  defp supported_expr?({_atom, _, [_left, _right]} = ast) do
    expr_map = ast |> polynomial_map

    if verify(expr_map) do
      {:ok, expr_map}
    else
      {:error, ast}
    end
  end

  def polynomial_map(ast) do
    acc = %{
      operators: [],
      args: []
    }

    Macro.prewalk(ast, acc, &numerical?/2) |> elem(1)
  end

  defp operator(:+), do: :+
  defp operator(:-), do: :-
  defp operator(:/), do: :/
  defp operator(:*), do: :*
  defp operator(:rem), do: :rem
  defp operator(_), do: false

  defp numerical?({atom, _, [left, right]} = ast, acc) do
    %{
      operators: operators,
      args: args
    } = acc

    operators =
      case operator(atom) do
        false -> operators
        atom -> [atom | operators]
      end

    args =
      args
      |> listing_literal(right)
      |> listing_literal(left)

    ret = %{
      operators: operators,
      args: args
    }

    {ast, ret}
  end

  defp numerical?(other, acc), do: {other, acc}

  defp listing_literal(acc, term) do
    if Macro.quoted_literal?(term) do
      [term | acc]
    else
      case quoted_var?(term) do
        false -> acc
        _ -> [term | acc]
      end
    end
  end

  defp verify(%{operators: operators, args: args}) do
    if length(operators) != length(args) - 1 do
      false
    else
      true
    end
  end
end
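A hedged example of what `supported?/1` might return for a simple polynomial function (the exact contents of `args` depend on `SumMag.quoted_var?/1`, and operator order reflects the prewalk through the AST):

```elixir
ast = quote do: fn x -> x * 2 + 1 end

Analyzer.supported?(ast)
# => {:ok, %{operators: [:*, :+], args: [x_ast, 2, 1]}}
# where x_ast is the quoted variable; verify/1 holds because
# two operators connect three arguments.
```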
# lib/exotus/upload.ex
defmodule Exotus.Upload do
  @moduledoc """
  Handling the lifecycle of a chunked upload.
  """

  use GenStateMachine

  def start_link(initial_data) do
    GenStateMachine.start_link(__MODULE__, initial_data)
  end

  def append(server, offset, iodata) do
    GenStateMachine.call(server, {:append, offset, iodata})
  end

  def status(server) do
    GenServer.call(server, :get_status)
  end

  @impl GenStateMachine
  def init(initial_data) do
    path = Plug.Upload.random_file!("tus")

    data = %{
      id: Map.fetch!(initial_data, :id),
      path: path,
      content_length: Map.fetch!(initial_data, :content_length),
      upload_offset: 0,
      metadata: Map.get(initial_data, :metadata, "")
    }

    Exotus.Registry.register(data.id)

    {:ok, :waiting, data, [chunk_timeout(), upload_timeout()]}
  end

  # Callbacks

  @impl GenStateMachine
  # Handle appending new chunks (only when still waiting)
  def handle_event({:call, from}, {:append, offset, iodata}, :waiting, data) do
    with :ok <- match_offset(data.upload_offset, offset),
         {:ok, new_offset} <- new_offset(data.upload_offset, iodata, data.content_length),
         :ok <- write_file(data.path, iodata) do
      if new_offset == data.content_length do
        {:next_state, :complete, Map.put(data, :upload_offset, new_offset),
         [{:reply, from, {:ok, new_offset}}, retention_timeout()]}
      else
        {:keep_state, Map.put(data, :upload_offset, new_offset),
         [{:reply, from, {:ok, new_offset}}, chunk_timeout()]}
      end
    else
      :file_size_exceeded ->
        {:keep_state_and_data, [{:reply, from, {:error, :file_size_exceeded}}, chunk_timeout()]}

      :file_write_error ->
        {:keep_state_and_data, [{:reply, from, {:error, :file_write_error}}, chunk_timeout()]}

      :offset_mismatch ->
        {:keep_state_and_data, [{:reply, from, {:error, :offset_mismatch}}, chunk_timeout()]}
    end
  end

  # Call :get_id
  def handle_event({:call, from}, :get_id, _state, data) do
    {:keep_state_and_data, [{:reply, from, data.id}]}
  end

  # Call :get_status
  def handle_event({:call, from}, :get_status, _state, data) do
    status = %{
      length: data.content_length,
      offset: data.upload_offset,
      metadata: data.metadata
    }

    {:keep_state_and_data, [{:reply, from, status}]}
  end

  # Stop if chunks don't come in within reasonable time
  def handle_event(:timeout, :stop_waiting, :waiting, _data) do
    {:stop, :normal}
  end

  # Stop if the whole file is not uploaded within reasonable time
  def handle_event(:state_timeout, :stop_waiting, :waiting, _data) do
    {:stop, :normal}
  end

  # Stop if file was retained for a certain amount of time
  def handle_event(:state_timeout, :cleanup, :complete, _data) do
    {:stop, :normal}
  end

  # Timeouts
  # TODO make configurable

  defp chunk_timeout do
    {:timeout, :timer.seconds(10), :stop_waiting}
  end

  defp upload_timeout do
    {:state_timeout, :timer.hours(1), :stop_waiting}
  end

  defp retention_timeout do
    {:state_timeout, :timer.hours(24), :cleanup}
  end

  # Helpers

  defp match_offset(current, append) do
    if current == append do
      :ok
    else
      :offset_mismatch
    end
  end

  defp new_offset(current, iodata, allowed) do
    new = current + IO.iodata_length(iodata)

    if new <= allowed, do: {:ok, new}, else: :file_size_exceeded
  end

  defp write_file(file, iodata) do
    # Chunks arrive sequentially at increasing offsets, so each write must
    # append rather than overwrite the file.
    case File.write(file, iodata, [:append]) do
      :ok -> :ok
      _ -> :file_write_error
    end
  end
end
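A hedged sketch of the upload lifecycle, assuming `Exotus.Registry` and Plug's temp-file support are up (id and sizes are illustrative):

```elixir
{:ok, pid} = Exotus.Upload.start_link(%{id: "abc123", content_length: 11})

{:ok, 5} = Exotus.Upload.append(pid, 0, "hello")
{:ok, 11} = Exotus.Upload.append(pid, 5, " world")  # reaching length moves to :complete

Exotus.Upload.status(pid)
# => %{length: 11, offset: 11, metadata: ""}

# Appending at the wrong offset is rejected:
{:error, :offset_mismatch} = Exotus.Upload.append(pid, 3, "x")
```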
# lib/tox/iso_days.ex
defmodule Tox.IsoDays do
  @moduledoc false

  @spec add(Calendar.iso_days(), Calendar.iso_days()) :: Calendar.iso_days()
  def add(
        {days1, {parts_in_day1, parts_per_day}},
        {days2, {parts_in_day2, parts_per_day}}
      ) do
    adjust({days1 + days2, {parts_in_day1 + parts_in_day2, parts_per_day}})
  end

  @spec from_datetime(Calendar.datetime()) :: Calendar.iso_days()
  def from_datetime(datetime), do: from(datetime)

  @spec from_naive_datetime(Calendar.naive_datetime()) :: Calendar.iso_days()
  def from_naive_datetime(naive_datetime), do: from(naive_datetime)

  @spec from_durations_time([Tox.duration()], Calendar.calendar(), non_neg_integer) ::
          Calendar.iso_days()
  def from_durations_time(durations, calendar, precision) do
    # Unlike the other from_* functions, this function can also return negative
    # values or values greater than parts_per_day for parts_in_day.
    {0,
     calendar.time_to_day_fraction(
       Keyword.get(durations, :hour, 0),
       Keyword.get(durations, :minute, 0),
       Keyword.get(durations, :second, 0),
       {
         Keyword.get(durations, :microsecond, 0) +
           Keyword.get(durations, :millisecond, 0) * 1_000,
         precision
       }
     )}
  end

  # Helper

  defp adjust({days, {parts_in_day, parts_per_day}}) when parts_in_day < 0 do
    quotient = div(parts_in_day, parts_per_day)
    remainder = rem(parts_in_day, parts_per_day)

    case remainder == 0 do
      true -> {days + quotient, {0, parts_per_day}}
      false -> {days + quotient - 1, {parts_per_day + remainder, parts_per_day}}
    end
  end

  defp adjust({days, {parts_in_day, parts_per_day}}) do
    {days + div(parts_in_day, parts_per_day), {rem(parts_in_day, parts_per_day), parts_per_day}}
  end

  defp from(%{
         calendar: calendar,
         year: year,
         month: month,
         day: day,
         hour: hour,
         minute: minute,
         second: second,
         microsecond: microsecond
       }) do
    calendar.naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond)
  end
end
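A small worked example of the iso-days arithmetic, using 86_400_000_000 parts per day (microsecond resolution, as `Calendar.ISO` uses):

```elixir
ppd = 86_400_000_000

# Half a day plus three quarters of a day rolls over into the next day:
Tox.IsoDays.add({730_485, {43_200_000_000, ppd}}, {0, {64_800_000_000, ppd}})
# => {730_486, {21_600_000_000, ppd}}   (1.25 days -> +1 day, 0.25 day remainder)
```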
defmodule PolicrMini.Logger do
  @moduledoc """
  Querying and recording of logs.
  """

  require Logger

  alias :mnesia, as: Mnesia

  @type query_cont :: [
          {:level, atom | nil},
          {:beginning, integer | nil},
          {:ending, integer | nil}
        ]

  @doc """
  Queries the persisted logs.

  The `query_cont` parameter holds the query conditions and supports the
  following optional entries:
  - `level`: the log level, e.g. `:error` or `:warn`.
  - `beginning`: the start time (as a timestamp).
  - `ending`: the end time (as a timestamp).

  Note: if no time-range parameters are given, all log records are returned,
  which can be a very large amount of data.
  """
  @spec query(query_cont) :: {:ok, [PolicrMini.Logger.Record.t()]} | {:error, any}
  def query(cont \\ []) do
    combiner = fn where, acc ->
      case where do
        {_, nil} -> acc
        {:level, level} -> acc ++ [{:==, :"$1", level}]
        {:beginning, beginning} -> acc ++ [{:>=, :"$3", beginning}]
        {:ending, ending} -> acc ++ [{:"=<", :"$3", ending}]
      end
    end

    guards = Enum.reduce(cont, [], combiner)

    matcher = fn ->
      Mnesia.select(Log, [{{Log, :_, :"$1", :"$2", :"$3"}, guards, [:"$$"]}])
    end

    case Mnesia.transaction(matcher) do
      {:atomic, records} ->
        records =
          records
          |> Enum.map(&PolicrMini.Logger.Record.new/1)
          |> Enum.reverse()

        {:ok, records}

      {:aborted, reason} ->
        {:error, reason}
    end
  end

  @doc """
  Logs an error message in a uniform format.

  ## Parameters
  - `action`: the action that failed. Placed at the start of the sentence,
    e.g. `Message deletion`.
  - `details`: details of the failure, usually an error return value. If you
    build the details yourself, a keyword list is recommended. Note that there
    is no need to call `inspect` before passing it.
  """
  @spec unitized_error(String.t(), any) :: :ok
  def unitized_error(action, details) do
    error("#{action} failed, details: #{inspect(details)}")
  end

  @doc """
  Logs a warning message in a uniform format.

  ## Parameters
  - `message`: the warning message. Its wording is free-form, but it should
    not end with a `.`.
  - `details`: details of the warning, usually an error return value. If you
    build the details yourself, a keyword list is recommended. Note that there
    is no need to call `inspect` before passing it.
  """
  @spec unitized_warn(String.t(), any) :: :ok
  def unitized_warn(message, details) do
    warn("#{message}, details: #{inspect(details)}")
  end

  defdelegate warn(chardata_or_fun, metadata \\ []), to: Logger
  defdelegate info(chardata_or_fun, metadata \\ []), to: Logger
  defdelegate error(chardata_or_fun, metadata \\ []), to: Logger
  defdelegate debug(chardata_or_fun, metadata \\ []), to: Logger
  defdelegate log(level, chardata_or_fun, metadata \\ []), to: Logger

  defmodule Record do
    @moduledoc """
    The structure of a single queryable log record.
    """

    @enforce_keys [:level, :message, :timestamp]
    defstruct level: nil, message: nil, timestamp: nil

    @type t :: %__MODULE__{
            level: atom,
            message: String.t(),
            timestamp: integer
          }

    def new([level, message, timestamp]) do
      %__MODULE__{level: level, message: message, timestamp: timestamp}
    end
  end

  defmodule Backend do
    @moduledoc """
    A custom logger backend.

    This backend persists logs into Mnesia; they can be queried with the
    `PolicrMini.Logger.query/1` function.
    """

    @behaviour :gen_event

    alias :mnesia, as: Mnesia

    def init({__MODULE__, name}) do
      init_mnesia!(name)

      {:ok, configure(name, [])}
    end

    @spec init_mnesia!(atom) :: :ok
    defp init_mnesia!(_name) do
      node_list = PolicrMini.Helper.init_mnesia!()

      table_results = [
        Mnesia.create_table(MnesiaSequence,
          attributes: [:name, :value],
          disc_only_copies: node_list
        ),
        Mnesia.create_table(Log,
          attributes: [:id, :level, :message, :timestamp],
          disc_only_copies: node_list
        )
      ]

      PolicrMini.Helper.check_mnesia_created_table!(table_results)

      Mnesia.wait_for_tables([MnesiaSequence, Log], 5000)

      :ok
    end

    @spec increment(atom) :: integer
    defp increment(name) do
      Mnesia.dirty_update_counter(MnesiaSequence, name, 1)
    end

    defp configure(name, []) do
      base_level = Application.get_env(:logger, name)[:level] || :debug

      Application.get_env(:logger, name, [])
      |> Enum.into(%{name: name, level: base_level})
    end

    def dirty_write(level, msg, ts) when is_binary(msg) do
      {{year, month, day}, {hour, minute, second, _msec}} = ts

      ts =
        {{year, month, day}, {hour, minute, second}}
        |> NaiveDateTime.from_erl!()
        # Note: this implementation requires log timestamps to be in UTC.
        |> DateTime.from_naive!("Etc/UTC")
        |> DateTime.to_unix()

      Mnesia.dirty_write({Log, increment(Log), level, msg, ts})
    end

    def handle_event(:flush, state) do
      {:ok, state}
    end

    # Persists string log messages.
    def handle_event({level, _gl, {Logger, msg, ts, _md}}, %{level: min_level} = state)
        when is_binary(msg) do
      if right_log_level?(min_level, level) do
        dirty_write(level, msg, ts)
      end

      {:ok, state}
    end

    def handle_event(_, state), do: {:ok, state}

    def handle_call({:configure, opts}, %{name: name} = state) do
      {:ok, :ok, configure(name, opts, state)}
    end

    defp configure(_name, [level: new_level], state) do
      Map.merge(state, %{level: new_level})
    end

    defp configure(_name, _opts, state), do: state

    defp right_log_level?(min_level, level) do
      Logger.compare_levels(level, min_level) != :lt
    end
  end
end
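# A minimal usage sketch for the query API above. The one-hour window and the
# "Message deletion" label are illustrative values, not taken from the project.
defmodule PolicrMini.LoggerExample do
  alias PolicrMini.Logger, as: Log

  # Fetch error-level records persisted within the last hour.
  def recent_errors do
    now = System.system_time(:second)

    case Log.query(level: :error, beginning: now - 3600, ending: now) do
      {:ok, records} -> records
      {:error, _reason} -> []
    end
  end

  # Emit a uniformly formatted error entry; `details` can be any term,
  # no `inspect/1` needed by the caller.
  def report_failure(details) do
    Log.unitized_error("Message deletion", details)
  end
end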
lib/policr_mini/logger.ex
0.680985
0.57517
logger.ex
starcoder
defmodule Rummage.Ecto.Schema.Macro do @moduledoc """ Usage: ```elixir defmodule MyApp.Rummage.MyModel do use Rummage.Schema, paginate: MyApp.Rummage.Paginate, sort: MyApp.Rummage.MyModel.Sort, search: MyApp.Rummage.MyModel.Search, schema: MyApp.MyModel end ``` """ defmacro __using__(opts) do paginate = Keyword.fetch!(opts, :paginate) sort = Keyword.fetch!(opts, :sort) search = Keyword.fetch!(opts, :search) schema = Keyword.fetch!(opts, :schema) repo = Keyword.get(opts, :repo, Rummage.Ecto.Config.repo()) quote location: :keep do use Ecto.Schema import Ecto.Changeset import Ecto.Query, warn: false @primary_key false embedded_schema do embeds_one(:paginate, unquote(paginate)) embeds_one(:search, unquote(search)) embeds_one(:sort, unquote(sort)) field(:params, :map) field(:changeset, :map) end def changeset(nil), do: changeset(struct(__MODULE__), %{}) def changeset(attrs), do: changeset(struct(__MODULE__), attrs) def changeset(rummage_schema, attrs) do attrs = Map.put_new(attrs, "paginate", %{}) attrs = Map.put_new(attrs, "search", %{}) attrs = Map.put_new(attrs, "sort", %{}) rummage_schema |> cast(attrs, []) |> cast_embed(:paginate) |> cast_embed(:search) |> cast_embed(:sort) end def rummage(params, opts \\ []) do query = Keyword.get(opts, :query, unquote(schema)) changeset = changeset(params) rummage = apply_changes(changeset) # changest - For use w/ 'search' form rummage = Map.put(rummage, :changeset, changeset) {query, rummage} = query |> search(rummage) |> sort(rummage) |> paginate(rummage) query = case Keyword.get(opts, :preload) do nil -> query preload -> from(a in query, preload: ^preload) end records = unquote(repo).all(query) paginate_params = if rummage.paginate do %{page: rummage.paginate.page, per_page: rummage.paginate.per_page} else nil end search_params = if rummage.search do Map.from_struct(rummage.search) else nil end sort_params = if rummage.sort do Map.from_struct(rummage.sort) else nil end # params - For use w/ sort and paginate links... rummage = Map.put(rummage, :params, %{ paginate: paginate_params, search: search_params, sort: sort_params }) {rummage, records} end # Note: rummage.paginate is modified - it gets a total_count defp paginate(query, %{paginate: paginate} = rummage) do {query, paginate} = unquote(paginate).rummage(query, paginate) {query, Map.put(rummage, :paginate, paginate)} end defp search(query, %{search: search}) do unquote(search).rummage(query, search) end defp sort(query, %{sort: sort}) do unquote(sort).rummage(query, sort) end end end end
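# A minimal usage sketch for the macro above, assuming a hypothetical
# MyApp.Rummage.Post module defined with this macro as in the @moduledoc.
# The params shape mirrors the "paginate"/"search"/"sort" keys cast by
# changeset/2.
defmodule MyApp.PostQueries do
  def list_posts(params) do
    # rummage/2 returns the rummage struct (carrying page/sort/search params
    # and the changeset for the search form) plus the fetched records.
    {rummage, posts} = MyApp.Rummage.Post.rummage(params, preload: [:author])

    {rummage.params, posts}
  end
end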
lib/rummage_ecto/schema/macro.ex
0.574753
0.528168
macro.ex
starcoder
defmodule Mix.Tasks.Akd.Gen.Task do
  @shortdoc ~s(Generates an Akd.Mix.Task which can be used to deploy an app)

  @tsk ~s(mix akd.gen.task)

  @info """
  #{@tsk} expects both module name and optional parameters:
      $ `#{@tsk} TaskModule -f FetcherModule`

  Usage:
      $ `#{@tsk} Deploy -f Akd.Fetcher.Git`

  Options:

  Option           Alias        Description
  --------------------------------------------------------------------------
  --fetcher        -f           Expects a fetcher hook module.
                                Defaults to `Akd.Fetcher.Git`.
                                Native Fetchers include:
                                `Akd.Fetcher.Git` and `Akd.Fetcher.Scp`

  --initer         -i           Expects an initer hook module.
                                Defaults to `Akd.Initer.Distillery`.
                                Native Initers include:
                                `Akd.Initer.Distillery`

  --builder        -b           Expects a builder hook module.
                                Defaults to `Akd.Builder.Distillery`.
                                Native Builders include:
                                `Akd.Builder.Distillery` and `Akd.Builder.Docker`

  --publisher      -p           Expects a publisher hook module.
                                Defaults to `Akd.Publisher.Distillery`.
                                Native Publishers include:
                                `Akd.Publisher.Distillery` and `Akd.Publisher.Docker`

  --with-phx       -w           Generates phoenix hooks alongside base hooks
  """

  @moduledoc """
  This task generates a mix task which can be used to deploy an app.

  Please refer to `Akd.Mix.Task` for more details.

  ## Info:

  #{@info}
  """

  use Mix.Task

  @switches [fetcher: :string, initer: :string, builder: :string, publisher: :string, with_phx: :boolean]
  @aliases [f: :fetcher, i: :initer, b: :builder, p: :publisher, w: :with_phx]

  @errs %{
    umbrella: "task `#{@tsk}` can only be run inside an application directory",
    task: "task already exists. Please pick a new name",
    args: "Invalid arguments"
  }

  @doc """
  Runs the mix task to generate the task module.
  """
  def run(args) do
    if Mix.Project.umbrella?(), do: info_raise @errs.umbrella

    generate(args)
  end

  # Generates the task module with args
  defp generate(args) do
    {task_opts, parsed, _} = OptionParser.parse(args, switches: @switches, aliases: @aliases)

    parsed
    |> validate_parsed!()
    |> Akd.Generator.Task.gen(task_opts)
  end

  # Validates parsed arguments, expects there to be a name
  defp validate_parsed!([name | tail]) do
    mod = "Mix.Tasks.Akd." <> name

    # `Mix.Task.load_all/0` returns module atoms, so the name is converted
    # before the membership check; comparing the raw string would never match.
    if Enum.member?(Mix.Task.load_all(), Module.concat([mod])), do: info_raise @errs.task

    [mod | tail]
  end

  # Raise error if no name is given
  defp validate_parsed!(_) do
    info_raise @errs.args
  end

  # Raise with info
  defp info_raise(message) do
    Mix.raise """
    #{message}

    #{@info}
    """
  end
end
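# Usage sketch: the generator is normally driven from the command line, e.g.
#
#     $ mix akd.gen.task Deploy --fetcher Akd.Fetcher.Git --with-phx
#
# It can also be invoked programmatically (handy in tests); the task name
# "Deploy" here is illustrative:
#
#     Mix.Tasks.Akd.Gen.Task.run(["Deploy", "-f", "Akd.Fetcher.Git"])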
lib/akd/mix/gen/task.ex
0.689828
0.432543
task.ex
starcoder
defmodule Aecore.Channel.ChannelTransaction do
  @moduledoc """
  Behaviour specifying the necessary functions which any onchain/offchain transaction modifying the offchain chainstate must implement.
  """

  alias Aecore.Chain.Identifier
  alias Aecore.Channel.{ChannelOffChainUpdate, ChannelOffChainTx, ChannelStateOnChain}
  alias Aecore.Channel.Tx.{ChannelCreateTx, ChannelWithdrawTx, ChannelDepositTx}
  alias Aecore.Keys
  alias Aecore.Tx.{DataTx, SignedTx}

  @typedoc """
  Data structures capable of mutating the offchain chainstate of a state channel
  """
  @type channel_tx ::
          DataTx.t()
          | ChannelOffChainTx.t()
          | ChannelCreateTx.t()
          | ChannelWithdrawTx.t()
          | ChannelDepositTx.t()

  @typedoc """
  Type of a signed channel transaction
  """
  @type signed_tx :: SignedTx.t() | ChannelOffChainTx.t()

  @typedoc """
  Types of allowed OnChain transactions
  """
  @type onchain_tx :: ChannelCreateTx | ChannelWithdrawTx | ChannelDepositTx

  @typedoc """
  Payloads of allowed OnChain transactions
  """
  @type onchain_tx_payload ::
          ChannelCreateTx.payload()
          | ChannelWithdrawTx.payload()
          | ChannelDepositTx.payload()

  @allowed_onchain_tx [
    ChannelCreateTx,
    ChannelWithdrawTx,
    ChannelDepositTx
  ]

  @typedoc """
  The type of errors returned by the functions in this module
  """
  @type error :: {:error, String.t()}

  @doc """
  Get a list of offchain updates to the offchain chainstate
  """
  @callback offchain_updates(signed_tx() | DataTx.t()) :: list(ChannelOffChainUpdate.update_types())

  @doc """
  Preprocess checks for an incoming half signed transaction.
  This check should ensure that the transaction is not malicious
  (for instance transfer updates should validate if the transfer is in the correct direction).
  """
  @spec half_signed_preprocess_check(signed_tx(), map()) :: :ok | error()
  def half_signed_preprocess_check(tx, opts) do
    tx
    |> offchain_updates
    |> do_half_signed_preprocess_check(opts)
  end

  @spec do_half_signed_preprocess_check(list(ChannelOffChainUpdate.update_types()), map()) ::
          :ok | error()
  defp do_half_signed_preprocess_check([update | rest], opts) do
    case ChannelOffChainUpdate.half_signed_preprocess_check(update, opts) do
      :ok ->
        do_half_signed_preprocess_check(rest, opts)

      {:error, _} = err ->
        err
    end
  end

  defp do_half_signed_preprocess_check([], _) do
    :ok
  end

  @doc """
  Verifies if the provided signed transaction was signed by the provided pubkey.
  Fails when the transaction was signed by more keys than expected.
  """
  @spec verify_half_signed_tx(signed_tx(), Keys.pubkey()) :: boolean()
  def verify_half_signed_tx(
        %SignedTx{
          data: %DataTx{
            type: type,
            senders: [%Identifier{value: initiator}, %Identifier{value: responder}]
          },
          signatures: signatures
        } = tx,
        pubkey
      )
      when type in @allowed_onchain_tx do
    (pubkey == initiator or pubkey == responder) and length(signatures) == 1 and
      SignedTx.signature_valid_for?(tx, pubkey)
  end

  def verify_half_signed_tx(
        %SignedTx{
          data: %DataTx{
            type: type,
            senders: []
          },
          signatures: signatures
        } = tx,
        pubkey
      )
      when type in @allowed_onchain_tx do
    type.chainstate_senders?() and length(signatures) == 1 and
      SignedTx.signature_valid_for?(tx, pubkey)
  end

  def verify_half_signed_tx(%SignedTx{}, _) do
    false
  end

  def verify_half_signed_tx(%ChannelOffChainTx{signatures: {_, <<>>}} = tx, pubkey) do
    ChannelOffChainTx.verify_signature_for_key(tx, pubkey)
  end

  def verify_half_signed_tx(%ChannelOffChainTx{}, _) do
    false
  end

  @doc """
  Verifies if the transaction was signed by both of the provided parties.
""" @spec verify_fully_signed_tx(signed_tx(), tuple()) :: boolean() def verify_fully_signed_tx( %SignedTx{ data: %DataTx{ type: type, senders: [%Identifier{value: initiator}, %Identifier{value: responder}] } } = tx, {correct_initiator, correct_responder} ) when type in @allowed_onchain_tx do initiator == correct_initiator and responder == correct_responder and SignedTx.signatures_valid?(tx, [initiator, responder]) end def verify_fully_signed_tx( %SignedTx{ data: %DataTx{ type: type, senders: [] } } = tx, {correct_initiator, correct_responder} ) when type in @allowed_onchain_tx do type.chainstate_senders?() and SignedTx.signatures_valid?(tx, [correct_initiator, correct_responder]) end def verify_fully_signed_tx(%ChannelOffChainTx{} = tx, pubkeys) do ChannelOffChainTx.verify_signatures(tx, pubkeys) end @doc """ Helper function for signing a channel transaction """ @spec add_signature(signed_tx() | DataTx.t(), Keys.sign_priv_key()) :: {:ok, signed_tx()} | error() def add_signature(%SignedTx{data: %DataTx{type: type}} = tx, privkey) when type in @allowed_onchain_tx do SignedTx.sign_tx(tx, privkey) end def add_signature(%DataTx{type: type} = tx, privkey) when type in @allowed_onchain_tx do SignedTx.sign_tx(tx, privkey) end def add_signature(%ChannelOffChainTx{} = tx, privkey) do ChannelOffChainTx.sign(tx, privkey) end @doc """ Retrieves the unsigned payload from a signed/unsigned channel transaction """ @spec unsigned_payload(signed_tx() | channel_tx()) :: channel_tx() def unsigned_payload(%SignedTx{data: data_tx}) do unsigned_payload(data_tx) end def unsigned_payload(%DataTx{type: type, payload: payload}) when type in @allowed_onchain_tx do payload end def unsigned_payload(%ChannelOffChainTx{} = tx) do tx end @doc """ Converts the transaction to a form suitable for initializing the payload in ChannelSoloCloseTx, ChannelSlashTx and ChannelSnapshotSoloTx """ @spec dispute_payload(signed_tx()) :: ChannelOffChainTx.t() | :empty def dispute_payload(%ChannelOffChainTx{} = tx) do tx end def dispute_payload(%SignedTx{data: %DataTx{type: type}}) when type in @allowed_onchain_tx do :empty end @doc """ Specifies whether the effect of the transaction on the channel offchain state is instant. If it's not then after receiving the Tx the channel is locked until the Tx was mined and min_depth confirmations were made """ @spec requires_onchain_confirmation?(signed_tx()) :: boolean() def requires_onchain_confirmation?(%ChannelOffChainTx{}) do false end def requires_onchain_confirmation?(%SignedTx{data: %DataTx{type: type}}) when type in @allowed_onchain_tx do true end @doc """ Sequence of the state after applying the transaction to the chainstate. """ @spec sequence(signed_tx() | DataTx.t()) :: non_neg_integer() def sequence(%SignedTx{data: data_tx}) do sequence(data_tx) end def sequence(%DataTx{type: ChannelCreateTx}) do 1 end def sequence(tx) do unsigned_payload(tx).sequence end @doc """ Channel id for which the transaction is designated. 
""" @spec channel_id(signed_tx() | DataTx.t()) :: binary() def channel_id(%SignedTx{data: data_tx}) do channel_id(data_tx) end def channel_id(%DataTx{type: ChannelCreateTx} = data_tx) do ChannelStateOnChain.id(data_tx) end def channel_id(tx) do unsigned_payload(tx).channel_id end @doc """ Sets the sequence of the offchain state after applying the channel transaction to the state channel """ @spec set_sequence(channel_tx(), non_neg_integer()) :: channel_tx() def set_sequence(%DataTx{type: type} = data_tx, _sequence) when type === Aecore.Channel.Tx.ChannelCreateTx do data_tx end def set_sequence(%DataTx{type: type, payload: payload} = data_tx, sequence) when type in @allowed_onchain_tx and type !== Aecore.Channel.Tx.ChannelCreateTx do # Maybe consider doing proper dispatching here? %DataTx{data_tx | payload: Map.put(payload, :sequence, sequence)} end def set_sequence(%ChannelOffChainTx{} = tx, sequence) do %ChannelOffChainTx{tx | sequence: sequence} end @doc """ Sets the state hash of the offchain chainstate after the transaction is applied to the state channel """ @spec set_state_hash(channel_tx(), binary()) :: channel_tx() def set_state_hash(%DataTx{type: type, payload: payload} = data_tx, state_hash) when type in @allowed_onchain_tx do # Maybe consider doing proper dispatching here? %DataTx{data_tx | payload: Map.put(payload, :state_hash, state_hash)} end def set_state_hash(%ChannelOffChainTx{} = tx, state_hash) do %ChannelOffChainTx{tx | state_hash: state_hash} end @doc """ Get a list of updates to the offchain chainstate """ @spec offchain_updates(signed_tx() | DataTx.t()) :: list(ChannelOffChainUpdate.update_types()) def offchain_updates(tx) do structure = unsigned_payload(tx) structure.__struct__.offchain_updates(tx) end end
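# A hedged sketch of the two-step co-signing round-trip implied by
# add_signature/2 and the verify_* functions above. The transaction and the
# key material are placeholders supplied by the caller.
defmodule Aecore.Channel.ChannelTransactionExample do
  alias Aecore.Channel.ChannelTransaction

  def cosign(tx, {initiator_priv, responder_priv}, {initiator_pub, responder_pub}) do
    with {:ok, half_signed} <- ChannelTransaction.add_signature(tx, initiator_priv),
         true <- ChannelTransaction.verify_half_signed_tx(half_signed, initiator_pub),
         {:ok, fully_signed} <- ChannelTransaction.add_signature(half_signed, responder_priv),
         true <-
           ChannelTransaction.verify_fully_signed_tx(
             fully_signed,
             {initiator_pub, responder_pub}
           ) do
      {:ok, fully_signed}
    else
      _ -> {:error, "signing round-trip failed"}
    end
  end
end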
apps/aecore/lib/aecore/channel/channel_transaction.ex
0.904756
0.445831
channel_transaction.ex
starcoder
defmodule SpadesGame.GameAI.Play do @moduledoc """ Functions for the AI figuring out which card to play. """ alias SpadesGame.{Card, Deck, Game, TrickCard} alias SpadesGame.GameAI.PlayInfo @spec play(Game.t()) :: Card.t() def play(%Game{turn: turn, trick: trick} = game) when turn != nil do {:ok, valid_cards} = Game.valid_cards(game, turn) info = %PlayInfo{ hand: Game.hand(game, turn), valid_cards: valid_cards, trick: trick, me_nil: player_nil?(game, turn), partner_nil: player_nil?(game, Game.partner(turn)), left_nil: player_nil?(game, Game.rotate(turn)), right_nil: player_nil?(game, turn |> Game.partner() |> Game.rotate()), partner_winning: partner_winning?(game) } # true -> raise "play: don't understand this trick" case length(trick) do 0 -> play_pos1(info) 1 -> play_pos2(info) 2 -> play_pos3(info) 3 -> play_pos4(info) end end def partner_winning?(%Game{trick: trick}) do winner_index = Game.trick_winner_index(trick) is_pos3 = length(trick) == 2 is_pos4 = length(trick) == 3 is_pos1_winning = winner_index == 0 is_pos2_winning = winner_index == 1 (is_pos3 and is_pos1_winning) or (is_pos4 and is_pos2_winning) end @spec player_nil?(Game.t(), :west | :north | :east | :south) :: boolean def player_nil?(game, turn) do game[turn].bid == 0 end # play_pos1: What card should we play if we are starting a trick? @spec play_pos1(PlayInfo.t()) :: Card.t() def play_pos1(%PlayInfo{ valid_cards: valid_cards, me_nil: me_nil, partner_nil: partner_nil, left_nil: left_nil, right_nil: right_nil }) do {best_card, worst_card} = empty_trick_best_worst(valid_cards) cond do me_nil or left_nil or right_nil -> worst_card partner_nil -> best_card best_card.rank == 14 -> # Cash an ace best_card true -> worst_card end end # Given a list of cards, if we are starting a new trick, what are the best # and worst cards? Example: Worst card = 2 of clubs, Best card = Ace of spades. @spec empty_trick_best_worst(Deck.t()) :: {Card.t(), Card.t()} def empty_trick_best_worst(valid_cards) when length(valid_cards) > 0 do priority_map = priority_map([]) sorted_cards = valid_cards |> Enum.map(fn %Card{rank: rank, suit: suit} = card -> val = rank + priority_map[suit] {card, val} end) |> Enum.sort_by(fn {_card, val} -> val end) |> Enum.map(fn {card, _val} -> card end) worst_card = List.first(sorted_cards) best_card = List.last(sorted_cards) {best_card, worst_card} end # play_pos2: What card should we play if we are the second person playing in a trick? @spec play_pos2(PlayInfo.t()) :: Card.t() def play_pos2(%PlayInfo{me_nil: me_nil, partner_nil: partner_nil} = info) do options = card_options(info.trick, info.valid_cards) to_play = cond do me_nil -> [ options.best_loser, options.worst_winner ] partner_nil -> [ options.best_winner, options.worst_loser ] true -> [ options.worst_winner, options.worst_loser ] end first_non_nil(to_play ++ [Enum.random(info.valid_cards)]) end # play_pos3: What card should we play if we are the third person playing in a trick? @spec play_pos3(PlayInfo.t()) :: Card.t() def play_pos3( %PlayInfo{me_nil: me_nil, partner_nil: partner_nil, partner_winning: partner_winning} = info ) do options = card_options(info.trick, info.valid_cards) # Wants to know: Is my partner winning? 
to_play = cond do me_nil -> [ options.best_loser, options.worst_winner ] partner_nil -> [ options.worst_winner, options.worst_loser ] partner_winning -> [ options.worst_loser, options.worst_winner ] true -> [ options.worst_winner, options.worst_loser ] end first_non_nil(to_play ++ [Enum.random(info.valid_cards)]) end # play_pos4: What card should we play if we are the fourth person playing in a trick? @spec play_pos4(PlayInfo.t()) :: Card.t() def play_pos4( %PlayInfo{me_nil: me_nil, partner_winning: partner_winning, partner_nil: partner_nil} = info ) do options = card_options(info.trick, info.valid_cards) to_play = cond do me_nil -> [ options.best_loser, options.worst_winner ] partner_winning and not partner_nil -> # Pos 4: If my partner is winning, I don't have to win it unless they're going nil [ options.worst_loser, options.worst_winner ] true -> [ options.worst_winner, options.worst_loser ] end first_non_nil(to_play ++ [Enum.random(info.valid_cards)]) end @spec first_non_nil(list(any)) :: any def first_non_nil(list) when length(list) > 0 do list |> Enum.filter(fn x -> x != nil end) |> List.first() end @spec card_options(list(TrickCard.t()), Deck.t()) :: map def card_options([], _valid_cards) do %{ worst_winner: nil, best_winner: nil, worst_loser: nil, best_loser: nil } end def card_options(trick, valid_cards) when length(trick) > 0 do priority_map = priority_map(trick) trick_max = trick_max(trick) sort_cards = valid_cards |> Enum.map(fn %Card{rank: rank, suit: suit} = card -> val = rank + priority_map[suit] {card, val} end) |> Enum.sort_by(fn {_card, val} -> val end) winners = sort_cards |> Enum.filter(fn {_card, val} -> val >= trick_max end) |> Enum.map(fn {card, _val} -> card end) losers = sort_cards |> Enum.filter(fn {_card, val} -> val < trick_max end) |> Enum.map(fn {card, _val} -> card end) %{ worst_winner: List.first(winners), best_winner: List.last(winners), worst_loser: List.first(losers), best_loser: List.last(losers) } end # trick_max/1 # Given a trick with at least one card, what's the current value of the winner? # value = rank + suit priority @spec trick_max(list(TrickCard.t())) :: non_neg_integer def trick_max(trick) when length(trick) > 0 do priority_map = priority_map(trick) trick |> Enum.map(fn %TrickCard{card: %Card{rank: rank, suit: suit}} -> rank + priority_map[suit] end) |> Enum.max() end def trick_max([]), do: 0 # priority_map/1 # Given a trick, what is the priority of each suit? # This is the mechanism we use to track following suit, # ruffing and trumping. See game.ex. 
@spec priority_map(list(TrickCard.t())) :: map def priority_map(trick) when length(trick) > 0 do List.last(trick).card.suit |> Game.suit_priority() end def priority_map([]) do %{s: 200, h: 100, c: 100, d: 100} end end ## Misc notes - Can be deleted later # Game # turn: nil | :west | :north | :east | :south, # west: GamePlayer.t(), # north: GamePlayer.t(), # east: GamePlayer.t(), # south: GamePlayer.t(), # %GamePlayer{ # hand: Deck.new_empty(), # tricks_won: 0, # bid: nil # } # valid_cards: [ # %SpadesGame.Card{rank: 12, suit: :s}, # %SpadesGame.Card{rank: 2, suit: :s} # ] # In a trick, First card = last in list by convention # game.trick: list(TrickCard.t()), # @type t :: %TrickCard{card: Card.t(), seat: :north | :east | :west | :south} # Info Example # %{ # hand: [ # %SpadesGame.Card{rank: 12, suit: :s}, # %SpadesGame.Card{rank: 7, suit: :c}, # %SpadesGame.Card{rank: 2, suit: :s} # ], # trick: [ # %SpadesGame.TrickCard{ # card: %SpadesGame.Card{rank: 5, suit: :s}, # seat: :east # }, # %SpadesGame.TrickCard{ # card: %SpadesGame.Card{rank: 3, suit: :s}, # seat: :north # } # ], # valid_cards: [ # %SpadesGame.Card{rank: 12, suit: :s}, # %SpadesGame.Card{rank: 2, suit: :s} # ] # } # Trick example 1 # [ # %SpadesGame.TrickCard{card: %SpadesGame.Card{rank: 9, suit: :d}, seat: :north}, # %SpadesGame.TrickCard{card: %SpadesGame.Card{rank: 2, suit: :d}, seat: :west} # ] # Trick example 2 # Ace was played first # [ # %SpadesGame.TrickCard{card: %SpadesGame.Card{rank: 2, suit: :c}, seat: :east}, # %SpadesGame.TrickCard{ # card: %SpadesGame.Card{rank: 14, suit: :c}, # seat: :north # } # ]
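# A small sketch exercising the pure helpers above; card construction mirrors
# the %SpadesGame.Card{rank:, suit:} shape shown in the notes.
defmodule SpadesGame.GameAI.PlayExample do
  alias SpadesGame.Card
  alias SpadesGame.GameAI.Play

  def demo do
    hand = [
      # ace of spades
      %Card{rank: 14, suit: :s},
      # two of clubs
      %Card{rank: 2, suit: :c},
      # nine of diamonds
      %Card{rank: 9, suit: :d}
    ]

    # With an empty trick, spades get the +200 priority, so the ace of spades
    # is "best" (value 214) and the two of clubs is "worst" (value 102).
    {_best, _worst} = Play.empty_trick_best_worst(hand)
  end
end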
backend/lib/spades_game/ai/game_ai_play.ex
0.804367
0.564219
game_ai_play.ex
starcoder
defmodule MapTransform do
  @moduledoc """
  This is a simple library that can transform one map into another through
  mapping rules.
  """

  @type path :: nonempty_list(term)
  @type mapping :: {path, path} | {path, path, (term -> term)}

  @doc """
  Transform one map into another.

  ## Format for mappings

  The paths use the same format that `get_in/2` and `put_in/2` use. For
  example, to get data from `c` in `%{a: %{b: %{c: 1}}}` you would provide
  `[:a, :b, :c]` as the path.

  Then we use these paths in the mapping in a list of tuples where:

      {from_path, to_path}
      {from_path, to_path, &transform_function/1}

  ## Example

  Basic

      iex> mapping = [
      ...>   {[:a, :b, :c], [:abc]}
      ...> ]
      ...> source = %{a: %{b: %{c: 1}}}
      ...> transform(source, mapping)
      %{abc: 1}

  String keys and using a transform function

      iex> mapping = [
      ...>   {["a", "b", "c"], [:abc], &String.to_integer/1}
      ...> ]
      ...> source = %{"a" => %{"b" => %{"c" => "1"}}}
      ...> transform(source, mapping)
      %{abc: 1}

  Any nesting

      iex> mapping = [
      ...>   {[:a, :b, :c], [:foo, :bar]}
      ...> ]
      ...> source = %{a: %{b: %{c: 1}}}
      ...> transform(source, mapping)
      %{foo: %{bar: 1}}
  """
  @spec transform(map, [mapping]) :: map
  def transform(source, mapping) do
    base_map = base_map_from_mapping(mapping)

    mapping
    |> Enum.reduce(base_map, &do_transform(&1, &2, source))
  end

  defp do_transform({from_path, to_path}, acc, source) do
    do_transform({from_path, to_path, & &1}, acc, source)
  end

  defp do_transform({from_path, to_path, function}, acc, source) do
    put_in(acc, to_path, source |> get_in(from_path) |> function.())
  end

  defp base_map_from_mapping(mapping) do
    mapping
    |> Enum.map(&elem(&1, 1))
    |> base_map()
  end

  defp base_map(paths) do
    paths
    |> Enum.reduce(%{}, &do_base_map/2)
  end

  defp do_base_map([], _acc) do
    nil
  end

  defp do_base_map([last], acc) do
    Map.put(acc, last, nil)
  end

  defp do_base_map([key | rest], acc) do
    Map.put(acc, key, do_base_map(rest, %{}))
  end
end
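# A short sketch combining several rules in one pass, including a transform
# function; the field names here are illustrative.
defmodule MapTransformExample do
  def normalize(source) do
    mapping = [
      {["user", "name"], [:profile, :name]},
      {["user", "age"], [:profile, :age], &String.to_integer/1}
    ]

    # %{"user" => %{"name" => "Ada", "age" => "36"}} becomes
    # %{profile: %{name: "Ada", age: 36}}
    MapTransform.transform(source, mapping)
  end
end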
lib/map_transform.ex
0.850422
0.648383
map_transform.ex
starcoder
defmodule HtPipe do
  @moduledoc """
  `HtPipe`: Macro for the Heavy Task Pipeline operator.
  """

  @sub_elixir_alive_time 100_000

  @doc """
  Starts a task that can be awaited on while being supervised, and
  temporarily blocks the caller process waiting for a task reply,
  shutting the task down on timeout.

  The task won't be linked to the caller.

  Returns `{:ok, reply}` if the reply is received, `nil` if no reply has
  arrived, or `{:exit, reason}` if the task has already exited.

  Keep in mind that normally a task failure also causes the process owning
  the task to exit. Therefore this function can return `{:exit, reason}` if
  at least one of the conditions below apply:

  * the task process exited with the reason `:normal`
  * the task isn't linked to the caller due to being supervised by
    this function
  * the caller is trapping exits

  This function assumes the task's monitor is still active or the monitor's
  `:DOWN` message is in the message queue. If it has been demonitored or the
  message already received, this function will wait for the duration of the
  timeout awaiting the message.

  Raises an error if `Task.Supervisor` has reached the maximum number of
  children.

  Note this function requires the task supervisor to have `:temporary` as
  the `:restart` option (the default), as this function keeps a direct
  reference to the task which is lost if the task is restarted.

  ## Options

  * `:timeout` - a timeout, in milliseconds or `:infinity`, can be given
    with a default value of `5000`. If the time runs out before a message
    from the task is received, this function will return `nil` and the
    monitor will remain active. Therefore this function can be called
    multiple times on the same task.

  * `:spawn` - the spawn strategy, may be `:inner` (the default) or `:os`.
    `:inner` means to spawn a light-weight process monitored by
    `Task.Supervisor`. `:os` means to spawn an os-level process using
    `Port` and `Node`, which is robust against situations where the entire
    Erlang VM terminates abnormally, for example a NIF abort, but it
    reduces the efficiency of parameter passing.
  """
  @spec htp(fun(), keyword()) :: {:ok, any()} | {:exit, any()} | nil
  def htp(f, options \\ []) when is_function(f) do
    timeout = options |> Keyword.get(:timeout, 5000)
    spawn = options |> Keyword.get(:spawn, :inner)
    htp_p(f, timeout, spawn)
  end

  defp htp_p(f, timeout, :inner) do
    task = Task.Supervisor.async_nolink(HtPipe.TaskSupervisor, f)
    Task.yield(task, timeout) || Task.shutdown(task)
  end

  defp htp_p(f, timeout, :os) do
    if spawn_sub_elixir() do
      Node.spawn(htp_worker(), __MODULE__, :worker, [self(), timeout, f])
      Process.sleep(100)

      result =
        receive do
          e -> e
        after
          timeout -> nil
        end

      result
    else
      nil
    end
  end

  @doc """
  Spawns the Elixir sub os process with a `Node` setting.

  This process will terminate after `@sub_elixir_alive_time` msec.
  """
  @spec spawn_sub_elixir() :: true | false
  def spawn_sub_elixir() do
    unless Node.alive?() do
      # TODO set up Node
    end

    unless wait_for_connect_htp_worker(1000) do
      spawn_sub_elixir_sub()
    end

    wait_for_connect_htp_worker(1000)
  end

  @doc false
  def spawn_sub_elixir_sub() do
    Task.async(fn ->
      spawn_sub_elixir_sub_sub()
      |> case do
        {message, 1} ->
          if String.match?(
               message,
               ~r/Protocol 'inet_tcp': the name #{htp_worker_name()} seems to be in use by another Erlang node\r\n/
             ) do
            kill_htp_worker()
            Process.sleep(100)
            spawn_sub_elixir_sub()
          else
            raise RuntimeError, "unknown return value #{message}"
          end

        other ->
          other
      end
    end)
  end

  @doc false
  def spawn_sub_elixir_sub_sub() do
    System.cmd(
      "elixir",
      [
        "--name",
        htp_worker_name(),
        "--cookie",
        Node.get_cookie() |> Atom.to_string(),
        "-S",
        "mix",
        "run",
        "-e",
        "Process.sleep(#{@sub_elixir_alive_time})"
      ],
      stderr_to_stdout: true
    )
  end

  @doc """
  Kills the htp workers.
  """
  @spec kill_htp_worker() :: :ok
  def kill_htp_worker() do
    System.cmd("ps", ["auxww"])
    |> elem(0)
    |> String.split("\n")
    |> Enum.filter(&String.match?(&1, ~r/#{htp_worker_name()}/))
    |> Enum.map(fn str ->
      Regex.named_captures(~r/[A-z0-9]+[ ]+(?<target>[0-9]+)/, str)
    end)
    |> Enum.map(fn %{"target" => id} ->
      System.cmd("kill", [id], stderr_to_stdout: true)
    end)

    :ok
  end

  @doc false
  @spec worker(pid(), non_neg_integer() | atom(), fun()) :: {:ok, any()} | {:exit, any()} | nil
  def worker(receiver, timeout, f) do
    send(receiver, HtPipe.htp(f, timeout: timeout, spawn: :inner))
  end

  @doc """
  Waits for and tests the connection between the self process and the worker
  of the sub elixir process.

  A timeout, in milliseconds, can be given with a default value of `1000`.
  """
  @spec wait_for_connect_htp_worker(integer) :: true | false
  def wait_for_connect_htp_worker(timeout \\ 1000)

  def wait_for_connect_htp_worker(timeout) when timeout > 0 do
    case {Node.connect(htp_worker()), Node.ping(htp_worker())} do
      {true, :pong} ->
        true

      _ ->
        Process.sleep(100)
        wait_for_connect_htp_worker(timeout - 100)
    end
  end

  def wait_for_connect_htp_worker(_), do: false

  @doc """
  Halts the worker of the sub elixir process.
  """
  @spec halt_htp_worker() :: :ok
  def halt_htp_worker() do
    case Node.ping(htp_worker()) do
      :pong ->
        Node.spawn(htp_worker(), &System.halt/0)
        :ok

      :pang ->
        :ok
    end
  end

  @doc """
  Gets the node of the worker.
  """
  @spec htp_worker() :: atom()
  def htp_worker() do
    [sname, hostname] = Node.self() |> get_listname_from_nodename()
    :"htp_worker_#{sname}@#{hostname}"
  end

  @spec htp_worker_name() :: String.t()
  def htp_worker_name() do
    htp_worker() |> Atom.to_string()
  end

  defp get_listname_from_nodename(node_name) do
    node_name
    |> Atom.to_string()
    |> String.split("@")
  end

  @doc """
  Gets the short name of the node.
  """
  @spec get_sname_from_nodename(atom()) :: String.t()
  def get_sname_from_nodename(node_name) do
    node_name
    |> get_listname_from_nodename()
    |> Enum.at(0)
  end

  @doc """
  Gets the host name of the node.
  """
  @spec get_hostname_from_nodename(atom()) :: String.t()
  def get_hostname_from_nodename(node_name) do
    node_name
    |> get_listname_from_nodename()
    |> Enum.at(1)
  end
end
lib/ht_pipe.ex
0.70304
0.558207
ht_pipe.ex
starcoder
defmodule Auctoritas.DataStorage.Data do alias Auctoritas.DataStorage.Data @typedoc "Token expiration in seconds" @type expiration() :: non_neg_integer() @typedoc "When was token inserted (UNIX Epoch time)" @type inserted_at() :: non_neg_integer() @typedoc "When was token updated (UNIX Epoch time)" @type updated_at() :: non_neg_integer() @typedoc "Refresh token" @type token() :: String.t() @type metadata() :: %{ inserted_at: inserted_at(), updated_at: updated_at(), expires_in: expiration() } @derive Jason.Encoder @enforce_keys [:data, :refresh_token, :metadata] defstruct [:data, :refresh_token, :metadata] @typedoc """ Data struct with data and metadata maps * data is data associated when inserting token into data_storage * metadata contains inserted_at, updated_at, expires_in time inserted when using `get_token_data` function from data_storage """ @type t :: %__MODULE__{ data: map(), refresh_token: token() | nil, metadata: metadata() } @spec new(data_map :: map()) :: %__MODULE__{} def new(data_map) when is_map(data_map) do struct(__MODULE__, data_map) end @spec new(data :: map(), expiration :: expiration()) :: %__MODULE__{} def new(data, expiration) when is_map(data) and is_number(expiration) do new(%{data: data, metadata: initial_metadata(expiration)}) end @spec new(data :: map(), refresh_token :: token(), expiration :: expiration()) :: %__MODULE__{} def new(data, refresh_token, expiration) when is_map(data) and is_number(expiration) do new(%{data: data, refresh_token: refresh_token, metadata: initial_metadata(expiration)}) end @spec get_data(data :: %__MODULE__{}) :: map() def get_data(%__MODULE__{} = data) do data.data end @spec get_metadata(data :: %__MODULE__{}) :: map() def get_metadata(%__MODULE__{} = data) do data.metadata end @spec get_refresh_token(data :: %__MODULE__{}) :: token() def get_refresh_token(%__MODULE__{} = data) do data.refresh_token end @spec update_data(data :: %__MODULE__{}, data :: map()) :: %__MODULE__{} def update_data(%__MODULE__{} = data, new_data) when is_map(new_data) do data |> update_metadata(%{ updated_at: System.system_time(:second) }) |> Map.put(:data, Map.merge(data.data, new_data)) end @spec update_metadata(data :: %__MODULE__{}, new_metadata :: map()) :: %__MODULE__{} def update_metadata(%__MODULE__{} = data, new_metadata) when is_map(new_metadata) do Map.put(data, :metadata, Map.merge(data.metadata, new_metadata)) end @spec add_expiration(data :: %__MODULE__{}, expiration :: expiration()) :: %__MODULE__{} def add_expiration(%__MODULE__{} = data, expiration) when is_number(expiration) do data |> update_metadata(%{expires_in: expiration}) end @spec initial_metadata(expiration :: expiration()) :: metadata() def initial_metadata(expiration) do %{ inserted_at: System.system_time(:second), updated_at: System.system_time(:second), expires_in: expiration } end def encode(%__MODULE__{} = data) do Jason.encode(data) end def decode(data_json) when is_bitstring(data_json) do case Jason.decode(data_json, keys: :atoms) do {:ok, data_map} -> {:ok, Data.new(data_map)} {:error, error} -> {:error, error} end end end
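# A sketch of the construct/mutate/serialize cycle; the payload map and the
# one-hour expiration are illustrative.
defmodule Auctoritas.DataStorage.DataExample do
  alias Auctoritas.DataStorage.Data

  def demo do
    data =
      %{user_id: 42}
      # expires in 3600 seconds
      |> Data.new(3600)
      # merges into :data and bumps metadata.updated_at
      |> Data.update_data(%{role: "admin"})

    # JSON round-trip: decode/1 rebuilds the struct with atom keys.
    {:ok, json} = Data.encode(data)
    {:ok, _decoded} = Data.decode(json)
  end
end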
lib/auctoritas/data_storage/data.ex
0.831827
0.441793
data.ex
starcoder
defmodule Period.Relationship do
  @moduledoc """
  Works with relationships between two `Period`s.
  """

  @typep relationship ::
           :same
           | :contains
           | :contained
           | :before
           | :after
           | :intersect_start
           | :intersect_end
           | :abut_left
           | :abut_right

  @doc """
  Determine if the first period is before the second one.

  Being before the other period means having no overlap, but the periods
  might directly abut each other.

  ## Examples

      iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12])
      iex> b = Period.from_naive!(~N[2017-11-22 14:32:21], ~N[2017-11-23 10:29:12])
      iex> Period.Relationship.is_before?(a, b)
      true
  """
  @spec is_before?(Period.t(), Period.t()) :: boolean
  @spec is_before?(Period.t(), Period.t(), keyword) :: boolean
  def is_before?(%Period{} = a, %Period{} = b, opts \\ []) do
    case period_relationship(a, b, opts) do
      :before -> true
      :abut_right -> true
      _ -> false
    end
  end

  @doc """
  Determine if the first period is after the second one.

  Being after the other period means having no overlap, but the periods
  might directly abut each other.

  ## Examples

      iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12])
      iex> b = Period.from_naive!(~N[2017-11-22 14:32:21], ~N[2017-11-23 10:29:12])
      iex> Period.Relationship.is_after?(b, a)
      true
  """
  @spec is_after?(Period.t(), Period.t()) :: boolean
  @spec is_after?(Period.t(), Period.t(), keyword) :: boolean
  def is_after?(%Period{} = a, %Period{} = b, opts \\ []) do
    case period_relationship(a, b, opts) do
      :after -> true
      :abut_left -> true
      _ -> false
    end
  end

  @doc """
  Determine if the periods abut each other.

  Abutting means having no overlap, but also no gap:

  ### End exclusive - Start inclusive

  ```markdown
  … . . .) [. . . …
  ```

  ### End inclusive - Start exclusive

  ```markdown
  … . . .] (. . . …
  ```

  ### Both inclusive

  ```markdown
  … . .] [. . . …
  ```

  ## Examples

      iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12])
      iex> b = Period.from_naive!(~N[2017-11-21 10:29:12], ~N[2017-11-23 10:29:12])
      iex> Period.Relationship.abut?(a, b) && Period.Relationship.abut?(b, a)
      true
  """
  @spec abut?(Period.t(), Period.t()) :: boolean
  @spec abut?(Period.t(), Period.t(), keyword) :: boolean
  def abut?(%Period{} = a, %Period{} = b, opts \\ []) do
    is_abutted_left?(a, b, opts) || is_abutted_right?(a, b, opts)
  end

  @doc """
  Determine if the first period is abutted on its left side by the second period.

  For details on what `abut` means, see: `abut?/3`.

  ## Examples

      iex> a = Period.from_naive!(~N[2017-11-21 10:29:12], ~N[2017-11-23 10:29:12])
      iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12])
      iex> Period.Relationship.is_abutted_left?(a, b)
      true
  """
  @spec is_abutted_left?(Period.t(), Period.t()) :: boolean
  @spec is_abutted_left?(Period.t(), Period.t(), keyword) :: boolean
  def is_abutted_left?(%Period{} = a, %Period{} = b, opts \\ []) do
    :abut_left == period_relationship(a, b, opts)
  end

  @doc """
  Determine if the first period is abutted on its right side by the second period.

  For details on what `abut` means, see: `abut?/3`.
## Examples iex> a = Period.from_naive!(~N[2017-11-21 10:29:12], ~N[2017-11-23 10:29:12]) iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12]) iex> Period.Relationship.is_abutted_right?(b, a) true """ @spec is_abutted_right?(Period.t(), Period.t()) :: boolean @spec is_abutted_right?(Period.t(), Period.t(), keyword) :: boolean def is_abutted_right?(%Period{} = a, %Period{} = b, opts \\ []) do :abut_right == period_relationship(a, b, opts) end @doc """ Determine if the first period overlaps the second one. Overlaping means having being at least one common point: ### End exclusive - Start inclusive ```markdown … . . .) [. . . . … ``` ### End inclusive - Start exclusive ```markdown … . . .] (. . . . … ``` ### Both inclusive ```markdown … . .] [. . . . … ``` ### Both exclusive ```markdown … . . . .) (. . . . … ``` ## Examples iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> b = Period.from_naive!(~N[2017-11-21 10:29:12], ~N[2017-11-23 10:29:12]) iex> Period.Relationship.overlaps?(a, b) true """ @spec overlaps?(Period.t(), Period.t()) :: boolean @spec overlaps?(Period.t(), Period.t(), keyword) :: boolean def overlaps?(%Period{} = a, %Period{} = b, opts \\ []) do case period_relationship(a, b, opts) do :intersect_start -> true :intersect_end -> true :contains -> true :contained -> true _ -> false end end @doc """ Determine if the first period contains the second one. Containment means having being at least the same period as the second one, but overlaping it on at least one side: ### Overlap both ends ```markdown [. . . . . . .] [. . . . .] ``` ### Overlap start ```markdown [. . . . . . .] [. . . . . .] ``` ### Overlap end ```markdown [. . . . . . .] [. . . . . .] ``` ## Examples iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> b = Period.from_naive!(~N[2017-11-20 18:32:21], ~N[2017-11-22 07:32:21]) iex> Period.Relationship.contains?(a, b) true """ @spec contains?(Period.t(), Period.t()) :: boolean @spec contains?(Period.t(), Period.t(), keyword) :: boolean def contains?(%Period{} = a, %Period{} = b, opts \\ []) do :contains == period_relationship(a, b, opts) end @doc """ Determine if the first period is contained by the second one. For details on what `contained` means, see: `contains?/3`. ``` ## Examples iex> a = Period.from_naive!(~N[2017-11-20 18:32:21], ~N[2017-11-22 07:32:21]) iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> Period.Relationship.is_contained_by?(a, b) true """ @spec is_contained_by?(Period.t(), Period.t()) :: boolean @spec is_contained_by?(Period.t(), Period.t(), keyword) :: boolean def is_contained_by?(%Period{} = a, %Period{} = b, opts \\ []) do contains?(b, a, opts) end @doc """ Determine if both periods span the same time. ## Non-strict comparison ### Simple ```markdown [. . . . .] [. . . . .] ``` ### Exclusive overlap start ```markdown (. . . . . . .] [. . . . . .] ``` ### Exclusive overlap end ```markdown [. . . . . . .) [. . . . . .] ``` ## Strict comparison For strict comparison timespan and boundry states need to be the same. 
## Examples iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> Period.Relationship.same?(a, b) true iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> Period.Relationship.same?(a, b, strict: true) true iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21.000001]) iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21], upper_state: :included) iex> Period.Relationship.same?(a, b) true iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21.000001]) iex> b = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21], upper_state: :included) iex> Period.Relationship.same?(a, b, strict: true) false """ @spec same?(Period.t(), Period.t()) :: boolean @spec same?(Period.t(), Period.t(), keyword) :: boolean def same?(%Period{} = a, %Period{} = b, opts \\ []) do with :same <- period_relationship(a, b, opts) do if Keyword.get(opts, :strict, false) do Period.get_boundry_notation(a) == Period.get_boundry_notation(b) else true end else _ -> false end end @doc """ If both periods overlap returns a new period of the intersection of both. For details on what `overlapping` means, see: `overlaps?/3`. ## Examples iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-22 14:32:21]) iex> b = Period.from_naive!(~N[2017-11-21 10:29:12], ~N[2017-11-23 10:29:12]) iex> {:ok, period} = Period.Relationship.intersection(a, b) iex> period #Period<[#DateTime<2017-11-21 10:29:12.000000Z>, #DateTime<2017-11-22 14:32:21.000000Z>)> """ @spec intersection(Period.t(), Period.t()) :: {:ok, Period.t()} | {:error, binary} @spec intersection(Period.t(), Period.t(), keyword) :: {:ok, Period.t()} | {:error, binary} def intersection(%Period{} = a, %Period{} = b, opts \\ []) do case period_relationship(a, b, opts) do :intersect_start -> opts = [ lower_state: a.lower_state, upper_state: b.upper_state ] Period.new(a.lower, b.upper, opts) :intersect_end -> opts = [ lower_state: b.lower_state, upper_state: a.upper_state ] Period.new(b.lower, a.upper, opts) :contains -> {:ok, a} :contained -> {:ok, b} _ -> {:error, "Periods do not intersect"} end end @doc """ If both periods do not overlap or abut it returns a new period of the gap between both. For details on what `overlapping` means, see: `overlaps?/3`. For details on what `abut` means, see: `abut?/3`. 
## Examples iex> a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12]) iex> b = Period.from_naive!(~N[2017-11-22 14:32:21], ~N[2017-11-23 10:29:12]) iex> Period.Relationship.gap(a, b) iex> {:ok, period} = Period.Relationship.gap(a, b) iex> period #Period<[#DateTime<2017-11-21 10:29:12.000000Z>, #DateTime<2017-11-22 14:32:21.000000Z>)> """ @spec gap(Period.t(), Period.t()) :: {:ok, Period.t()} | {:error, binary} @spec gap(Period.t(), Period.t(), keyword) :: {:ok, Period.t()} | {:error, binary} def gap(%Period{} = a, %Period{} = b, opts \\ []) do with false <- overlaps?(a, b, opts), false <- abut?(a, b, opts) do [a, b] = Enum.sort([a, b], &is_before?/2) opts = [ lower_state: invert_inclusion(a.upper_state), upper_state: invert_inclusion(b.lower_state) ] Period.new(a.upper, b.lower, opts) else true -> {:error, "Periods intersect or abut each other."} end end @spec invert_inclusion(Period.boundry_state()) :: Period.boundry_state() defp invert_inclusion(:included), do: :excluded defp invert_inclusion(:excluded), do: :included @spec period_relationship(Period.t(), Period.t(), Keyword.t()) :: relationship defp period_relationship(%Period{} = a, %Period{} = b, _opts) do a = Period.make_inclusive(a) b = Period.make_inclusive(b) cond do a.lower == b.lower && a.upper == b.upper -> :same a.lower <= b.lower && a.upper >= b.upper -> :contains a.lower >= b.lower && a.upper <= b.upper -> :contained a.lower < b.lower && a.upper in b.lower..b.upper -> :intersect_end a.upper > b.upper && a.lower in b.lower..b.upper -> :intersect_start a.upper + 1 == b.lower -> :abut_right a.lower - 1 == b.upper -> :abut_left a.upper < b.lower -> :before a.lower > b.upper -> :after end end end
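# A sketch of the set-algebra helpers above; the timestamps reuse the style of
# the doctests. It depends on Period.from_naive!/2 from the parent module (not
# shown in this file).
defmodule Period.RelationshipExample do
  def demo do
    a = Period.from_naive!(~N[2017-11-20 14:32:21], ~N[2017-11-21 10:29:12])
    b = Period.from_naive!(~N[2017-11-22 14:32:21], ~N[2017-11-23 10:29:12])

    # Disjoint, non-abutting periods have a well-defined gap...
    {:ok, _gap} = Period.Relationship.gap(a, b)

    # ...and therefore no intersection.
    {:error, _reason} = Period.Relationship.intersection(a, b)
  end
end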
lib/period/relationship.ex
0.946138
0.892516
relationship.ex
starcoder
defmodule Couch.Test.Setup do
  @moduledoc """
  Allows chaining of setup functions.
  Example of use:

  ```
  alias Couch.Test.Utils

  def with_db_name(context, setup) do
    setup =
      setup
        |> Step.Start.new(:start, extra_apps: [:chttpd])
        |> Step.User.new(:admin, roles: [:server_admin])
        |> Setup.run()

    context =
      Map.merge(context, %{
        db_name: Utils.random_name("db"),
        base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
        user: setup |> Setup.get(:admin) |> Step.User.name()
      })
    {context, setup}
  end

  @tag setup: &__MODULE__.with_db_name/2
  test "Create", %{db_name: db_name, user: user} do
    ...
  end
  ```
  """
  import ExUnit.Callbacks, only: [on_exit: 1]
  import ExUnit.Assertions, only: [assert: 2]

  require Logger

  alias Couch.Test.Setup
  alias Couch.Test.Setup.Step

  defstruct stages: [], by_type: %{}, state: %{}

  def step(%Setup{stages: stages} = setup, id, step) do
    %{setup | stages: [{id, step} | stages]}
  end

  defp setup_step({id, step}, %Setup{state: state, by_type: by_type} = setup) do
    %module{} = step
    # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
    Logger.debug("Calling 'setup/2' for '#{module}'")
    step = module.setup(setup, step)
    state = Map.put(state, id, step)
    by_type = Map.update(by_type, module, [id], fn ids -> [id | ids] end)

    on_exit(fn ->
      # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
      Logger.debug("Calling 'teardown/3' for '#{module}'")

      try do
        module.teardown(setup, step)
        :ok
      catch
        _ -> :ok
        _, _ -> :ok
      end
    end)

    {{id, step}, %{setup | state: state, by_type: by_type}}
  end

  def run(%Setup{stages: stages} = setup) do
    {stages, setup} =
      stages
      |> Enum.reverse
      |> Enum.map_reduce(setup, &setup_step/2)

    %{setup | stages: stages}
  end

  def setup(ctx) do
    Map.get(ctx, :__setup)
  end

  def setup(ctx, setup_fun) do
    setup =
      %Setup{}
      |> Step.Config.new(:test_config, config_file: nil)

    {ctx, setup} = setup_fun.(ctx, setup)
    assert not Map.has_key?(ctx, :__setup), "Key `__setup` is reserved for internal purposes"
    Map.put(ctx, :__setup, setup)
  end

  def completed?(%Setup{by_type: by_type}, step) do
    Map.has_key?(by_type, step)
  end

  def all_for(%Setup{by_type: by_type, state: state}, step_module) do
    Map.take(state, by_type[step_module] || [])
  end

  def reduce_for(setup, step_module, acc, fun) do
    Enum.reduce(all_for(setup, step_module), acc, fun)
  end

  def get(%Setup{state: state}, id) do
    state[id]
  end
end
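# A small sketch of introspecting a finished setup, e.g. from within a test;
# Step.User is one of the step modules referenced in the @moduledoc.
defmodule Couch.Test.SetupExample do
  alias Couch.Test.Setup
  alias Couch.Test.Setup.Step

  def user_stage_ids(%Setup{} = setup) do
    # all_for/2 maps stage id => step struct, so reduce_for/4 receives
    # {id, step} tuples; here we collect the ids of every Step.User stage.
    Setup.reduce_for(setup, Step.User, [], fn {id, _step}, acc -> [id | acc] end)
  end
end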
test/elixir/lib/setup.ex
0.621541
0.71852
setup.ex
starcoder
defmodule Tensorflow.GraphTransferInfo.Destination do @moduledoc false use Protobuf, enum: true, syntax: :proto3 @type t :: integer | :NOP | :HEXAGON field(:NOP, 0) field(:HEXAGON, 1) end defmodule Tensorflow.GraphTransferNodeInput do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ node_id: integer, output_port: integer } defstruct [:node_id, :output_port] field(:node_id, 1, type: :int32) field(:output_port, 2, type: :int32) end defmodule Tensorflow.GraphTransferNodeInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ name: String.t(), node_id: integer, type_name: String.t(), soc_op_id: integer, padding_id: integer, input_count: integer, output_count: integer } defstruct [ :name, :node_id, :type_name, :soc_op_id, :padding_id, :input_count, :output_count ] field(:name, 1, type: :string) field(:node_id, 2, type: :int32) field(:type_name, 3, type: :string) field(:soc_op_id, 4, type: :int32) field(:padding_id, 5, type: :int32) field(:input_count, 6, type: :int32) field(:output_count, 7, type: :int32) end defmodule Tensorflow.GraphTransferConstNodeInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ name: String.t(), node_id: integer, shape: [integer], data: binary, dtype: Tensorflow.DataType.t() } defstruct [:name, :node_id, :shape, :data, :dtype] field(:name, 1, type: :string) field(:node_id, 2, type: :int32) field(:shape, 3, repeated: true, type: :int64) field(:data, 4, type: :bytes) field(:dtype, 5, type: Tensorflow.DataType, enum: true) end defmodule Tensorflow.GraphTransferNodeInputInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ node_id: integer, node_input: [Tensorflow.GraphTransferNodeInput.t()] } defstruct [:node_id, :node_input] field(:node_id, 1, type: :int32) field(:node_input, 2, repeated: true, type: Tensorflow.GraphTransferNodeInput ) end defmodule Tensorflow.GraphTransferNodeOutputInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ node_id: integer, max_byte_size: [integer] } defstruct [:node_id, :max_byte_size] field(:node_id, 1, type: :int32) field(:max_byte_size, 2, repeated: true, type: :int32) end defmodule Tensorflow.GraphTransferGraphInputNodeInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ name: String.t(), shape: [integer], dtype: Tensorflow.DataType.t() } defstruct [:name, :shape, :dtype] field(:name, 1, type: :string) field(:shape, 2, repeated: true, type: :int64) field(:dtype, 3, type: Tensorflow.DataType, enum: true) end defmodule Tensorflow.GraphTransferGraphOutputNodeInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ name: String.t(), shape: [integer], dtype: Tensorflow.DataType.t() } defstruct [:name, :shape, :dtype] field(:name, 1, type: :string) field(:shape, 2, repeated: true, type: :int64) field(:dtype, 3, type: Tensorflow.DataType, enum: true) end defmodule Tensorflow.GraphTransferInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ node_info: [Tensorflow.GraphTransferNodeInfo.t()], const_node_info: [Tensorflow.GraphTransferConstNodeInfo.t()], node_input_info: [Tensorflow.GraphTransferNodeInputInfo.t()], node_output_info: [Tensorflow.GraphTransferNodeOutputInfo.t()], graph_input_node_info: [ Tensorflow.GraphTransferGraphInputNodeInfo.t() ], graph_output_node_info: [ Tensorflow.GraphTransferGraphOutputNodeInfo.t() ], destination: Tensorflow.GraphTransferInfo.Destination.t() } defstruct [ :node_info, :const_node_info, :node_input_info, 
:node_output_info, :graph_input_node_info, :graph_output_node_info, :destination ] field(:node_info, 1, repeated: true, type: Tensorflow.GraphTransferNodeInfo) field(:const_node_info, 2, repeated: true, type: Tensorflow.GraphTransferConstNodeInfo ) field(:node_input_info, 3, repeated: true, type: Tensorflow.GraphTransferNodeInputInfo ) field(:node_output_info, 4, repeated: true, type: Tensorflow.GraphTransferNodeOutputInfo ) field(:graph_input_node_info, 5, repeated: true, type: Tensorflow.GraphTransferGraphInputNodeInfo ) field(:graph_output_node_info, 6, repeated: true, type: Tensorflow.GraphTransferGraphOutputNodeInfo ) field(:destination, 7, type: Tensorflow.GraphTransferInfo.Destination, enum: true ) end
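# A hedged sketch of building and serializing one of the generated messages.
# It assumes the protobuf-elixir runtime, whose `use Protobuf` injects
# encode/1 and decode/1 on each generated module; if this project pins a
# different protobuf version, the call sites may vary.
defmodule Tensorflow.GraphTransferInfoExample do
  def roundtrip do
    input = %Tensorflow.GraphTransferNodeInput{node_id: 1, output_port: 0}

    info = %Tensorflow.GraphTransferInfo{
      node_input_info: [
        %Tensorflow.GraphTransferNodeInputInfo{node_id: 1, node_input: [input]}
      ],
      # enum fields accept the atoms declared above (:NOP | :HEXAGON)
      destination: :NOP
    }

    binary = Tensorflow.GraphTransferInfo.encode(info)
    Tensorflow.GraphTransferInfo.decode(binary)
  end
end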
lib/tensorflow/core/framework/graph_transfer_info.pb.ex
0.776919
0.523786
graph_transfer_info.pb.ex
starcoder
defmodule Notoriety.Note.Meta do @moduledoc false defstruct title: nil, tags: [] end defmodule Notoriety.Note do @moduledoc """ A `Note` is the focal point of Notoriety, representing a markdown file with any tags parsed out of yaml front matter. """ alias Notoriety.Note.Meta @doc """ A parsed `Note` is a set of its contents and metadata to be created and manipulated via the `Notoriety.Note` module. """ defstruct meta: %Meta{}, text: nil @doc """ Construct a new `Note` using the given options. Options: * `:text` (required) - the contents of the note without the front matter * `:tags` - any tags for the note * `:title` - the specific title for the note, computed from the text if not given """ def new(opts) do text = Keyword.fetch!(opts, :text) tags = Keyword.get(opts, :tags, []) |> List.wrap() title = Keyword.get_lazy(opts, :title, get_title(text)) %__MODULE__{ meta: %Meta{ title: title, tags: tags }, text: text } end defp get_title(text) do fn -> text |> String.split("\n", trim: true) |> List.first() |> String.replace(~r/#+\s+/, "") end end # TODO(adam): compute missing title on the fly instead of at construction? @doc """ Return the `Note`'s title. """ def title(%__MODULE__{meta: meta}), do: meta.title @doc """ Return the `Note`'s text. """ def text(%__MODULE__{text: text}), do: text @doc """ Return the `Note`'s tags. """ def tags(%__MODULE__{meta: meta}), do: meta.tags @doc """ Check if the note has the given tag. """ def has_tag?(%__MODULE__{meta: meta}, tag), do: Enum.member?(meta.tags, tag) @doc """ Parse a markdown file into a `Note`, extracting any tags given in the front matter if available. Currently returns a `Note` in _all_ cases; if the front matter fails to parse, it is simply included as a part of the text instead of being trimmed. """ def parse(raw) do case YamlFrontMatter.parse(raw) do {:ok, front_matter, body} -> tags = Map.get(front_matter, "tags", []) |> List.wrap() new(text: body, tags: tags) # TODO(adam): consider checking for attempted front matter {:error, :invalid_front_matter} -> new(text: raw) end end end
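# A short sketch of the front-matter flow described above; the markdown
# payload is illustrative.
defmodule Notoriety.NoteExample do
  alias Notoriety.Note

  def demo do
    raw = """
    ---
    tags: elixir
    ---
    # My Note

    Body text.
    """

    note = Note.parse(raw)

    # Tags come from the front matter; the title falls back to the first line
    # of the body with the leading '#' markers stripped ("My Note").
    {Note.title(note), Note.tags(note), Note.has_tag?(note, "elixir")}
  end
end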
lib/notoriety/note.ex
0.680666
0.527073
note.ex
starcoder
defmodule Exfacebook do
  use GenServer

  use Exfacebook.Macros

  @moduledoc ~S"""
  Exfacebook implements the Graph API:

  * `Exfacebook.Api` - graph calls using an access token to the Facebook
  Graph API; depending on the response it returns values decoded from JSON.

  * `Exfacebook.Config` - specify `api_version` and http requests for hackney.

  Configuration example(optional variables):

      config :exfacebook,
        api_version: "v2.6",
        http_options: [recv_timeout: :infinity],
        id: "your_app_id_optional",
        secret: "your_app_secret_optional"

  How to use the API?

  ## Examples:

  * `start_link` - if you want to have a worker you can start the Exfacebook
  GenServer and use `pid` as the entry param for API methods:

  ```elixir
  {:ok, pid} = Exfacebook.start_link
  ```

  * `get_object` - get user or page related attributes, in case you decide
  to use specific params for the Facebook API like `fields`

  ```elixir
  {:ok, %{"id" => id, "picture" => picture}} = Exfacebook.get_object(
    pid, :me, %{access_token: "access-token", fields: "id, picture"})
  ```

  * `get_connections` - get collection related items and attributes(feed or home or friends):

  ```elixir
  {:ok, %{"data" => collection}} = response = Exfacebook.get_connections(
    pid, :feed, %{fields: "id, name", access_token: "access-token"})
  ```

  * `next_page`/`prev_page` - take next or prev collections using response
  from `get_connections`:

  ```elixir
  response = Exfacebook.get_connections(pid, :feed,
    %{fields: "id, name", access_token: "access-token"})
  response2 = Exfacebook.next_page(pid, response)
  response3 = Exfacebook.next_page(pid, response2)
  response4 = Exfacebook.prev_page(pid, response3)
  ```

  * `put_connections` - update actions in facebook; the example creates a
  new message in the feed:

  ```elixir
  Exfacebook.put_connections(:me, :feed,
    %{access_token: "access-token"}, %{message: "hello"})
  ```
  """

  def start_link(options \\ []) do
    GenServer.start_link(__MODULE__, [], options)
  end

  define_api :get_object, :get, [id, params]
  define_api :get_connections, :get, [id, name, params]
  define_api :next_page, :get, [response]
  define_api :prev_page, :get, [response]
  define_api :put_connections, :post, [id, name, params, body]

  @doc """
  Realtime updates using the subscriptions API.

  ## Examples:

  * `list_subscriptions` - returns the list of subscriptions

  ```elixir
  params = %{fields: "id,name"}
  {:ok, %{
    "data" => [
      %{"active" => true,
        "callback_url" => "https://example.com/client/subscriptions",
        "fields" => ["feed", "friends", "music"],
        "object" => "user"}]
    }
  } = Exfacebook.Api.list_subscriptions(params)
  ```

  * `subscribe` - subscribe to real time updates for `object`; `fields` should
  contain the fields to watch for updates("feed, friends").

  ```elixir
  Exfacebook.Api.subscribe("id-1",
    "friends, feed",
    "http://www.example.com/facebook/updates",
    "token-123")
  ```

  * `unsubscribe` - unsubscribe `object` from real time updates.

  ```elixir
  Exfacebook.Api.unsubscribe("id-1")
  ```
  """
  define_api :list_subscriptions, :get, [params], [batch: false]
  define_api :subscribe, :post, [object, fields, callback_url, verify_token], [batch: false]
  define_api :unsubscribe, :post, [object], [batch: false]

  @doc ~S"""
  You can use `delete_object` and `delete_connections` passing a pid or
  directly from the Api module. In case of missing permissions to delete
  items you will get an error object as the response.

  ## Examples:

  * `delete_connections` - delete item from connections

  ```elixir
  {:ok, response} = Exfacebook.Api.delete_connections(:me, :feed, %{
    ...
}) ``` * `delete_object` - delete item from Facebook data ```elixir {:ok, response} = Exfacebook.Api.delete_object("item-id") ``` """ define_api :delete_object, :delete, [id, params] define_api :delete_connections, :delete, [id, name, params] @doc ~S""" API for easy accessing basic Facebook API functions like get avatar image, put comment, image, video or make wall post. ## Examples: * `put_picture` - upload new photo to `id` feed ```elixir {:ok, response} = Exfacebook.Api.put_picture(:me, params, "/path/file.jpg") {:ok, response} = Exfacebook.Api.put_picture(:me, params, {:url, "http://www.example.com/file.jpg"}) ``` * `put_video` - upload new video to `id` feed ```elixir {:ok, response} = Exfacebook.Api.put_video(:me, params, "/path/file.mp4") {:ok, response} = Exfacebook.Api.put_video(:me, params, {:url, "http://www.example.com/file.mp4"}) ``` """ define_api :get_picture_data, :get, [id, params] define_api :put_picture, :post, [id, params, file], [batch: false] define_api :put_video, :post, [id, params, file], [batch: false] define_api :put_comment, :post, [id, params, message] define_api :put_wall_post, :post, [id, message, params, attachment] @doc ~S""" Make likes or delete this action for specific post: ## Examples: * `put_like` - like object, params should include `access_token` ```elixir {:ok, response} = Exfacebook.Api.put_like(:me, params) ``` * `delete_like` - unlike object, params should include `access_token` ```elixir {:ok, response} = Exfacebook.Api.delete_like(:me, params) ``` """ define_api :put_like, :post, [id, params] define_api :delete_like, :delete, [id, params] @doc ~S""" Fetches an access token with extended expiration time (ignoring expiration and other info). """ define_api :exchange_access_token, :get, [access_token], [batch: false] @doc ~S""" Fetches an access_token with extended expiration time, along with any other information provided by Facebook. See https://developers.facebook.com/docs/offline-access-deprecation/#extend_token (search for fb_exchange_token). """ define_api :exchange_access_token_info, :get, [access_token], [batch: false] @doc ~S""" Passing prepared params for batch processing using Facebook API. Params are coming like normal requests encoded to JSON and then Facebook emulate requests on their side: """ def batch(params, callback) do callback.([]) |> Api.batch(params) end def batch(callback), do: batch(%{}, callback) end
lib/exfacebook.ex
0.857753
0.673672
exfacebook.ex
starcoder
defmodule JSON do
  @moduledoc """
  Provides a RFC 7159, ECMA 404, and JSONTestSuite compliant JSON Encoder / Decoder
  """

  require Logger
  import JSON.Logger

  alias JSON.Decoder
  alias JSON.Encoder

  @vsn "1.0.2"

  @doc """
  Returns a JSON string representation of the Elixir term

  ## Examples

      iex> JSON.encode([result: "this will be a JSON result"])
      {:ok, "{\\\"result\\\":\\\"this will be a JSON result\\\"}"}

  """
  @spec encode(term) :: {atom, bitstring}
  defdelegate encode(term), to: Encoder

  @doc """
  Returns a JSON string representation of the Elixir term, raises errors when something bad happens

  ## Examples

      iex> JSON.encode!([result: "this will be a JSON result"])
      "{\\\"result\\\":\\\"this will be a JSON result\\\"}"

  """
  @spec encode!(term) :: bitstring
  def encode!(term) do
    case encode(term) do
      {:ok, value} -> value
      {:error, error_info} -> raise JSON.Encoder.Error, error_info: error_info
      _ -> raise JSON.Encoder.Error
    end
  end

  @doc """
  Converts a valid JSON string into an Elixir term

  ## Examples

      iex> JSON.decode("{\\\"result\\\":\\\"this will be an Elixir result\\\"}")
      {:ok, Enum.into([{"result", "this will be an Elixir result"}], Map.new)}

  """
  @spec decode(bitstring) :: {atom, term}
  @spec decode(charlist) :: {atom, term}
  defdelegate decode(bitstring_or_char_list), to: Decoder

  @doc """
  Converts a valid JSON string into an Elixir term, raises errors when something bad happens

  ## Examples

      iex> JSON.decode!("{\\\"result\\\":\\\"this will be an Elixir result\\\"}")
      Enum.into([{"result", "this will be an Elixir result"}], Map.new)

  """
  @spec decode!(bitstring) :: term
  @spec decode!(charlist) :: term
  def decode!(bitstring_or_char_list) do
    case decode(bitstring_or_char_list) do
      {:ok, value} ->
        log(:debug, fn ->
          "#{__MODULE__}.decode!(#{inspect(bitstring_or_char_list)}) was successful: #{inspect(value)}"
        end)

        value

      {:error, {:unexpected_token, tok}} ->
        log(:debug, fn ->
          "#{__MODULE__}.decode!(#{inspect(bitstring_or_char_list)}) unexpected token #{tok}"
        end)

        raise JSON.Decoder.UnexpectedTokenError, token: tok

      {:error, :unexpected_end_of_buffer} ->
        log(:debug, fn ->
          "#{__MODULE__}.decode!(#{inspect(bitstring_or_char_list)}) end of buffer"
        end)

        raise JSON.Decoder.UnexpectedEndOfBufferError

      e ->
        log(:debug, fn ->
          "#{__MODULE__}.decode!(#{inspect(bitstring_or_char_list)}) an unknown problem occurred #{inspect(e)}"
        end)

        # raise on unknown results instead of falling through with the logger's return value
        raise ArgumentError, "unable to decode JSON: #{inspect(e)}"
    end
  end
end
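# A brief round-trip sketch using only the functions documented above; the
# map literal is arbitrary example data, and the error case in the comment
# follows from decode/1 returning {:error, :unexpected_end_of_buffer} for
# truncated input.
defmodule JSONRoundTripExample do
  def run do
    {:ok, json} = JSON.encode(%{"answer" => 42})
    {:ok, decoded} = JSON.decode(json)

    # The bang variants raise instead of returning tuples, e.g. a truncated
    # document such as "{" raises JSON.Decoder.UnexpectedEndOfBufferError.
    decoded
  end
end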
node_modules/@snyk/snyk-hex-plugin/elixirsrc/deps/json/lib/json.ex
0.890669
0.519399
json.ex
starcoder
defmodule APDS9960.Proximity do
  @moduledoc "The proximity detection."

  alias APDS9960.{Comm, Sensor}

  @doc """
  Returns all the current proximity settings.
  """
  @spec settings(Sensor.t()) :: %{
          enabled: boolean,
          gain: 0..3,
          gain_compensation: %{enabled: boolean, mask: byte},
          interrupt_enabled: boolean,
          interrupt_persistence: byte,
          led_boost: 0..3,
          led_drive: 0..3,
          offset: %{down_left: integer, up_right: integer},
          pulse: %{count: byte, length: 0..3},
          saturation_interrupt_enabled: boolean,
          threshold: %{high: byte, low: byte}
        }
  def settings(%Sensor{} = sensor) do
    %{
      enabled: enabled?(sensor),
      interrupt_enabled: interrupt_enabled?(sensor),
      threshold: get_threshold(sensor),
      interrupt_persistence: get_interrupt_persistence(sensor),
      pulse: get_pulse(sensor),
      gain: get_gain(sensor),
      led_drive: get_led_drive(sensor),
      saturation_interrupt_enabled: saturation_interrupt_enabled?(sensor),
      led_boost: get_led_boost(sensor),
      offset: get_offset(sensor),
      gain_compensation: get_gain_compensation(sensor)
    }
  end

  ## Proximity Enable

  @spec enabled?(Sensor.t()) :: boolean
  def enabled?(%Sensor{transport: i2c}) do
    {:ok, %{proximity: value}} = Comm.get_enable(i2c)
    value == 1
  end

  @spec enable(Sensor.t(), 0 | 1) :: :ok
  def enable(%Sensor{transport: i2c}, value \\ 1) do
    Comm.set_enable(i2c, proximity: value)
  end

  @spec interrupt_enabled?(Sensor.t()) :: boolean
  def interrupt_enabled?(%Sensor{transport: i2c}) do
    {:ok, %{proximity_interrupt: value}} = Comm.get_enable(i2c)
    value == 1
  end

  @spec enable_interrupt(Sensor.t(), 0 | 1) :: :ok
  def enable_interrupt(%Sensor{transport: i2c}, value \\ 1) do
    Comm.set_enable(i2c, proximity_interrupt: value)
  end

  ## Proximity low/high threshold

  @spec get_threshold(Sensor.t()) :: %{high: byte, low: byte}
  def get_threshold(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_proximity_threshold(i2c)
    %{low: x.low, high: x.high}
  end

  @spec set_threshold(Sensor.t(), Enum.t()) :: :ok
  def set_threshold(%Sensor{transport: i2c}, opts) do
    Comm.set_proximity_threshold(i2c, opts)
  end

  ## Proximity Interrupt Persistence

  @spec get_interrupt_persistence(Sensor.t()) :: 0..15
  def get_interrupt_persistence(%Sensor{transport: i2c}) do
    {:ok, %{proximity: value}} = Comm.get_interrupt_persistence(i2c)
    value
  end

  @spec set_interrupt_persistence(Sensor.t(), 0..15) :: :ok
  def set_interrupt_persistence(%Sensor{transport: i2c}, value) do
    Comm.set_interrupt_persistence(i2c, proximity: value)
  end

  ## Proximity pulse count and length

  @spec get_pulse(Sensor.t()) :: %{count: byte, length: 0..3}
  def get_pulse(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_proximity_pulse(i2c)
    %{count: x.pulse_count, length: x.pulse_length}
  end

  @spec set_pulse(Sensor.t(), Enum.t()) :: :ok
  def set_pulse(%Sensor{transport: i2c}, opts) do
    Comm.set_proximity_pulse(i2c, opts)
  end

  ## Proximity Gain Control

  @spec get_gain(Sensor.t()) :: 0..3
  def get_gain(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_control(i2c)
    x.proximity_gain
  end

  @spec set_gain(Sensor.t(), 0..3) :: :ok
  def set_gain(%Sensor{transport: i2c}, value) do
    Comm.set_control(i2c, proximity_gain: value)
  end

  ## LED Drive Strength

  @spec get_led_drive(Sensor.t()) :: 0..3
  def get_led_drive(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_control(i2c)
    x.led_drive_strength
  end

  @spec set_led_drive(Sensor.t(), 0..3) :: :ok
  def set_led_drive(%Sensor{transport: i2c}, value) do
    Comm.set_control(i2c, led_drive_strength: value)
  end

  ## Proximity Saturation Interrupt Enable

  @spec saturation_interrupt_enabled?(Sensor.t()) :: boolean
  def saturation_interrupt_enabled?(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_config2(i2c)
    x.proximity_saturation_interrupt == 1
  end

  @spec enable_saturation_interrupt(Sensor.t(), 0 | 1) :: :ok
  def enable_saturation_interrupt(%Sensor{transport: i2c}, value \\ 1) do
    Comm.set_config2(i2c, proximity_saturation_interrupt: value)
  end

  ## Proximity/Gesture LED Boost

  @spec get_led_boost(Sensor.t()) :: 0..3
  def get_led_boost(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_config2(i2c)
    x.led_boost
  end

  @spec set_led_boost(Sensor.t(), 0..3) :: :ok
  def set_led_boost(%Sensor{transport: i2c}, value \\ 1) do
    Comm.set_config2(i2c, led_boost: value)
  end

  ## Proximity Status

  @spec status(Sensor.t()) :: %{interrupt: boolean, saturation: boolean, valid: boolean}
  def status(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.status(i2c)

    %{
      interrupt: x.proximity_interrupt == 1,
      saturation: x.proximity_or_gesture_saturation == 1,
      valid: x.proximity_valid == 1
    }
  end

  ## Proximity Data

  @spec read_proximity(Sensor.t(), Enum.t()) :: byte
  def read_proximity(%Sensor{} = sensor, _opts \\ []) do
    {:ok, data} = Comm.proximity_data(sensor.transport)
    :binary.decode_unsigned(data)
  end

  ## Proximity Offset

  @spec get_offset(Sensor.t()) :: %{down_left: -127..127, up_right: -127..127}
  def get_offset(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_proximity_offset(i2c)

    %{
      up_right: x.up_right,
      down_left: x.down_left
    }
  end

  @spec set_offset(Sensor.t(), Enum.t()) :: :ok
  def set_offset(%Sensor{transport: i2c}, opts) do
    Comm.set_proximity_offset(i2c, opts)
  end

  ## Proximity Gain Compensation Enable

  @spec get_gain_compensation(Sensor.t()) :: %{enabled: boolean, mask: byte}
  def get_gain_compensation(%Sensor{transport: i2c}) do
    {:ok, x} = Comm.get_config3(i2c)

    %{
      enabled: x.proximity_gain_compensation == 1,
      mask: x.proximity_mask
    }
  end

  @spec set_gain_compensation(Sensor.t(), 0..14) :: :ok
  def set_gain_compensation(%Sensor{transport: i2c}, mask) do
    Comm.set_config3(i2c, mask: mask)
  end

  ## Proximity Interrupt Clear

  @spec clear_interrupt(Sensor.t()) :: :ok
  def clear_interrupt(%Sensor{transport: i2c}) do
    Comm.clear_proximity_interrupt(i2c)
  end
end
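# A minimal read sketch built only from the functions above. It assumes the
# caller already holds an initialized %APDS9960.Sensor{}; opening the sensor
# is outside this module's scope, and the gain value is an arbitrary example
# within the documented 0..3 range.
defmodule ProximityReadExample do
  alias APDS9960.Proximity

  def setup_and_read(sensor) do
    :ok = Proximity.enable(sensor)
    :ok = Proximity.set_gain(sensor, 2)

    # Only read once the status register reports valid proximity data.
    if Proximity.status(sensor).valid do
      {:ok, Proximity.read_proximity(sensor)}
    else
      {:error, :not_ready}
    end
  end
end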
lib/apds9960/proximity.ex
0.876892
0.420897
proximity.ex
starcoder
defmodule CatalogApi.Address do
  @moduledoc """
  Defines the CatalogApi.Address struct and functions which are responsible
  for validation and interpretation of physical shipping addresses as they
  relate to CatalogApi.

  To see the CatalogApi documentation for what is and isn't a valid Address
  see `http://username.catalogapi.com/docs/methods/cart_methods/#cart_set_address`

  An overview of the address fields is as follows:

  - `first_name` (required): The first name of the person receiving shipment.
  - `last_name` (required): The last name of the person receiving shipment.
  - `address_1` (required): The street address.
  - `address_2` (optional): The second line of the street address.
  - `address_3` (optional): The third line of the street address.
  - `city` (required): The city.
  - `state_province` (required): The state or province. If it is a US state,
    this should be the 2 digit abbreviation. (Example: OH)
  - `postal_code` (required): The postal code. This should be a string.
  - `country` (required): The ISO 3166-1 alpha-2 country code.
  - `email` (optional): The email of the person receiving shipment.
  - `phone_number` (optional): The phone number of the person receiving shipment.
  """

  alias CatalogApi.Address
  alias CatalogApi.Address.Email
  alias CatalogApi.Address.Iso3166
  alias CatalogApi.StructHelper

  @derive Jason.Encoder
  defstruct first_name: "",
            last_name: "",
            address_1: "",
            address_2: "",
            address_3: "",
            city: "",
            state_province: "",
            postal_code: "",
            country: "",
            email: "",
            phone_number: ""

  @type t :: %Address{}
  @type invalid_address_error ::
          {:invalid_address, %{optional(atom()) => list(String.t())}}

  @valid_fields ~w(first_name last_name address_1 address_2 address_3 city
                   state_province postal_code country email phone_number)

  @spec cast(map()) :: t
  def cast(address_json) when is_map(address_json) do
    address_json
    # To avoid dynamically creating atoms
    |> filter_unknown_properties
    |> Enum.map(fn {k, v} -> {String.to_atom(k), v} end)
    |> Enum.into(%{})
    |> to_struct!
  end

  defp filter_unknown_properties(map) do
    Enum.filter(map, fn {k, _v} -> k in @valid_fields end)
  end

  defp to_struct!(map), do: struct(Address, map)

  def extract_address_from_json(%{"cart_view_response" => %{"cart_view_result" => maybe_address}}) do
    {:ok, cast(maybe_address)}
  end

  def extract_address_from_json(_), do: {:error, :unparseable_catalog_api_address}

  @doc """
  Validates a map with string or atom keys that is intended to represent a
  CatalogApi address.

  If the params are valid, `:ok` is returned. If there are validation errors,
  then an error tuple is returned that enumerates the field-specific errors.

  To see the CatalogApi documentation for what is and isn't a valid Address
  see `http://username.catalogapi.com/docs/methods/cart_methods/#cart_set_address`

  ## Example

      iex> address = %{
      ...>   first_name: "Jo",
      ...>   last_name: "Bob",
      ...>   address_1: "123 Street Road",
      ...>   city: "Cleveland",
      ...>   state_province: "OH",
      ...>   postal_code: "44444",
      ...>   country: "US"}
      ...> CatalogApi.Address.validate_params(address)
      :ok

  This function also properly validates a map where the keys are strings.

  ## Example

      iex> address = %{
      ...>   "first_name" => "Jo",
      ...>   "last_name" => "Bob",
      ...>   "address_1" => "123 Street Road",
      ...>   "city" => "Cleveland",
      ...>   "state_province" => "OH",
      ...>   "postal_code" => "44444",
      ...>   "country" => "US"}
      ...> CatalogApi.Address.validate_params(address)
      :ok

  """
  @spec validate_params(t | map()) :: :ok | {:error, invalid_address_error}
  def validate_params(%Address{} = address), do: validate(address)

  def validate_params(params) when is_map(params) do
    with {:ok, address_struct} <- convert_params_to_struct(params) do
      validate(address_struct)
    end
  end

  defp convert_params_to_struct(params) do
    with {:ok, filtered_params} <- filter_disallowed_fields(params),
         {:ok, atom_params} <- keys_to_atoms(filtered_params) do
      to_struct(atom_params)
    end
  end

  @spec keys_to_atoms(Enum.t()) :: {:ok, list({atom(), any()})}
  defp keys_to_atoms(fields) do
    {:ok, Enum.map(fields, fn {k, v} -> {ensure_atom(k), v} end)}
  end

  defp ensure_atom(value) when is_atom(value), do: value
  defp ensure_atom(value) when is_binary(value), do: String.to_atom(value)

  @spec filter_disallowed_fields(map()) :: {:ok, list({String.t() | atom(), any()})}
  defp filter_disallowed_fields(fields) do
    {:ok, allowed_fields_atoms} = StructHelper.allowed_fields(Address)
    {:ok, allowed_fields_strings} = StructHelper.allowed_fields_as_strings(Address)
    allowed_fields = allowed_fields_atoms ++ allowed_fields_strings
    {:ok, Enum.filter(fields, fn {k, _} -> k in allowed_fields end)}
  end

  defp to_struct(map) do
    {:ok, struct(Address, map)}
  end

  @doc """
  Validates an address struct to ensure that its values will not be rejected
  by CatalogApi endpoints. This ensures that an error can be thrown before the
  CatalogApi endpoint is actually hit.

  If the Address struct is valid, `:ok` is returned. If there are validation
  errors, then an error tuple is returned that enumerates the field-specific
  errors.

  To see the CatalogApi documentation for what is and isn't a valid Address
  see `http://username.catalogapi.com/docs/methods/cart_methods/#cart_set_address`

  ## Examples

      iex> address = %CatalogApi.Address{
      ...>   first_name: "Jo",
      ...>   last_name: "Bob",
      ...>   address_1: "123 Street Road",
      ...>   city: "Cleveland",
      ...>   state_province: "OH",
      ...>   postal_code: "44444",
      ...>   country: "US"}
      ...> CatalogApi.Address.validate(address)
      :ok

      iex> address = %CatalogApi.Address{
      ...>   first_name: "Jo",
      ...>   last_name: "Bob",
      ...>   address_1: "123 Street Road",
      ...>   city: "",
      ...>   state_province: "OH",
      ...>   postal_code: "44444",
      ...>   country: "AJ"}
      ...> CatalogApi.Address.validate(address)
      {:error,
        {:invalid_address,
          %{country: ["country code must be valid ISO 3166-1 alpha 2 country code"],
            city: ["cannot be blank"]}}}

  """
  @spec validate(t) :: :ok | {:error, invalid_address_error}
  def validate(%Address{} = address) do
    {:ok, allowed_fields} = StructHelper.allowed_fields(Address)

    errors =
      allowed_fields
      |> Enum.reduce(%{}, fn field, acc ->
        maybe_errors = validate_field(field, Map.fetch!(address, field))
        Map.merge(acc, maybe_errors)
      end)

    case errors do
      map when map == %{} -> :ok
      errors -> {:error, {:invalid_address, errors}}
    end
  end

  @doc """
  Returns a valid fake address. Useful for testing.
  """
  @spec fake_valid_address() :: t()
  def fake_valid_address do
    %Address{
      first_name: "John",
      last_name: "Doe",
      address_1: "123 Street Road",
      city: "Cleveland",
      state_province: "OH",
      postal_code: "44444",
      country: "US"
    }
  end

  # TODO: Think about validation for state_province. Through poking the API,
  # this can be anywhere between 1 and 50 alphanumeric characters despite the
  # CatalogApi docstring saying that it must be 2 characters for US states. I
  # guess outside of the US there is no such validation restriction? Maybe we
  # can specially validate this field if the country is "US"

  @doc """
  Validates a specific address field in the context of what is valid as input
  to a CatalogApi address.
  """
  @spec validate_field(atom(), any()) :: map()
  def validate_field(:first_name, ""), do: %{first_name: ["cannot be blank"]}

  def validate_field(:first_name, first_name) when is_binary(first_name) do
    validate_field_length(:first_name, first_name, 40)
  end

  def validate_field(:first_name, _), do: %{first_name: ["must be a string"]}

  def validate_field(:last_name, ""), do: %{last_name: ["cannot be blank"]}

  def validate_field(:last_name, last_name) when is_binary(last_name) do
    validate_field_length(:last_name, last_name, 40)
  end

  def validate_field(:last_name, _), do: %{last_name: ["must be a string"]}

  def validate_field(:address_1, ""), do: %{address_1: ["cannot be blank"]}

  def validate_field(:address_1, address_1) when is_binary(address_1) do
    validate_field_length(:address_1, address_1, 75)
  end

  def validate_field(:address_1, _), do: %{address_1: ["must be a string"]}

  def validate_field(:address_2, address_2) when is_binary(address_2) do
    validate_field_length(:address_2, address_2, 60)
  end

  def validate_field(:address_2, _), do: %{address_2: ["must be a string"]}

  def validate_field(:address_3, address_3) when is_binary(address_3) do
    validate_field_length(:address_3, address_3, 60)
  end

  def validate_field(:address_3, _), do: %{address_3: ["must be a string"]}

  def validate_field(:city, ""), do: %{city: ["cannot be blank"]}

  def validate_field(:city, city) when is_binary(city) do
    validate_field_length(:city, city, 40)
  end

  def validate_field(:city, _), do: %{city: ["must be a string"]}

  def validate_field(:state_province, ""), do: %{state_province: ["cannot be blank"]}

  def validate_field(:state_province, state_province) when is_binary(state_province) do
    validate_field_length(:state_province, state_province, 50)
  end

  def validate_field(:state_province, _), do: %{state_province: ["must be a string"]}

  def validate_field(:postal_code, ""), do: %{postal_code: ["cannot be blank"]}

  def validate_field(:postal_code, postal_code) when is_binary(postal_code) do
    validate_field_length(:postal_code, postal_code, 15)
  end

  def validate_field(:postal_code, _), do: %{postal_code: ["must be a string"]}

  def validate_field(:country, ""), do: %{country: ["cannot be blank"]}

  def validate_field(:country, country) when is_binary(country) do
    case Iso3166.validate(country) do
      :ok -> %{}
      :error -> %{country: ["country code must be valid ISO 3166-1 alpha 2 country code"]}
    end
  end

  def validate_field(:country, _), do: %{country: ["must be a string"]}

  def validate_field(:email, ""), do: %{}

  def validate_field(:email, email) when is_binary(email) do
    cond do
      String.length(email) > 254 -> %{email: ["cannot be longer than 254 characters"]}
      Email.valid?(email) -> %{}
      true -> %{email: ["must be a valid email"]}
    end
  end

  def validate_field(:email, _), do: %{email: ["must be a string"]}

  def validate_field(:phone_number, ""), do: %{}

  def validate_field(:phone_number, phone_number) when is_binary(phone_number) do
    validate_field_length(:phone_number, phone_number, 20)
  end

  def validate_field(:phone_number, _), do: %{phone_number: ["must be a string"]}

  def validate_field(_field, _value), do: %{}

  defp validate_field_length(field, value, max_length) when is_binary(value) do
    if String.length(value) > max_length do
      %{field => ["cannot be longer than #{max_length} characters"]}
    else
      %{}
    end
  end
end
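# A short sketch of surfacing the validation errors above to a caller. The
# module name is illustrative; it uses only functions defined in this module,
# and the error shape matches the doctest for validate/1.
defmodule AddressValidationExample do
  alias CatalogApi.Address

  def errors_for(params) do
    case Address.validate_params(params) do
      :ok ->
        []

      {:error, {:invalid_address, errors}} ->
        # errors maps each field to a list of human-readable messages
        Enum.map(errors, fn {field, messages} ->
          "#{field}: #{Enum.join(messages, ", ")}"
        end)
    end
  end
end

# AddressValidationExample.errors_for(%{"city" => "Cleveland"})
# returns one message per missing or invalid required field.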
lib/catalog_api/address.ex
0.917682
0.508117
address.ex
starcoder
defmodule Date do
  @moduledoc """
  A Date struct and functions.

  The Date struct contains the fields year, month, day and calendar.
  New dates can be built with the `new/3` function or using the
  `~D` (see `Kernel.sigil_D/2`) sigil:

      iex> ~D[2000-01-01]
      ~D[2000-01-01]

  Both `new/3` and sigil return a struct where the date fields can
  be accessed directly:

      iex> date = ~D[2000-01-01]
      iex> date.year
      2000
      iex> date.month
      1

  The functions on this module work with the `Date` struct as well
  as any struct that contains the same fields as the `Date` struct,
  such as `NaiveDateTime` and `DateTime`. Such functions expect
  `t:Calendar.date/0` in their typespecs (instead of `t:t/0`).

  Developers should avoid creating the Date structs directly
  and instead rely on the functions provided by this module as well
  as the ones in third-party calendar libraries.

  ## Comparing dates

  Comparisons in Elixir using `==/2`, `>/2`, `</2` and similar are structural
  and based on the `Date` struct fields. For proper comparison between
  dates, use the `compare/2` function. The existence of the `compare/2`
  function in this module also allows using `Enum.min/2` and `Enum.max/2`
  functions to get the minimum and maximum date of an `Enum`. For example:

      iex> Enum.min([~D[2017-03-31], ~D[2017-04-01]], Date)
      ~D[2017-03-31]

  ## Using epochs

  The `add/2` and `diff/2` functions can be used for computing dates
  or retrieving the number of days between instants. For example, if there
  is an interest in computing the number of days from the Unix epoch
  (1970-01-01):

      iex> Date.diff(~D[2010-04-17], ~D[1970-01-01])
      14716

      iex> Date.add(~D[1970-01-01], 14716)
      ~D[2010-04-17]

  Those functions are optimized to deal with common epochs, such
  as the Unix Epoch above or the Gregorian Epoch (0000-01-01).
  """

  @enforce_keys [:year, :month, :day]
  defstruct [:year, :month, :day, calendar: Calendar.ISO]

  @type t :: %__MODULE__{
          year: Calendar.year(),
          month: Calendar.month(),
          day: Calendar.day(),
          calendar: Calendar.calendar()
        }

  @doc """
  Returns a range of dates.

  A range of dates represents a discrete number of dates where
  the first and last values are dates with matching calendars.

  Ranges of dates can be either increasing (`first <= last`) or
  decreasing (`first > last`). They are also always inclusive.

  ## Examples

      iex> Date.range(~D[1999-01-01], ~D[2000-01-01])
      Date.range(~D[1999-01-01], ~D[2000-01-01])

  A range of dates implements the `Enumerable` protocol, which means
  functions in the `Enum` module can be used to work with ranges:

      iex> range = Date.range(~D[2001-01-01], ~D[2002-01-01])
      iex> range
      Date.range(~D[2001-01-01], ~D[2002-01-01])
      iex> Enum.count(range)
      366
      iex> ~D[2001-02-01] in range
      true
      iex> Enum.take(range, 3)
      [~D[2001-01-01], ~D[2001-01-02], ~D[2001-01-03]]

  """
  @doc since: "1.5.0"
  @spec range(Calendar.date(), Calendar.date()) :: Date.Range.t()
  def range(%{calendar: calendar} = first, %{calendar: calendar} = last) do
    {first_days, _} = to_iso_days(first)
    {last_days, _} = to_iso_days(last)

    # TODO: Deprecate inferring a range with a step of -1 on Elixir v1.16
    step = if first_days <= last_days, do: 1, else: -1

    range(first, first_days, last, last_days, calendar, step)
  end

  def range(%{calendar: _, year: _, month: _, day: _}, %{calendar: _, year: _, month: _, day: _}) do
    raise ArgumentError, "both dates must have matching calendars"
  end

  @doc """
  Returns a range of dates with a step.

  ## Examples

      iex> range = Date.range(~D[2001-01-01], ~D[2002-01-01], 2)
      iex> range
      Date.range(~D[2001-01-01], ~D[2002-01-01], 2)
      iex> Enum.count(range)
      183
      iex> ~D[2001-01-03] in range
      true
      iex> Enum.take(range, 3)
      [~D[2001-01-01], ~D[2001-01-03], ~D[2001-01-05]]

  """
  @doc since: "1.12.0"
  @spec range(Calendar.date(), Calendar.date(), step :: pos_integer | neg_integer) ::
          Date.Range.t()
  def range(%{calendar: calendar} = first, %{calendar: calendar} = last, step)
      when is_integer(step) and step != 0 do
    {first_days, _} = to_iso_days(first)
    {last_days, _} = to_iso_days(last)
    range(first, first_days, last, last_days, calendar, step)
  end

  def range(
        %{calendar: _, year: _, month: _, day: _} = first,
        %{calendar: _, year: _, month: _, day: _} = last,
        step
      ) do
    raise ArgumentError,
          "both dates must have matching calendar and the step must be a " <>
            "non-zero integer, got: #{inspect(first)}, #{inspect(last)}, #{step}"
  end

  defp range(first, first_days, last, last_days, calendar, step) do
    %Date.Range{
      first: %Date{calendar: calendar, year: first.year, month: first.month, day: first.day},
      last: %Date{calendar: calendar, year: last.year, month: last.month, day: last.day},
      first_in_iso_days: first_days,
      last_in_iso_days: last_days,
      step: step
    }
  end

  @doc """
  Returns the current date in UTC.

  ## Examples

      iex> date = Date.utc_today()
      iex> date.year >= 2016
      true

  """
  @doc since: "1.4.0"
  @spec utc_today(Calendar.calendar()) :: t
  def utc_today(calendar \\ Calendar.ISO)

  def utc_today(Calendar.ISO) do
    {:ok, {year, month, day}, _, _} = Calendar.ISO.from_unix(System.os_time(), :native)
    %Date{year: year, month: month, day: day}
  end

  def utc_today(calendar) do
    calendar
    |> DateTime.utc_now()
    |> DateTime.to_date()
  end

  @doc """
  Returns `true` if the year in the given `date` is a leap year.

  ## Examples

      iex> Date.leap_year?(~D[2000-01-01])
      true
      iex> Date.leap_year?(~D[2001-01-01])
      false
      iex> Date.leap_year?(~D[2004-01-01])
      true
      iex> Date.leap_year?(~D[1900-01-01])
      false
      iex> Date.leap_year?(~N[2004-01-01 01:23:45])
      true

  """
  @doc since: "1.4.0"
  @spec leap_year?(Calendar.date()) :: boolean()
  def leap_year?(date)

  def leap_year?(%{calendar: calendar, year: year}) do
    calendar.leap_year?(year)
  end

  @doc """
  Returns the number of days in the given `date` month.

  ## Examples

      iex> Date.days_in_month(~D[1900-01-13])
      31
      iex> Date.days_in_month(~D[1900-02-09])
      28
      iex> Date.days_in_month(~N[2000-02-20 01:23:45])
      29

  """
  @doc since: "1.4.0"
  @spec days_in_month(Calendar.date()) :: Calendar.day()
  def days_in_month(date)

  def days_in_month(%{calendar: calendar, year: year, month: month}) do
    calendar.days_in_month(year, month)
  end

  @doc """
  Returns the number of months in the given `date` year.

  ## Example

      iex> Date.months_in_year(~D[1900-01-13])
      12

  """
  @doc since: "1.7.0"
  @spec months_in_year(Calendar.date()) :: Calendar.month()
  def months_in_year(date)

  def months_in_year(%{calendar: calendar, year: year}) do
    calendar.months_in_year(year)
  end

  @doc """
  Builds a new ISO date.

  Expects all values to be integers. Returns `{:ok, date}` if each
  entry fits its appropriate range, returns `{:error, reason}` otherwise.

  ## Examples

      iex> Date.new(2000, 1, 1)
      {:ok, ~D[2000-01-01]}
      iex> Date.new(2000, 13, 1)
      {:error, :invalid_date}
      iex> Date.new(2000, 2, 29)
      {:ok, ~D[2000-02-29]}

      iex> Date.new(2000, 2, 30)
      {:error, :invalid_date}
      iex> Date.new(2001, 2, 29)
      {:error, :invalid_date}

  """
  @spec new(Calendar.year(), Calendar.month(), Calendar.day(), Calendar.calendar()) ::
          {:ok, t} | {:error, atom}
  def new(year, month, day, calendar \\ Calendar.ISO) do
    if calendar.valid_date?(year, month, day) do
      {:ok, %Date{year: year, month: month, day: day, calendar: calendar}}
    else
      {:error, :invalid_date}
    end
  end

  @doc """
  Builds a new ISO date.

  Expects all values to be integers. Returns `date` if each
  entry fits its appropriate range, raises if the date is invalid.

  ## Examples

      iex> Date.new!(2000, 1, 1)
      ~D[2000-01-01]
      iex> Date.new!(2000, 13, 1)
      ** (ArgumentError) cannot build date, reason: :invalid_date
      iex> Date.new!(2000, 2, 29)
      ~D[2000-02-29]

  """
  @doc since: "1.11.0"
  @spec new!(Calendar.year(), Calendar.month(), Calendar.day(), Calendar.calendar()) :: t
  def new!(year, month, day, calendar \\ Calendar.ISO) do
    case new(year, month, day, calendar) do
      {:ok, value} ->
        value

      {:error, reason} ->
        raise ArgumentError, "cannot build date, reason: #{inspect(reason)}"
    end
  end

  @doc """
  Converts the given date to a string according to its calendar.

  ### Examples

      iex> Date.to_string(~D[2000-02-28])
      "2000-02-28"
      iex> Date.to_string(~N[2000-02-28 01:23:45])
      "2000-02-28"
      iex> Date.to_string(~D[-0100-12-15])
      "-0100-12-15"

  """
  @spec to_string(Calendar.date()) :: String.t()
  def to_string(date)

  def to_string(%{calendar: calendar, year: year, month: month, day: day}) do
    calendar.date_to_string(year, month, day)
  end

  @doc """
  Parses the extended "Dates" format described by
  [ISO 8601:2019](https://en.wikipedia.org/wiki/ISO_8601).

  The year parsed by this function is limited to four digits.

  ## Examples

      iex> Date.from_iso8601("2015-01-23")
      {:ok, ~D[2015-01-23]}

      iex> Date.from_iso8601("2015:01:23")
      {:error, :invalid_format}

      iex> Date.from_iso8601("2015-01-32")
      {:error, :invalid_date}

  """
  @spec from_iso8601(String.t(), Calendar.calendar()) :: {:ok, t} | {:error, atom}
  def from_iso8601(string, calendar \\ Calendar.ISO) do
    with {:ok, {year, month, day}} <- Calendar.ISO.parse_date(string) do
      convert(%Date{year: year, month: month, day: day}, calendar)
    end
  end

  @doc """
  Parses the extended "Dates" format described by
  [ISO 8601:2019](https://en.wikipedia.org/wiki/ISO_8601).

  Raises if the format is invalid.

  ## Examples

      iex> Date.from_iso8601!("2015-01-23")
      ~D[2015-01-23]
      iex> Date.from_iso8601!("2015:01:23")
      ** (ArgumentError) cannot parse "2015:01:23" as date, reason: :invalid_format

  """
  @spec from_iso8601!(String.t(), Calendar.calendar()) :: t
  def from_iso8601!(string, calendar \\ Calendar.ISO) do
    case from_iso8601(string, calendar) do
      {:ok, value} ->
        value

      {:error, reason} ->
        raise ArgumentError,
              "cannot parse #{inspect(string)} as date, reason: #{inspect(reason)}"
    end
  end

  @doc """
  Converts the given `date` to
  [ISO 8601:2019](https://en.wikipedia.org/wiki/ISO_8601).

  By default, `Date.to_iso8601/2` returns dates formatted in the "extended"
  format, for human readability. It also supports the "basic" format through
  passing the `:basic` option.

  Only supports converting dates which are in the ISO calendar, or other
  calendars in which the days also start at midnight. Attempting to convert
  dates from other calendars will raise an `ArgumentError`.

  ### Examples

      iex> Date.to_iso8601(~D[2000-02-28])
      "2000-02-28"

      iex> Date.to_iso8601(~D[2000-02-28], :basic)
      "20000228"

      iex> Date.to_iso8601(~N[2000-02-28 00:00:00])
      "2000-02-28"

  """
  @spec to_iso8601(Calendar.date(), :extended | :basic) :: String.t()
  def to_iso8601(date, format \\ :extended)

  def to_iso8601(%{calendar: Calendar.ISO} = date, format) when format in [:basic, :extended] do
    %{year: year, month: month, day: day} = date
    Calendar.ISO.date_to_string(year, month, day, format)
  end

  def to_iso8601(%{calendar: _} = date, format) when format in [:basic, :extended] do
    date
    |> convert!(Calendar.ISO)
    |> to_iso8601(format)
  end

  @doc """
  Converts the given `date` to an Erlang date tuple.

  Only supports converting dates which are in the ISO calendar,
  or other calendars in which the days also start at midnight.
  Attempting to convert dates from other calendars will raise.

  ## Examples

      iex> Date.to_erl(~D[2000-01-01])
      {2000, 1, 1}

      iex> Date.to_erl(~N[2000-01-01 00:00:00])
      {2000, 1, 1}

  """
  @spec to_erl(Calendar.date()) :: :calendar.date()
  def to_erl(date) do
    %{year: year, month: month, day: day} = convert!(date, Calendar.ISO)
    {year, month, day}
  end

  @doc """
  Converts an Erlang date tuple to a `Date` struct.

  Only supports converting dates which are in the ISO calendar,
  or other calendars in which the days also start at midnight.
  Attempting to convert dates from other calendars will return an error
  tuple.

  ## Examples

      iex> Date.from_erl({2000, 1, 1})
      {:ok, ~D[2000-01-01]}
      iex> Date.from_erl({2000, 13, 1})
      {:error, :invalid_date}

  """
  @spec from_erl(:calendar.date(), Calendar.calendar()) :: {:ok, t} | {:error, atom}
  def from_erl(tuple, calendar \\ Calendar.ISO)

  def from_erl({year, month, day}, calendar) do
    with {:ok, date} <- new(year, month, day, Calendar.ISO), do: convert(date, calendar)
  end

  @doc """
  Converts an Erlang date tuple but raises for invalid dates.

  ## Examples

      iex> Date.from_erl!({2000, 1, 1})
      ~D[2000-01-01]
      iex> Date.from_erl!({2000, 13, 1})
      ** (ArgumentError) cannot convert {2000, 13, 1} to date, reason: :invalid_date

  """
  @spec from_erl!(:calendar.date(), Calendar.calendar()) :: t
  def from_erl!(tuple, calendar \\ Calendar.ISO) do
    case from_erl(tuple, calendar) do
      {:ok, value} ->
        value

      {:error, reason} ->
        raise ArgumentError,
              "cannot convert #{inspect(tuple)} to date, reason: #{inspect(reason)}"
    end
  end

  @doc """
  Converts a number of gregorian days to a `Date` struct.

  ## Examples

      iex> Date.from_gregorian_days(1)
      ~D[0000-01-02]
      iex> Date.from_gregorian_days(730_485)
      ~D[2000-01-01]
      iex> Date.from_gregorian_days(-1)
      ~D[-0001-12-31]

  """
  @doc since: "1.11.0"
  @spec from_gregorian_days(integer(), Calendar.calendar()) :: t
  def from_gregorian_days(days, calendar \\ Calendar.ISO) when is_integer(days) do
    from_iso_days({days, 0}, calendar)
  end

  @doc """
  Converts a `date` struct to a number of gregorian days.

  ## Examples

      iex> Date.to_gregorian_days(~D[0000-01-02])
      1
      iex> Date.to_gregorian_days(~D[2000-01-01])
      730_485
      iex> Date.to_gregorian_days(~N[2000-01-01 00:00:00])
      730_485

  """
  @doc since: "1.11.0"
  @spec to_gregorian_days(Calendar.date()) :: integer()
  def to_gregorian_days(date) do
    {days, _} = to_iso_days(date)
    days
  end

  @doc """
  Compares two date structs.

  Returns `:gt` if first date is later than the second
  and `:lt` for vice versa. If the two dates are equal
  `:eq` is returned.

  ## Examples

      iex> Date.compare(~D[2016-04-16], ~D[2016-04-28])
      :lt

  This function can also be used to compare across more
  complex calendar types by considering only the date fields:

      iex> Date.compare(~D[2016-04-16], ~N[2016-04-28 01:23:45])
      :lt
      iex> Date.compare(~D[2016-04-16], ~N[2016-04-16 01:23:45])
      :eq
      iex> Date.compare(~N[2016-04-16 12:34:56], ~N[2016-04-16 01:23:45])
      :eq

  """
  @doc since: "1.4.0"
  @spec compare(Calendar.date(), Calendar.date()) :: :lt | :eq | :gt
  def compare(%{calendar: calendar} = date1, %{calendar: calendar} = date2) do
    %{year: year1, month: month1, day: day1} = date1
    %{year: year2, month: month2, day: day2} = date2

    case {{year1, month1, day1}, {year2, month2, day2}} do
      {first, second} when first > second -> :gt
      {first, second} when first < second -> :lt
      _ -> :eq
    end
  end

  def compare(%{calendar: calendar1} = date1, %{calendar: calendar2} = date2) do
    if Calendar.compatible_calendars?(calendar1, calendar2) do
      case {to_iso_days(date1), to_iso_days(date2)} do
        {first, second} when first > second -> :gt
        {first, second} when first < second -> :lt
        _ -> :eq
      end
    else
      raise ArgumentError, """
      cannot compare #{inspect(date1)} with #{inspect(date2)}.

      This comparison would be ambiguous as their calendars have incompatible day rollover moments.
      Specify an exact time of day (using DateTime) to resolve this ambiguity
      """
    end
  end

  @doc """
  Converts the given `date` from its calendar to the given `calendar`.

  Returns `{:ok, date}` if the calendars are compatible,
  or `{:error, :incompatible_calendars}` if they are not.

  See also `Calendar.compatible_calendars?/2`.

  ## Examples

  Imagine someone implements `Calendar.Holocene`, a calendar based on the
  Gregorian calendar that adds exactly 10,000 years to the current Gregorian
  year:

      iex> Date.convert(~D[2000-01-01], Calendar.Holocene)
      {:ok, %Date{calendar: Calendar.Holocene, year: 12000, month: 1, day: 1}}

  """
  @doc since: "1.5.0"
  @spec convert(Calendar.date(), Calendar.calendar()) ::
          {:ok, t} | {:error, :incompatible_calendars}
  def convert(%{calendar: calendar, year: year, month: month, day: day}, calendar) do
    {:ok, %Date{calendar: calendar, year: year, month: month, day: day}}
  end

  def convert(%{calendar: calendar} = date, target_calendar) do
    if Calendar.compatible_calendars?(calendar, target_calendar) do
      result_date =
        date
        |> to_iso_days()
        |> from_iso_days(target_calendar)

      {:ok, result_date}
    else
      {:error, :incompatible_calendars}
    end
  end

  @doc """
  Similar to `Date.convert/2`, but raises an `ArgumentError`
  if the conversion between the two calendars is not possible.

  ## Examples

  Imagine someone implements `Calendar.Holocene`, a calendar based on the
  Gregorian calendar that adds exactly 10,000 years to the current Gregorian
  year:

      iex> Date.convert!(~D[2000-01-01], Calendar.Holocene)
      %Date{calendar: Calendar.Holocene, year: 12000, month: 1, day: 1}

  """
  @doc since: "1.5.0"
  @spec convert!(Calendar.date(), Calendar.calendar()) :: t
  def convert!(date, calendar) do
    case convert(date, calendar) do
      {:ok, value} ->
        value

      {:error, reason} ->
        raise ArgumentError,
              "cannot convert #{inspect(date)} to target calendar #{inspect(calendar)}, " <>
                "reason: #{inspect(reason)}"
    end
  end

  @doc """
  Adds the number of days to the given `date`.

  The days are counted as Gregorian days. The date is returned in the same
  calendar as it was given in.

  ## Examples

      iex> Date.add(~D[2000-01-03], -2)
      ~D[2000-01-01]
      iex> Date.add(~D[2000-01-01], 2)
      ~D[2000-01-03]
      iex> Date.add(~N[2000-01-01 09:00:00], 2)
      ~D[2000-01-03]
      iex> Date.add(~D[-0010-01-01], -2)
      ~D[-0011-12-30]

  """
  @doc since: "1.5.0"
  @spec add(Calendar.date(), integer()) :: t
  def add(%{calendar: Calendar.ISO} = date, days) do
    %{year: year, month: month, day: day} = date

    {year, month, day} =
      Calendar.ISO.date_to_iso_days(year, month, day)
      |> Kernel.+(days)
      |> Calendar.ISO.date_from_iso_days()

    %Date{calendar: Calendar.ISO, year: year, month: month, day: day}
  end

  def add(%{calendar: calendar} = date, days) do
    {base_days, fraction} = to_iso_days(date)
    from_iso_days({base_days + days, fraction}, calendar)
  end

  @doc """
  Calculates the difference between two dates, in a full number of days.

  It returns the number of Gregorian days between the dates. Only `Date`
  structs that follow the same or compatible calendars can be compared
  this way. If two calendars are not compatible, it will raise.

  ## Examples

      iex> Date.diff(~D[2000-01-03], ~D[2000-01-01])
      2
      iex> Date.diff(~D[2000-01-01], ~D[2000-01-03])
      -2
      iex> Date.diff(~D[0000-01-02], ~D[-0001-12-30])
      3
      iex> Date.diff(~D[2000-01-01], ~N[2000-01-03 09:00:00])
      -2

  """
  @doc since: "1.5.0"
  @spec diff(Calendar.date(), Calendar.date()) :: integer
  def diff(%{calendar: Calendar.ISO} = date1, %{calendar: Calendar.ISO} = date2) do
    %{year: year1, month: month1, day: day1} = date1
    %{year: year2, month: month2, day: day2} = date2

    Calendar.ISO.date_to_iso_days(year1, month1, day1) -
      Calendar.ISO.date_to_iso_days(year2, month2, day2)
  end

  def diff(%{calendar: calendar1} = date1, %{calendar: calendar2} = date2) do
    if Calendar.compatible_calendars?(calendar1, calendar2) do
      {days1, _} = to_iso_days(date1)
      {days2, _} = to_iso_days(date2)
      days1 - days2
    else
      raise ArgumentError,
            "cannot calculate the difference between #{inspect(date1)} and #{inspect(date2)} " <>
              "because their calendars are not compatible and thus the result would be ambiguous"
    end
  end

  @doc false
  def to_iso_days(%{calendar: Calendar.ISO, year: year, month: month, day: day}) do
    {Calendar.ISO.date_to_iso_days(year, month, day), {0, 86_400_000_000}}
  end

  def to_iso_days(%{calendar: calendar, year: year, month: month, day: day}) do
    calendar.naive_datetime_to_iso_days(year, month, day, 0, 0, 0, {0, 0})
  end

  defp from_iso_days({days, _}, Calendar.ISO) do
    {year, month, day} = Calendar.ISO.date_from_iso_days(days)
    %Date{year: year, month: month, day: day, calendar: Calendar.ISO}
  end

  defp from_iso_days(iso_days, target_calendar) do
    {year, month, day, _, _, _, _} = target_calendar.naive_datetime_from_iso_days(iso_days)
    %Date{year: year, month: month, day: day, calendar: target_calendar}
  end

  @doc """
  Calculates the day of the week of a given `date`.

  Returns the day of the week as an integer. For the ISO 8601
  calendar (the default), it is an integer from 1 to 7, where
  1 is Monday and 7 is Sunday.

  An optional `starting_on` value may be supplied, which
  configures the weekday the week starts on. The default value
  for it is `:default`, which translates to `:monday` for the
  built-in ISO calendar. Any other weekday may be given too.

  ## Examples

      iex> Date.day_of_week(~D[2016-10-31])
      1
      iex> Date.day_of_week(~D[2016-11-01])
      2
      iex> Date.day_of_week(~N[2016-11-01 01:23:45])
      2
      iex> Date.day_of_week(~D[-0015-10-30])
      3

      iex> Date.day_of_week(~D[2016-10-31], :sunday)
      2
      iex> Date.day_of_week(~D[2016-11-01], :sunday)
      3
      iex> Date.day_of_week(~N[2016-11-01 01:23:45], :sunday)
      3
      iex> Date.day_of_week(~D[-0015-10-30], :sunday)
      4

  """
  @doc since: "1.4.0"
  @spec day_of_week(Calendar.date(), starting_on :: :default | atom) :: Calendar.day_of_week()
  def day_of_week(date, starting_on \\ :default)

  def day_of_week(%{calendar: calendar, year: year, month: month, day: day}, starting_on) do
    {day_of_week, _first, _last} = calendar.day_of_week(year, month, day, starting_on)
    day_of_week
  end

  @doc """
  Calculates a date that is the first day of the week for the given `date`.

  If the day is already the first day of the week, it returns the
  day itself. For the built-in ISO calendar, the week starts on Monday.
  A weekday rather than `:default` can be given as `starting_on`.

  ## Examples

      iex> Date.beginning_of_week(~D[2020-07-11])
      ~D[2020-07-06]
      iex> Date.beginning_of_week(~D[2020-07-06])
      ~D[2020-07-06]
      iex> Date.beginning_of_week(~D[2020-07-11], :sunday)
      ~D[2020-07-05]
      iex> Date.beginning_of_week(~D[2020-07-11], :saturday)
      ~D[2020-07-11]
      iex> Date.beginning_of_week(~N[2020-07-11 01:23:45])
      ~D[2020-07-06]

  """
  @doc since: "1.11.0"
  @spec beginning_of_week(Calendar.date(), starting_on :: :default | atom) :: Date.t()
  def beginning_of_week(date, starting_on \\ :default)

  def beginning_of_week(%{calendar: Calendar.ISO} = date, starting_on) do
    %{year: year, month: month, day: day} = date
    iso_days = Calendar.ISO.date_to_iso_days(year, month, day)

    {year, month, day} =
      case Calendar.ISO.iso_days_to_day_of_week(iso_days, starting_on) do
        1 ->
          {year, month, day}

        day_of_week ->
          Calendar.ISO.date_from_iso_days(iso_days - day_of_week + 1)
      end

    %Date{calendar: Calendar.ISO, year: year, month: month, day: day}
  end

  def beginning_of_week(%{calendar: calendar} = date, starting_on) do
    %{year: year, month: month, day: day} = date

    case calendar.day_of_week(year, month, day, starting_on) do
      {day_of_week, day_of_week, _} ->
        %Date{calendar: calendar, year: year, month: month, day: day}

      {day_of_week, first_day_of_week, _} ->
        add(date, -(day_of_week - first_day_of_week))
    end
  end

  @doc """
  Calculates a date that is the last day of the week for the given `date`.

  If the day is already the last day of the week, it returns the
  day itself. For the built-in ISO calendar, the week ends on Sunday.
  A weekday rather than `:default` can be given as `starting_on`.

  ## Examples

      iex> Date.end_of_week(~D[2020-07-11])
      ~D[2020-07-12]
      iex> Date.end_of_week(~D[2020-07-05])
      ~D[2020-07-05]
      iex> Date.end_of_week(~D[2020-07-06], :sunday)
      ~D[2020-07-11]
      iex> Date.end_of_week(~D[2020-07-06], :saturday)
      ~D[2020-07-10]
      iex> Date.end_of_week(~N[2020-07-11 01:23:45])
      ~D[2020-07-12]

  """
  @doc since: "1.11.0"
  @spec end_of_week(Calendar.date(), starting_on :: :default | atom) :: Date.t()
  def end_of_week(date, starting_on \\ :default)

  def end_of_week(%{calendar: Calendar.ISO} = date, starting_on) do
    %{year: year, month: month, day: day} = date
    iso_days = Calendar.ISO.date_to_iso_days(year, month, day)

    {year, month, day} =
      case Calendar.ISO.iso_days_to_day_of_week(iso_days, starting_on) do
        7 ->
          {year, month, day}

        day_of_week ->
          Calendar.ISO.date_from_iso_days(iso_days + 7 - day_of_week)
      end

    %Date{calendar: Calendar.ISO, year: year, month: month, day: day}
  end

  def end_of_week(%{calendar: calendar} = date, starting_on) do
    %{year: year, month: month, day: day} = date

    case calendar.day_of_week(year, month, day, starting_on) do
      {day_of_week, _, day_of_week} ->
        %Date{calendar: calendar, year: year, month: month, day: day}

      {day_of_week, _, last_day_of_week} ->
        add(date, last_day_of_week - day_of_week)
    end
  end

  @doc """
  Calculates the day of the year of a given `date`.

  Returns the day of the year as an integer. For the ISO 8601
  calendar (the default), it is an integer from 1 to 366.

  ## Examples

      iex> Date.day_of_year(~D[2016-01-01])
      1
      iex> Date.day_of_year(~D[2016-11-01])
      306
      iex> Date.day_of_year(~D[-0015-10-30])
      303
      iex> Date.day_of_year(~D[2004-12-31])
      366

  """
  @doc since: "1.8.0"
  @spec day_of_year(Calendar.date()) :: Calendar.day()
  def day_of_year(date)

  def day_of_year(%{calendar: calendar, year: year, month: month, day: day}) do
    calendar.day_of_year(year, month, day)
  end

  @doc """
  Calculates the quarter of the year of a given `date`.

  Returns the quarter of the year as an integer. For the ISO 8601
  calendar (the default), it is an integer from 1 to 4.

  ## Examples

      iex> Date.quarter_of_year(~D[2016-10-31])
      4
      iex> Date.quarter_of_year(~D[2016-01-01])
      1
      iex> Date.quarter_of_year(~N[2016-04-01 01:23:45])
      2
      iex> Date.quarter_of_year(~D[-0015-09-30])
      3

  """
  @doc since: "1.8.0"
  @spec quarter_of_year(Calendar.date()) :: non_neg_integer()
  def quarter_of_year(date)

  def quarter_of_year(%{calendar: calendar, year: year, month: month, day: day}) do
    calendar.quarter_of_year(year, month, day)
  end

  @doc """
  Calculates the year-of-era and era for a given calendar year.

  Returns a tuple `{year, era}` representing the year within the era and the
  era number.

  ## Examples

      iex> Date.year_of_era(~D[0001-01-01])
      {1, 1}
      iex> Date.year_of_era(~D[0000-12-31])
      {1, 0}
      iex> Date.year_of_era(~D[-0001-01-01])
      {2, 0}

  """
  @doc since: "1.8.0"
  @spec year_of_era(Calendar.date()) :: {Calendar.year(), non_neg_integer()}
  def year_of_era(date)

  def year_of_era(%{calendar: calendar, year: year, month: month, day: day}) do
    # TODO: Remove me on 1.17
    # The behaviour implementation already warns on missing callback.
    if function_exported?(calendar, :year_of_era, 3) do
      calendar.year_of_era(year, month, day)
    else
      calendar.year_of_era(year)
    end
  end

  @doc """
  Calculates the day-of-era and era for a given calendar `date`.

  Returns a tuple `{day, era}` representing the day within the era and the
  era number.

  ## Examples

      iex> Date.day_of_era(~D[0001-01-01])
      {1, 1}

      iex> Date.day_of_era(~D[0000-12-31])
      {1, 0}

  """
  @doc since: "1.8.0"
  @spec day_of_era(Calendar.date()) :: {Calendar.day(), non_neg_integer()}
  def day_of_era(date)

  def day_of_era(%{calendar: calendar, year: year, month: month, day: day}) do
    calendar.day_of_era(year, month, day)
  end

  @doc """
  Calculates a date that is the first day of the month for the given `date`.

  ## Examples

      iex> Date.beginning_of_month(~D[2000-01-31])
      ~D[2000-01-01]
      iex> Date.beginning_of_month(~D[2000-01-01])
      ~D[2000-01-01]
      iex> Date.beginning_of_month(~N[2000-01-31 01:23:45])
      ~D[2000-01-01]

  """
  @doc since: "1.11.0"
  @spec beginning_of_month(Calendar.date()) :: t()
  def beginning_of_month(date)

  def beginning_of_month(%{year: year, month: month, calendar: calendar}) do
    %Date{year: year, month: month, day: 1, calendar: calendar}
  end

  @doc """
  Calculates a date that is the last day of the month for the given `date`.

  ## Examples

      iex> Date.end_of_month(~D[2000-01-01])
      ~D[2000-01-31]
      iex> Date.end_of_month(~D[2000-01-31])
      ~D[2000-01-31]
      iex> Date.end_of_month(~N[2000-01-01 01:23:45])
      ~D[2000-01-31]

  """
  @doc since: "1.11.0"
  @spec end_of_month(Calendar.date()) :: t()
  def end_of_month(date)

  def end_of_month(%{year: year, month: month, calendar: calendar} = date) do
    day = Date.days_in_month(date)
    %Date{year: year, month: month, day: day, calendar: calendar}
  end

  ## Helpers

  defimpl String.Chars do
    def to_string(%{calendar: calendar, year: year, month: month, day: day}) do
      calendar.date_to_string(year, month, day)
    end
  end

  defimpl Inspect do
    def inspect(%{calendar: calendar, year: year, month: month, day: day}, _) do
      "~D[" <> calendar.date_to_string(year, month, day) <> suffix(calendar) <> "]"
    end

    defp suffix(Calendar.ISO), do: ""
    defp suffix(calendar), do: " " <> inspect(calendar)
  end
end
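# A short sketch combining the functions above: count the Mondays in a range
# using Date.range/2 and Date.day_of_week/1 (where 1 is Monday in the default
# ISO calendar). The module name is illustrative.
defmodule DateRangeExample do
  def mondays_in(first, last) do
    Date.range(first, last)
    |> Enum.count(&(Date.day_of_week(&1) == 1))
  end
end

# DateRangeExample.mondays_in(~D[2021-01-01], ~D[2021-01-31])
# #=> 4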
lib/elixir/lib/calendar/date.ex
0.91621
0.891197
date.ex
starcoder
defmodule Money do import Kernel, except: [abs: 1] @moduledoc """ Defines a `Money` struct along with convenience methods for working with currencies. ## Examples iex> money = Money.new(500, :USD) %Money{amount: 500, currency: :USD} iex> money = Money.add(money, 550) %Money{amount: 1050, currency: :USD} iex> Money.to_string(money) "$10.50" ## Configuration You can set defaults in your Mix configuration to make working with `Money` a little easier. config :money, default_currency: :EUR, # this allows you to do Money.new(100) separator: ".", # change the default thousands separator for Money.to_string delimiter: ",", # change the default decimal delimeter for Money.to_string symbol: false # don’t display the currency symbol in Money.to_string symbol_on_right: false, # position the symbol symbol_space: false # add a space between symbol and number fractional_unit: true # display units after the delimeter strip_insignificant_zeros: false # don’t display the insignificant zeros or the delimeter code: false # add the currency code after the number """ @type t :: %__MODULE__{ amount: integer, currency: atom } defstruct amount: 0, currency: :USD alias Money.Currency @spec new(integer) :: t @doc ~S""" Create a new `Money` struct using a default currency. The default currency can be set in the system Mix config. ## Config config :money, default_currency: :USD ## Examples Money.new(123) %Money{amount: 123, currency: :USD} """ def new(amount) do currency = Application.get_env(:money, :default_currency) if currency do new(amount, currency) else raise ArgumentError, "to use Money.new/1 you must set a default currency in your application config." end end @spec new(integer, atom | String.t()) :: t @doc """ Create a new `Money` struct from currency sub-units (cents) ## Examples iex> Money.new(1_000_00, :USD) %Money{amount: 1_000_00, currency: :USD} """ def new(int, currency) when is_integer(int), do: %Money{amount: int, currency: Currency.to_atom(currency)} @spec parse(String.t() | number | Decimal.t(), atom | String.t(), Keyword.t()) :: {:ok, t} | :error @doc ~S""" Parse a value into a `Money` type. The following options are available: * `:separator` - default `","`, sets the separator for groups of thousands. "1,000" * `:delimiter` - default `"."`, sets the decimal delimiter. "1.23" ## Examples iex> Money.parse("$1,234.56", :USD) {:ok, %Money{amount: 123456, currency: :USD}} iex> Money.parse("1.234,56", :EUR, separator: ".", delimiter: ",") {:ok, %Money{amount: 123456, currency: :EUR}} iex> Money.parse("1.234,56", :WRONG) :error iex> Money.parse(1_234.56, :USD) {:ok, %Money{amount: 123456, currency: :USD}} iex> Money.parse(1_234, :USD) {:ok, %Money{amount: 123400, currency: :USD}} iex> Money.parse(-1_234.56, :USD) {:ok, %Money{amount: -123456, currency: :USD}} iex> Money.parse(Decimal.from_float(1_234.56), :USD) {:ok, %Money{amount: 123456, currency: :USD}} """ def parse(value, currency \\ nil, opts \\ []) def parse(value, nil, opts) do currency = Application.get_env(:money, :default_currency) if currency do parse(value, currency, opts) else raise ArgumentError, "to use Money.new/1 you must set a default currency in your application config." 
end end def parse(str, currency, opts) when is_binary(str) do {_separator, delimiter} = get_parse_options(opts) value = str |> prepare_parse_string(delimiter) |> add_missing_leading_digit case Float.parse(value) do {float, _} -> parse(float, currency, []) :error -> :error end rescue _ -> :error end def parse(number, currency, _opts) when is_number(number) do {:ok, new(round(number * Currency.sub_units_count!(currency)), currency)} end if Code.ensure_loaded?(Decimal) do def parse(%Decimal{} = decimal, currency, _opts) do Decimal.to_float(decimal) |> Money.parse(currency) end end defp prepare_parse_string(characters, delimiter, acc \\ []) defp prepare_parse_string([], _delimiter, acc), do: acc |> Enum.reverse() |> Enum.join() defp prepare_parse_string(["-" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["-" | acc]) defp prepare_parse_string(["0" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["0" | acc]) defp prepare_parse_string(["1" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["1" | acc]) defp prepare_parse_string(["2" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["2" | acc]) defp prepare_parse_string(["3" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["3" | acc]) defp prepare_parse_string(["4" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["4" | acc]) defp prepare_parse_string(["5" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["5" | acc]) defp prepare_parse_string(["6" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["6" | acc]) defp prepare_parse_string(["7" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["7" | acc]) defp prepare_parse_string(["8" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["8" | acc]) defp prepare_parse_string(["9" | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["9" | acc]) defp prepare_parse_string([delimiter | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, ["." | acc]) defp prepare_parse_string([_head | tail], delimiter, acc), do: prepare_parse_string(tail, delimiter, acc) defp prepare_parse_string(string, delimiter, _acc), do: prepare_parse_string(String.codepoints(string), delimiter) defp add_missing_leading_digit(<<"-.">> <> tail), do: "-0." <> tail defp add_missing_leading_digit(<<".">> <> tail), do: "0." <> tail defp add_missing_leading_digit(str), do: str @spec parse!(String.t() | number | Decimal.t(), atom | String.t(), Keyword.t()) :: t @doc ~S""" Parse a value into a `Money` type. Similar to `parse/3` but returns a `%Money{}` or raises an error if parsing fails. ## Examples iex> Money.parse!("1,234.56", :USD) %Money{amount: 123456, currency: :USD} iex> Money.parse!("wrong", :USD) ** (ArgumentError) unable to parse "wrong" with currency :USD """ def parse!(value, currency \\ nil, opts \\ []) do case parse(value, currency, opts) do {:ok, money} -> money :error -> raise ArgumentError, "unable to parse #{inspect(value)} with currency #{inspect(currency)}" end end @spec compare(t, t) :: -1 | 0 | 1 @doc ~S""" Compares two `Money` structs with each other. They must each be of the same currency and then their amounts are compared. If the first amount is larger than the second `1` is returned, if less than `-1` is returned, if both amounts are equal `0` is returned. See `cmp/2` for a similar function that returns `:lt`, `:eq` or `:gt` instead. 
## Examples iex> Money.compare(Money.new(100, :USD), Money.new(100, :USD)) 0 iex> Money.compare(Money.new(100, :USD), Money.new(101, :USD)) -1 iex> Money.compare(Money.new(101, :USD), Money.new(100, :USD)) 1 """ def compare(%Money{currency: cur} = a, %Money{currency: cur} = b) do case a.amount - b.amount do x when x > 0 -> 1 x when x < 0 -> -1 x when x == 0 -> 0 end end def compare(a, b), do: fail_currencies_must_be_equal(a, b) @doc """ Compares two `Money` structs with each other. They must each be of the same currency and then their amounts are compared. If the first amount is larger than the second `:gt` is returned, if less than `:lt` is returned, if both amounts are equal `:eq` is returned. See `compare/2` for a similar function that returns `-1`, `0` or `1` instead. ## Examples iex> Money.cmp(Money.new(100, :USD), Money.new(100, :USD)) :eq iex> Money.cmp(Money.new(100, :USD), Money.new(101, :USD)) :lt iex> Money.cmp(Money.new(101, :USD), Money.new(100, :USD)) :gt """ @spec cmp(t, t) :: :lt | :eq | :gt def cmp(a, b) do case compare(a, b) do x when x == -1 -> :lt x when x == 0 -> :eq x when x == 1 -> :gt end end @spec zero?(t) :: boolean @doc ~S""" Returns true if the amount of a `Money` struct is zero ## Examples iex> Money.zero?(Money.new(0, :USD)) true iex> Money.zero?(Money.new(1, :USD)) false """ def zero?(%Money{amount: amount}) do amount == 0 end @spec positive?(t) :: boolean @doc ~S""" Returns true if the amount of a `Money` is greater than zero ## Examples iex> Money.positive?(Money.new(0, :USD)) false iex> Money.positive?(Money.new(1, :USD)) true iex> Money.positive?(Money.new(-1, :USD)) false """ def positive?(%Money{amount: amount}) do amount > 0 end @spec negative?(t) :: boolean @doc ~S""" Returns true if the amount of a `Money` is less than zero ## Examples iex> Money.negative?(Money.new(0, :USD)) false iex> Money.negative?(Money.new(1, :USD)) false iex> Money.negative?(Money.new(-1, :USD)) true """ def negative?(%Money{amount: amount}) do amount < 0 end @spec equals?(t, t) :: boolean @doc ~S""" Returns true if two `Money` of the same currency have the same amount ## Examples iex> Money.equals?(Money.new(100, :USD), Money.new(100, :USD)) true iex> Money.equals?(Money.new(101, :USD), Money.new(100, :USD)) false iex> Money.equals?(Money.new(100, :USD), Money.new(100, :CAD)) false """ def equals?(%Money{amount: amount, currency: cur}, %Money{amount: amount, currency: cur}), do: true def equals?(%Money{}, %Money{}), do: false @spec neg(t) :: t @doc ~S""" Returns a `Money` with the amount negated. ## Examples iex> Money.new(100, :USD) |> Money.neg %Money{amount: -100, currency: :USD} iex> Money.new(-100, :USD) |> Money.neg %Money{amount: 100, currency: :USD} """ def neg(%Money{amount: amount, currency: cur}), do: %Money{amount: -amount, currency: cur} @spec abs(t) :: t @doc ~S""" Returns a `Money` with the arithmetical absolute of the amount. 
## Examples iex> Money.new(-100, :USD) |> Money.abs %Money{amount: 100, currency: :USD} iex> Money.new(100, :USD) |> Money.abs %Money{amount: 100, currency: :USD} """ def abs(%Money{amount: amount, currency: cur}), do: %Money{amount: Kernel.abs(amount), currency: cur} @spec add(t, t | integer | float) :: t @doc ~S""" Adds two `Money` together or an integer (cents) amount to a `Money` ## Examples iex> Money.add(Money.new(100, :USD), Money.new(50, :USD)) %Money{amount: 150, currency: :USD} iex> Money.add(Money.new(100, :USD), 50) %Money{amount: 150, currency: :USD} iex> Money.add(Money.new(100, :USD), 5.55) %Money{amount: 655, currency: :USD} """ def add(%Money{amount: a, currency: cur}, %Money{amount: b, currency: cur}), do: Money.new(a + b, cur) def add(%Money{amount: amount, currency: cur}, addend) when is_integer(addend), do: Money.new(amount + addend, cur) def add(%Money{} = m, addend) when is_float(addend), do: add(m, round(addend * 100)) def add(a, b), do: fail_currencies_must_be_equal(a, b) @spec subtract(t, t | integer | float) :: t @doc ~S""" Subtracts one `Money` from another or an integer (cents) from a `Money` ## Examples iex> Money.subtract(Money.new(150, :USD), Money.new(50, :USD)) %Money{amount: 100, currency: :USD} iex> Money.subtract(Money.new(150, :USD), 50) %Money{amount: 100, currency: :USD} iex> Money.subtract(Money.new(150, :USD), 1.25) %Money{amount: 25, currency: :USD} """ def subtract(%Money{amount: a, currency: cur}, %Money{amount: b, currency: cur}), do: Money.new(a - b, cur) def subtract(%Money{amount: a, currency: cur}, subtractend) when is_integer(subtractend), do: Money.new(a - subtractend, cur) def subtract(%Money{} = m, subtractend) when is_float(subtractend), do: subtract(m, round(subtractend * 100)) def subtract(a, b), do: fail_currencies_must_be_equal(a, b) @spec multiply(t, integer | float) :: t @doc ~S""" Multiplies a `Money` by an amount ## Examples iex> Money.multiply(Money.new(100, :USD), 10) %Money{amount: 1000, currency: :USD} iex> Money.multiply(Money.new(100, :USD), 1.5) %Money{amount: 150, currency: :USD} """ def multiply(%Money{amount: amount, currency: cur}, multiplier) when is_integer(multiplier), do: Money.new(amount * multiplier, cur) def multiply(%Money{amount: amount, currency: cur}, multiplier) when is_float(multiplier), do: Money.new(round(amount * multiplier), cur) @spec divide(t, integer) :: [t] @doc ~S""" Divides up `Money` by an amount ## Examples iex> Money.divide(Money.new(100, :USD), 2) [%Money{amount: 50, currency: :USD}, %Money{amount: 50, currency: :USD}] iex> Money.divide(Money.new(101, :USD), 2) [%Money{amount: 51, currency: :USD}, %Money{amount: 50, currency: :USD}] """ def divide(%Money{amount: amount, currency: cur}, denominator) when is_integer(denominator) do value = div(amount, denominator) rem = rem(amount, denominator) do_divide(cur, value, rem, denominator, []) end defp do_divide(_currency, _value, _rem, 0, acc), do: acc |> Enum.reverse() defp do_divide(currency, value, 0, count, acc) do acc = [new(next_amount(value, 0, count), currency) | acc] count = decrement_abs(count) do_divide(currency, value, 0, count, acc) end defp do_divide(currency, value, rem, count, acc) do acc = [new(next_amount(value, rem, count), currency) | acc] rem = decrement_abs(rem) count = decrement_abs(count) do_divide(currency, value, rem, count, acc) end defp next_amount(0, -1, count) when count > 0, do: -1 defp next_amount(value, 0, _count), do: value defp next_amount(value, _rem, _count), do: increment_abs(value) defp increment_abs(n) when 
n >= 0, do: n + 1
  defp increment_abs(n) when n < 0, do: n - 1

  defp decrement_abs(n) when n >= 0, do: n - 1
  defp decrement_abs(n) when n < 0, do: n + 1

  @spec to_string(t, Keyword.t()) :: String.t()
  @doc ~S"""
  Converts a `Money` struct to a string representation

  The following options are available:

    * `:separator` - default `","`, sets the separator for groups of thousands. "1,000"
    * `:delimiter` - default `"."`, sets the decimal delimiter. "1.23"
    * `:symbol` - default `true`, sets whether to display the currency symbol or not.
    * `:symbol_on_right` - default `false`, display the currency symbol on the right of the number, eg: 123.45€
    * `:symbol_space` - default `false`, add a space between currency symbol and number, eg: € 123,45 or 123.45 €
    * `:fractional_unit` - default `true`, show the remaining units after the delimiter
    * `:strip_insignificant_zeros` - default `false`, strip zeros after the delimiter
    * `:code` - default `false`, append the currency code after the number

  ## Examples

      iex> Money.to_string(Money.new(123456, :GBP))
      "£1,234.56"

      iex> Money.to_string(Money.new(123456, :EUR), separator: ".", delimiter: ",")
      "€1.234,56"

      iex> Money.to_string(Money.new(123456, :EUR), symbol: false)
      "1,234.56"

      iex> Money.to_string(Money.new(123456, :EUR), symbol: false, separator: "")
      "1234.56"

      iex> Money.to_string(Money.new(123456, :EUR), fractional_unit: false)
      "€1,234"

      iex> Money.to_string(Money.new(123450, :EUR), strip_insignificant_zeros: true)
      "€1,234.5"

      iex> Money.to_string(Money.new(123450, :EUR), code: true)
      "€1,234.50 EUR"

  It can also be interpolated (it implements the String.Chars protocol).
  To control the formatting, you can use the above options in your config;
  more information is in the introduction to `Money`.

  ## Examples

      iex> "Total: #{Money.new(100_00, :USD)}"
      "Total: $100.00"

  """
  def to_string(%Money{} = money, opts \\ []) do
    {separator, delimiter, symbol, symbol_on_right, symbol_space, fractional_unit,
     strip_insignificant_zeros, code} = get_display_options(money, opts)

    number = format_number(money, separator, delimiter, fractional_unit, strip_insignificant_zeros, money)
    sign = if negative?(money), do: "-"
    space = if symbol_space, do: " "
    code = if code, do: " #{money.currency}"

    parts =
      if symbol_on_right do
        [sign, number, space, symbol, code]
      else
        [symbol, space, sign, number, code]
      end

    parts |> Enum.join() |> String.trim_leading()
  end

  if Code.ensure_loaded?(Decimal) do
    @spec to_decimal(t) :: Decimal.t()
    @doc ~S"""
    Converts a `Money` struct to a `Decimal` representation

    ## Examples

        iex> Money.to_decimal(Money.new(123456, :GBP))
        #Decimal<1234.56>

        iex> Money.to_decimal(Money.new(-123420, :EUR))
        #Decimal<-1234.20>
    """
    def to_decimal(%Money{} = money) do
      sign = if money.amount >= 0, do: 1, else: -1
      coef = Money.abs(money).amount
      exp = -Money.Currency.exponent!(money)

      Decimal.new(sign, coef, exp)
    end
  end

  defp format_number(%Money{amount: amount}, separator, delimiter, fractional_unit, strip_insignificant_zeros, money) do
    exponent = Currency.exponent(money)
    amount_abs = if amount < 0, do: -amount, else: amount
    amount_str = Integer.to_string(amount_abs)

    [sub_unit, super_unit] =
      amount_str
      |> String.pad_leading(exponent + 1, "0")
      |> String.reverse()
      |> String.split_at(exponent)
      |> Tuple.to_list()
      |> Enum.map(&String.reverse/1)

    super_unit = super_unit |> reverse_group(3) |> Enum.join(separator)

    sub_unit = prepare_sub_unit(sub_unit, %{strip_insignificant_zeros: strip_insignificant_zeros})

    if fractional_unit && sub_unit != "" do
      [super_unit, sub_unit] |> Enum.join(delimiter)
    else
      super_unit
    end
end defp prepare_sub_unit([value], options), do: prepare_sub_unit(value, options) defp prepare_sub_unit([], _), do: "" defp prepare_sub_unit(value, %{strip_insignificant_zeros: false}), do: value defp prepare_sub_unit(value, %{strip_insignificant_zeros: true}), do: Regex.replace(~r/0+$/, value, "") defp get_display_options(m, opts) do {separator, delimiter} = get_parse_options(opts) default_symbol = Application.get_env(:money, :symbol, true) default_symbol_on_right = Application.get_env(:money, :symbol_on_right, false) default_symbol_space = Application.get_env(:money, :symbol_space, false) default_fractional_unit = Application.get_env(:money, :fractional_unit, true) default_strip_insignificant_zeros = Application.get_env(:money, :strip_insignificant_zeros, false) default_code = Application.get_env(:money, :code, false) symbol = if Keyword.get(opts, :symbol, default_symbol), do: Currency.symbol(m), else: "" symbol_on_right = Keyword.get(opts, :symbol_on_right, default_symbol_on_right) symbol_space = Keyword.get(opts, :symbol_space, default_symbol_space) fractional_unit = Keyword.get(opts, :fractional_unit, default_fractional_unit) strip_insignificant_zeros = Keyword.get(opts, :strip_insignificant_zeros, default_strip_insignificant_zeros) code = Keyword.get(opts, :code, default_code) {separator, delimiter, symbol, symbol_on_right, symbol_space, fractional_unit, strip_insignificant_zeros, code} end defp get_parse_options(opts) do default_separator = Application.get_env(:money, :separator, ",") separator = Keyword.get(opts, :separator, default_separator) default_delimiter = Application.get_env(:money, :delimiter) || Application.get_env(:money, :delimeter, ".") delimiter = Keyword.get(opts, :delimiter) || Keyword.get(opts, :delimeter, default_delimiter) {separator, delimiter} end defp fail_currencies_must_be_equal(a, b) do raise ArgumentError, message: "Currency of #{a.currency} must be the same as #{b.currency}" end defp reverse_group(str, count) when is_binary(str) do reverse_group(str, Kernel.abs(count), []) end defp reverse_group("", _count, list) do list end defp reverse_group(str, count, list) do {first, last} = String.split_at(str, -count) reverse_group(first, count, [last | list]) end defimpl String.Chars do def to_string(%Money{} = m) do Money.to_string(m) end end end
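A short usage sketch (not part of the original file); the results follow from the `divide/2` and `to_string/2` implementations and doctests above:

```elixir
# divide/2 hands out the remainder one cent at a time, starting
# from the head of the returned list:
Money.divide(Money.new(100, :USD), 3)
#=> [%Money{amount: 34, currency: :USD},
#    %Money{amount: 33, currency: :USD},
#    %Money{amount: 33, currency: :USD}]

# to_string/2 honours the display options documented above:
Money.to_string(Money.new(123456, :GBP), code: true)
#=> "£1,234.56 GBP"
```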
lib/money.ex
0.895546
0.671942
money.ex
starcoder
defmodule Mix.Tasks.PromEx.Dashboard.Export do
  @moduledoc """
  This will render a PromEx dashboard either to STDOUT or to a file depending on
  the CLI arguments that are provided. The following CLI flags are supported:
  ```md
  -d, --dashboard  The name of the dashboard that you would like to export from
                   PromEx. For example, if you would like to export the Ecto
                   dashboard, provide the value `ecto.json`.

  -m, --module     The PromEx module which will be used to render the dashboards.
                   This is needed to fetch any relevant assigns from the
                   `c:PromEx.dashboard_assigns/0` callback

  -s, --stdout     A boolean flag denoting that the rendered dashboard should be
                   output to STDOUT.

  -f, --file_path  If you would like to write the generated JSON dashboard
                   definition to a file, you can provide a relative file path in
                   the project's `priv` directory.

  -a, --assign     Any additional assigns you would like to pass to the dashboard
                   for rendering. You are able to pass multiple assigns by passing
                   multiple --assign arguments. For example:
                   `--assign some=thing --assign another=thing`.
  ```
  """

  @shortdoc "Export a rendered dashboard to STDOUT or a file"

  use Mix.Task

  alias PromEx.DashboardRenderer

  @impl true
  def run(args) do
    # Compile the project
    Mix.Task.run("compile")

    # Get CLI args and set up the exporter
    cli_args = parse_options(args)

    prom_ex_module =
      "Elixir.#{cli_args.module}"
      |> String.to_atom()
      |> Code.ensure_compiled()
      |> case do
        {:module, module} ->
          module

        {:error, reason} ->
          raise "#{cli_args.module} is not a valid PromEx module because #{inspect(reason)}"
      end

    check_valid_dashboard(cli_args)
    render_dashboard(prom_ex_module, cli_args)
  end

  defp parse_options(args) do
    cli_options = [module: :string, stdout: :boolean, file_path: :string, dashboard: :string, assign: [:string, :keep]]
    cli_aliases = [m: :module, s: :stdout, f: :file_path, d: :dashboard, a: :assign]

    # Parse out the arguments and put defaults where necessary
    args
    |> OptionParser.parse(aliases: cli_aliases, strict: cli_options)
    |> case do
      {options, _remaining_args, [] = _errors} ->
        options
        |> Enum.reduce(%{}, fn
          {:assign, assign_value}, acc when is_map_key(acc, :assigns) ->
            [key, value] = String.split(assign_value, "=", parts: 2)
            new_assign = {String.to_atom(key), value}
            Map.put(acc, :assigns, [new_assign | acc.assigns])

          {:assign, assign_value}, acc ->
            [key, value] = String.split(assign_value, "=", parts: 2)
            Map.put(acc, :assigns, [{String.to_atom(key), value}])

          {opt, value}, acc ->
            Map.put(acc, opt, value)
        end)
        |> Map.put_new(:assigns, [])

      {_options, _remaining_args, errors} ->
        raise "Invalid CLI args were provided: #{inspect(errors)}"
    end
    |> Map.put_new(:stdout, false)
    |> Map.put_new(:dashboard, nil)
    |> Map.put_new_lazy(:module, fn ->
      Mix.Project.config()
      |> Keyword.get(:app)
      |> Atom.to_string()
      |> Macro.camelize()
      |> Kernel.<>(".PromEx")
    end)
  end

  defp check_valid_dashboard(%{dashboard: nil}) do
    raise "You must provide a --dashboard argument"
  end

  defp check_valid_dashboard(_args) do
    :ok
  end

  defp render_dashboard(prom_ex_module, cli_args) do
    user_provided_assigns = prom_ex_module.dashboard_assigns()

    default_title =
      prom_ex_module.__otp_app__()
      |> Atom.to_string()
      |> Macro.camelize()

    default_dashboard_name =
      cli_args.dashboard
      |> Path.basename()
      |> normalize_file_name()
      |> Macro.camelize()

    default_dashboard_assigns = [
      otp_app: prom_ex_module.__otp_app__(),
      title: "#{default_title} - PromEx #{default_dashboard_name} Dashboard"
    ]

    dashboard_render =
      :prom_ex
      |> DashboardRenderer.build(cli_args.dashboard, prom_ex_module.__otp_app__())
      |>
DashboardRenderer.merge_assigns(default_dashboard_assigns) |> DashboardRenderer.merge_assigns(user_provided_assigns) |> DashboardRenderer.merge_assigns(cli_args.assigns) |> DashboardRenderer.render_dashboard(prom_ex_module) |> DashboardRenderer.decode_dashboard() |> check_dashboard_render() handle_export(cli_args, prom_ex_module, dashboard_render) end defp handle_export(%{stdout: true}, _prom_ex_module, dashboard_render) do IO.puts(dashboard_render.rendered_file) end defp handle_export(%{file_path: file_path}, prom_ex_module, dashboard_render) do priv_path = prom_ex_module.__otp_app__() |> :code.priv_dir() |> :erlang.list_to_binary() full_path = Path.join([priv_path, file_path]) File.write!(full_path, dashboard_render.rendered_file) end defp handle_export(_cli_args, _prom_ex_module, _dashboard_render) do raise "You must specify either a file path to write the dashboard to, or provide the --stdout flag to print to STDOUT" end defp check_dashboard_render(%DashboardRenderer{valid_json?: false}) do raise "The rendered dashboard yielded an invalid JSON data structure. Be sure to check your assigns." end defp check_dashboard_render(%DashboardRenderer{valid_file?: false}) do raise "The dashboard that you selected does not exist in PromEx. Be sure that you typed it correctly." end defp check_dashboard_render(dashboard_render) do dashboard_render end defp normalize_file_name(path) do if Path.extname(path) == "" do path else path |> Path.rootname() |> normalize_file_name() end end end
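A hedged sketch of driving the task programmatically; `MyApp.PromEx` is a placeholder module name, not part of this file:

```elixir
# Equivalent to running the task from the command line; assumes a
# hypothetical MyApp.PromEx module exists in the compiled project.
Mix.Task.run("prom_ex.dashboard.export", [
  "--module", "MyApp.PromEx",
  "--dashboard", "ecto.json",
  "--stdout"
])
```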
lib/mix/tasks/prom_ex.dashboard.export.ex
0.810591
0.799638
prom_ex.dashboard.export.ex
starcoder
defmodule PewPew.Mailer do
  @moduledoc """
  This is the module used to manage the messages themselves.
  """

  defstruct [
    to: [],
    cc: [],
    bcc: [],
    subject: nil,
    text: nil,
    html: nil,
    attach: [],
    from: nil
  ]

  # The Mailgun API base path.
  @mailgun "https://api.mailgun.net/v3"

  @doc """
  This is the function used to generate a new mailer. It populates the struct
  properly, leaving undefined fields at their default values. Note that if you
  do not specify them here, they will not be sent to the API, for better or for
  worse. If you do not wish to specify them here, you can always modify them
  later on using normal struct manipulation.

  The available fields are:

  - `:from` which specifies the address the email should be sent from. This can
    be used to override the configured default (if there is no configured
    default, then it is required).
  - `:to` which specifies the addresses the email should be sent to. This can
    be a list or single address (no more than 1000 addresses long). This is
    required.
  - `:cc` which specifies the addresses the email should be CCed to. This can
    be a list or single address (no more than 1000 addresses long).
  - `:bcc` which specifies the addresses the email should be BCCed to. This can
    be a list or single address (no more than 1000 addresses long).
  - `:subject` which specifies the subject line on the email. This is required.
  - `:text` which specifies the `text/plain` version of the email. Either text
    or html (or both) must be specified.
  - `:html` which specifies the `text/html` version of the email. Either text
    or html (or both) must be specified.
  - `:attach` which specifies any attachments you want on the email.
  """
  def new(opts \\ []) do
    %__MODULE__{}
    # `:from` is listed as an available field above, so honor it here; `nil`
    # keeps the struct default and falls back to the configured sender.
    |> set_from(Keyword.get(opts, :from))
    |> set_to(Keyword.get(opts, :to, []))
    |> set_cc(Keyword.get(opts, :cc, []))
    |> set_bcc(Keyword.get(opts, :bcc, []))
    |> set_subject(Keyword.get(opts, :subject))
    |> set_text(Keyword.get(opts, :text))
    |> set_html(Keyword.get(opts, :html))
    |> set_attach(Keyword.get(opts, :attach, []))
  end

  @doc """
  This function is used to set the `:from` field in the given struct.
  """
  def set_from(%__MODULE__{} = m, from), do: %{m | from: from}

  @doc """
  This function is used to set the `:to` field in the given struct.
  """
  def set_to(%__MODULE__{} = m, to), do: %{m | to: to}

  @doc """
  This function is used to add the given addresses to the existing (or empty)
  list of `:to` addresses.
  """
  def add_to(%__MODULE__{to: []} = m, to), do: set_to(m, to)
  def add_to(%__MODULE__{to: [_ | _]} = m, to) when is_binary(to),
    do: %{m | to: [to | m.to]}
  def add_to(%__MODULE__{to: [_ | _]} = m, to) when is_list(to),
    do: %{m | to: to ++ m.to}

  @doc """
  This function is used to set the `:cc` field in the given struct.
  """
  def set_cc(%__MODULE__{} = m, cc), do: %{m | cc: cc}

  @doc """
  This function is used to add the given addresses to the existing (or empty)
  list of `:cc` addresses.
  """
  def add_cc(%__MODULE__{cc: []} = m, cc), do: set_cc(m, cc)
  def add_cc(%__MODULE__{cc: [_ | _]} = m, cc) when is_binary(cc),
    do: %{m | cc: [cc | m.cc]}
  def add_cc(%__MODULE__{cc: [_ | _]} = m, cc) when is_list(cc),
    do: %{m | cc: cc ++ m.cc}

  @doc """
  This function is used to set the `:bcc` field in the given struct.
  """
  def set_bcc(%__MODULE__{} = m, bcc), do: %{m | bcc: bcc}

  @doc """
  This function is used to add the given addresses to the existing (or empty)
  list of `:bcc` addresses.
""" def add_bcc(%__MODULE__{bcc: []} = m, bcc), do: set_bcc(m, bcc) def add_bcc(%__MODULE__{bcc: [_ | _]} = m, bcc) when is_binary(bcc), do: %{m | bcc: [bcc | m.bcc]} def add_bcc(%__MODULE__{bcc: [_ | _]} = m, bcc) when is_list(bcc), do: %{m | bcc: bcc ++ m.bcc} @doc """ This function is used to set the `:subject` field in the given struct. """ def set_subject(%__MODULE__{} = m, subject), do: %{m | subject: subject} @doc """ This function is used to set the `:text` field in the given struct, which defines the plaintext version of the email. """ def set_text(%__MODULE__{} = m, text), do: %{m | text: text} @doc """ This function is used to set the `:html` field in the given struct, which defines the HTML version of the email. """ def set_html(%__MODULE__{} = m, html), do: %{m | html: html} @doc """ This function is used to set the `:attach` field in the given struct, which defines which files should be attached to the email. """ def set_attach(%__MODULE__{} = m, attach), do: %{m | attach: attach} @doc """ This function is used to add to the `:attach` field in the given struct. """ def add_attach(%__MODULE__{attach: []} = m, attach), do: set_attach(m, attach) def add_attach(%__MODULE__{attach: [_ | _]} = m, attach) when is_binary(attach), do: %{m | attach: [attach | m.attach]} def add_attach(%__MODULE__{attach: [_ | _]} = m, attach) when is_list(attach), do: %{m | attach: attach ++ m.attach} @doc """ This function is used to send an email, adding any specified attachments, and either text or HTML (or both), depending on what was specified. If `:to` is a list, we send a batch request, otherwise we send a single request. For now, if the operation was successful, we return `:ok` (otherwise an error tuple is returned). """ def send(%__MODULE__{to: nil}), do: {:error, ":to is a required field"} def send(%__MODULE__{subject: nil}), do: {:error, ":subject is a required field"} def send(%__MODULE__{text: nil, html: nil}), do: {:error, "must specify at least one of :text or :html"} def send(%__MODULE__{} = m) do from = if is_nil(m.from), do: from(), else: m.from fields = [{"from", from}] |> maybe_set("subject", m.subject) |> maybe_set("text", m.text) |> maybe_set("html", m.html) |> attach(m.attach) |> batch(m.to) headers = [{"Content-Type", "multipart/form-data"}] opts = [hackney: [basic_auth: {"api", key()}]] case HTTPoison.post(endpoint(), {:multipart, fields}, headers, opts) do {:ok, _resp} -> :ok {:error, reason} -> {:error, reason} end end # This function is used to set a field if its value is not nil. defp maybe_set(fields, _name, value) when is_nil(value), do: fields defp maybe_set(fields, name, value), do: [{name, value} | fields] # This is the function responsible for delegating the adding of attachments, # based on whether it's a string or list. defp attach(fields, file) when is_binary(file), do: add_attachment(fields, file) defp attach(fields, []), do: fields defp attach(fields, [file | rest]) do fields |> add_attachment(file) |> attach(rest) end # This is the function responsible for adding an attachment to the fields. defp add_attachment(fields, file) when is_binary(file) do name = "attachment" data = [name: name, filename: Path.basename(file)] [{:file, file, {"form-data", data}, []} | fields] end # Simple helper to define the recipient variables on the form fields, which # allows us to set up a batch send. 
defp batch(fields, to) when is_binary(to), do: [{"to", to} | fields] defp batch(fields, to) when is_list(to) do vars = to |> Enum.with_index() |> Enum.map(fn {addr, i} -> {addr, %{"id" => i}} end) |> Enum.into(%{}) |> Poison.encode!() [{"recipient-variables", vars} | fields] ++ Enum.map(to, &({"to", &1})) end # Simple helper to define the endpoint for mailing messages. defp endpoint, do: "#{@mailgun}/#{domain()}/messages" # Simple helper to fetch the `from` address in the email. defp from, do: Application.get_env(:pewpew, :from) # Simple helper to fetch the domain name for the Mailgun account. defp domain, do: Application.get_env(:pewpew, :domain) # Simple helper to fetch the API key for the Mailgun account. defp key, do: Application.get_env(:pewpew, :key) end
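A sketch of end-to-end usage, assuming `:pewpew` is configured with `:from`, `:domain`, and `:key`; the addresses are placeholders:

```elixir
# Build a mailer, override the sender, and send.
# Returns :ok on success or {:error, reason}.
PewPew.Mailer.new(
  to: ["alice@example.com", "bob@example.com"],
  subject: "Hello",
  text: "Plaintext body",
  html: "<p>HTML body</p>"
)
|> PewPew.Mailer.set_from("noreply@example.com")
|> PewPew.Mailer.send()
```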
lib/pewpew/mailer.ex
0.796055
0.523299
mailer.ex
starcoder
defmodule Elsa.Supervisor do
  @moduledoc """
  Top-level supervisor that orchestrates all other components of
  the Elsa library. Allows for a single point of integration into
  your application supervision tree and configuration by way of
  a series of nested keyword lists.

  Components not needed by a running application (if your application
  _only_ consumes messages from Kafka and never produces back to it)
  can be safely omitted from the configuration.
  """

  use Supervisor

  @doc """
  Defines a connection for locating the Elsa Registry process.
  """
  @spec registry(String.t() | atom()) :: atom()
  def registry(connection) do
    :"elsa_registry_#{connection}"
  end

  def via_name(registry, name) do
    {:via, Elsa.Registry, {registry, name}}
  end

  def dynamic_supervisor(registry) do
    via_name(registry, DynamicSupervisor)
  end

  @doc """
  Starts the top-level Elsa supervisor and links it to the current process.

  Starts a brod client and a custom process registry by default
  and then conditionally starts and takes supervision of any
  brod group-based consumers or producer processes defined.

  ## Options

  * `:endpoints` - Required. Keyword list of kafka brokers. ex. `[localhost: 9092]`

  * `:connection` - Required. Atom used to track kafka connection.

  * `:config` - Optional. Client configuration options passed to brod.

  * `:producer` - Optional. Can be a single producer configuration or multiples in a list.

  * `:group_consumer` - Optional. Group consumer configuration.

  * `:consumer` - Optional. Simple topic consumer configuration.

  ## Producer Config

  * `:topic` - Required. Producer will be started for configured topic.

  * `:poll` - Optional. If set to a number in milliseconds, will poll for new partitions and start up producers on the fly.

  * `:config` - Optional. Producer configuration options passed to `brod_producer`.

  ## Group Consumer Config

  * `:group` - Required. Name of consumer group.

  * `:topics` - Required. List of topics to subscribe to.

  * `:handler` - Required. Module that implements Elsa.Consumer.MessageHandler behaviour.

  * `:handler_init_args` - Optional. Any args to be passed to init function in handler module.

  * `:assignment_received_handler` - Optional. Arity 4 function that will be called with any partition assignments.
    Return `:ok` for the assignment to be subscribed to. Return `{:error, reason}` to stop subscription.
    Arguments are group, topic, partition, generation_id.

  * `:assignments_revoked_handler` - Optional. Zero arity function that will be called when assignments are revoked.
    All workers will be shut down before the callback is invoked and it must return `:ok`.

  * `:config` - Optional. Consumer configuration options passed to `brod_consumer`.

  ## Consumer Config

  * `:topic` - Required. Topic to subscribe to.

  * `:begin_offset` - Required. Where to begin consuming from. Must be either `:earliest`, `:latest`, or a valid offset integer.

  * `:handler` - Required. Module that implements `Elsa.Consumer.MessageHandler` behaviour.

  * `:partition` - Optional. Topic partition to subscribe to. If `nil`, will default to all partitions.

  * `:handler_init_args` - Optional. Any args to be passed to init function in handler module.

  * `:poll` - Optional. If set to a number of milliseconds, will poll for new partitions and start up consumers on the fly.
## Example ``` Elsa.Supervisor.start_link([ endpoints: [localhost: 9092], connection: :conn, producer: [topic: "topic1"], consumer: [ topic: "topic2", partition: 0, begin_offset: :earliest, handler: ExampleHandler ], group_consumer: [ group: "example-group", topics: ["topic1"], handler: ExampleHandler, config: [ begin_offset: :earliest, offset_reset_policy: :reset_to_earliest ] ] ]) ``` """ @spec start_link(keyword()) :: GenServer.on_start() def start_link(args) do opts = Keyword.take(args, [:name]) Supervisor.start_link(__MODULE__, args, opts) end @doc """ Starts producer processes under Elsa's `DynamicSupervisor` for the specified connection. Polling cannot be configured for producers at runtime. Configuration at `Elsa.Supervisor` start is how polling will behave for all producers on that connection. Other than polling, producer configuration is the same as `Elsa.Supervisor.start_link/1`. ## Producer Config * `:topic` - Required. Producer will be started for configured topic. * `:config` - Optional. Producer configuration options passed to `brod_producer`. """ @spec start_producer(String.t() | atom, keyword) :: [DynamicSupervisor.on_start_child()] def start_producer(connection, args) do registry = registry(connection) process_manager = via_name(registry, :producer_process_manager) Elsa.Producer.Initializer.init(registry, args) |> Enum.map(&Elsa.DynamicProcessManager.start_child(process_manager, &1)) end def init(args) do connection = Keyword.fetch!(args, :connection) registry = registry(connection) children = [ {Elsa.Registry, name: registry}, {DynamicSupervisor, strategy: :one_for_one, name: dynamic_supervisor(registry)}, start_client(args), producer_spec(registry, Keyword.get(args, :producer, [])), start_group_consumer(connection, registry, Keyword.get(args, :group_consumer)), start_consumer(connection, registry, Keyword.get(args, :consumer)) ] |> List.flatten() Supervisor.init(children, strategy: :rest_for_one) end defp start_client(args) do connection = Keyword.fetch!(args, :connection) endpoints = Keyword.fetch!(args, :endpoints) config = Keyword.get(args, :config, []) {Elsa.Wrapper, mfa: {:brod_client, :start_link, [endpoints, connection, config]}, register: {registry(connection), :brod_client}} end defp start_group_consumer(_connection, _registry, nil), do: [] defp start_group_consumer(connection, registry, args) do group_consumer_args = args |> Keyword.put(:registry, registry) |> Keyword.put(:connection, connection) |> Keyword.put(:name, via_name(registry, Elsa.Group.Supervisor)) {Elsa.Group.Supervisor, group_consumer_args} end defp start_consumer(_connection, _registry, nil), do: [] defp start_consumer(connection, registry, args) do topics = case Keyword.has_key?(args, :partition) do true -> [{Keyword.fetch!(args, :topic), Keyword.fetch!(args, :partition)}] false -> [Keyword.fetch!(args, :topic)] end consumer_args = args |> Keyword.put(:registry, registry) |> Keyword.put(:connection, connection) |> Keyword.put(:topics, topics) |> Keyword.put_new(:config, []) {Elsa.DynamicProcessManager, id: :worker_process_manager, dynamic_supervisor: dynamic_supervisor(registry), poll: Keyword.get(args, :poll, false), initializer: {Elsa.Consumer.Worker.Initializer, :init, [consumer_args]}} end defp producer_spec(registry, args) do initializer = case Keyword.take(args, [:topic, :config]) do [] -> nil init_args -> {Elsa.Producer.Initializer, :init, [registry, init_args]} end [ { Elsa.DynamicProcessManager, id: :producer_process_manager, dynamic_supervisor: dynamic_supervisor(registry), 
initializer: initializer, poll: Keyword.get(args, :poll, false), name: via_name(registry, :producer_process_manager) } ] end end
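A hedged sketch of embedding the supervisor in an application tree; the endpoint, connection, and topic names are placeholders:

```elixir
# Elsa.Supervisor exposes a child spec via `use Supervisor`,
# so it can sit directly in an application's children list.
children = [
  {Elsa.Supervisor,
   endpoints: [localhost: 9092],
   connection: :my_conn,
   producer: [topic: "events"]}
]

Supervisor.start_link(children, strategy: :one_for_one)
```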
deps/elsa/lib/elsa/supervisor.ex
0.861844
0.805861
supervisor.ex
starcoder
defmodule Cldr.Time.Interval do
  @moduledoc """
  Interval formats allow for software to format intervals like "Jan 10-12, 2008" as a
  shorter and more natural format than "Jan 10, 2008 - Jan 12, 2008". They are designed
  to take a start and end date, time or datetime plus a formatting pattern
  and use that information to produce a localized format.

  See `Cldr.Interval.to_string/3` and `Cldr.Time.Interval.to_string/3`.

  """

  alias Cldr.DateTime.Format

  import Cldr.Date.Interval,
    only: [
      format_error: 2,
      style_error: 1,
      greatest_difference: 2
    ]

  import Cldr.Calendar,
    only: [
      time: 0
    ]

  # Time styles not defined
  # by a grouping but can still
  # be used directly

  @doc false
  @style_map %{
    # Can be used with any
    # time
    time: %{
      short: :h,
      medium: :hm,
      long: :hm
    },

    # Includes the timezone
    zone: %{
      short: :hv,
      medium: :hmv,
      long: :hmv
    },

    # Includes flex times
    # annotation like
    # ".. in the evening"
    flex: %{
      short: :bh,
      medium: :bhm,
      long: :bhm
    }
  }

  @styles Map.keys(@style_map)
  @formats Map.keys(@style_map.time)

  @default_format :medium
  @default_style :time

  def styles do
    @style_map
  end

  @doc false
  def to_string(unquote(time()) = from, unquote(time()) = to) do
    {locale, backend} = Cldr.locale_and_backend_from(nil, nil)
    to_string(from, to, backend, locale: locale)
  end

  @doc false
  def to_string(unquote(time()) = from, unquote(time()) = to, backend) when is_atom(backend) do
    {locale, backend} = Cldr.locale_and_backend_from(nil, backend)
    to_string(from, to, backend, locale: locale)
  end

  @doc false
  def to_string(unquote(time()) = from, unquote(time()) = to, options) when is_list(options) do
    {locale, backend} = Cldr.locale_and_backend_from(options)
    to_string(from, to, backend, Keyword.put_new(options, :locale, locale))
  end

  @doc """
  Returns a string representing the formatted
  interval formed by two times.

  ## Arguments

  * `from` is any map that conforms to the
    `Calendar.time` type.

  * `to` is any map that conforms to the
    `Calendar.time` type. `to` must occur
    on or after `from`.

  * `backend` is any module that includes `use Cldr` and
    is therefore a `Cldr` backend module

  * `options` is a keyword list of options. The default is `[]`.

  ## Options

  * `:format` is one of `:short`, `:medium` or `:long` or a
    specific format type or a string representation of an interval
    format. The default is `:medium`.

  * `:style` supports different formatting styles. The
    alternatives are `:time`, `:zone`,
    and `:flex`. The default is `:time`.

  * `locale` is any valid locale name returned by
    `Cldr.known_locale_names/0` or a `Cldr.LanguageTag` struct. The default is
    `Cldr.get_locale/0`

  * `number_system:` a number system into which the formatted date digits
    should be transliterated

  ## Returns

  * `{:ok, string}` or

  * `{:error, {exception, reason}}`

  ## Notes

  * For more information on interval format string
    see `Cldr.Interval`.

  * The available predefined formats that can be applied are the
    keys of the map returned by `Cldr.DateTime.Format.interval_formats("en", :gregorian)`
    where `"en"` can be replaced by any configured locale name and `:gregorian`
    is the underlying `CLDR` calendar type.
  * In the case where `from` and `to` are equal, a single
    time is formatted instead of an interval

  ## Examples

      iex> Cldr.Time.Interval.to_string ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, format: :short
      {:ok, "10 – 10"}

      iex> Cldr.Time.Interval.to_string ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, format: :medium
      {:ok, "10:00 – 10:03"}

      iex> Cldr.Time.Interval.to_string ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, format: :long
      {:ok, "10:00 – 10:03"}

      iex> Cldr.Time.Interval.to_string ~T[10:00:00], ~T[10:03:00], MyApp.Cldr,
      ...> format: :long, style: :flex
      {:ok, "10:00 – 10:03 in the morning"}

      iex> Cldr.Time.Interval.to_string ~U[2020-01-01 00:00:00.0Z], ~U[2020-01-01 10:00:00.0Z],
      ...> MyApp.Cldr, format: :long, style: :flex
      {:ok, "12:00 – 10:00 in the morning"}

      iex> Cldr.Time.Interval.to_string ~U[2020-01-01 00:00:00.0Z], ~U[2020-01-01 10:00:00.0Z],
      ...> MyApp.Cldr, format: :long, style: :zone
      {:ok, "00:00 – 10:00 Etc/UTC"}

      iex> Cldr.Time.Interval.to_string ~T[10:00:00], ~T[10:03:00], MyApp.Cldr,
      ...> format: :long, style: :flex, locale: "th"
      {:ok, "10:00 – 10:03 ในตอนเช้า"}

  """
  def to_string(from, to, backend, options \\ [])

  def to_string(%{calendar: calendar} = from, %{calendar: calendar} = to, backend, options)
      when calendar == Calendar.ISO do
    from = %{from | calendar: Cldr.Calendar.Gregorian}
    to = %{to | calendar: Cldr.Calendar.Gregorian}

    to_string(from, to, backend, options)
  end

  def to_string(unquote(time()) = from, unquote(time()) = to, backend, options) do
    {locale, backend} = Cldr.locale_and_backend_from(options[:locale], backend)
    formatter = Module.concat(backend, DateTime.Formatter)
    format = Keyword.get(options, :format, @default_format)
    locale_number_system = Cldr.Number.System.number_system_from_locale(locale, backend)
    number_system = Keyword.get(options, :number_system, locale_number_system)

    options =
      options
      |> Keyword.put(:locale, locale)
      |> Keyword.put(:number_system, number_system)

    with {:ok, _} <- from_less_than_or_equal_to(from, to),
         {:ok, backend} <- Cldr.validate_backend(backend),
         {:ok, locale} <- Cldr.validate_locale(locale, backend),
         {:ok, _} <- Cldr.Number.validate_number_system(locale, number_system, backend),
         {:ok, calendar} <- Cldr.Calendar.validate_calendar(from.calendar),
         {:ok, formats} <- Format.interval_formats(locale, calendar.cldr_calendar_type, backend),
         {:ok, [left, right]} <- resolve_format(from, to, formats, options),
         {:ok, left_format} <- formatter.format(from, left, locale, options),
         {:ok, right_format} <- formatter.format(to, right, locale, options) do
      {:ok, left_format <> right_format}
    else
      {:error, :no_practical_difference} ->
        options = Cldr.DateTime.Interval.adjust_options(options, locale, format)
        Cldr.Time.to_string(from, backend, options)

      other ->
        other
    end
  end

  @doc false
  def to_string!(unquote(time()) = from, unquote(time()) = to) do
    {locale, backend} = Cldr.locale_and_backend_from(nil, nil)
    to_string!(from, to, backend, locale: locale)
  end

  @doc """
  Returns a string representing the formatted
  interval formed by two times.

  ## Arguments

  * `from` is any map that conforms to the
    `Calendar.time` type.

  * `to` is any map that conforms to the
    `Calendar.time` type. `to` must occur
    on or after `from`.

  * `backend` is any module that includes `use Cldr` and
    is therefore a `Cldr` backend module

  * `options` is a keyword list of options. The default is `[]`.

  ## Options

  * `:format` is one of `:short`, `:medium` or `:long` or a
    specific format type or a string representation of an interval
    format. The default is `:medium`.

  * `:style` supports different formatting styles.
The alternatives are `:time`, `:zone`, and `:flex`. The default is `:time`. * `locale` is any valid locale name returned by `Cldr.known_locale_names/0` or a `Cldr.LanguageTag` struct. The default is `Cldr.get_locale/0` * `number_system:` a number system into which the formatted date digits should be transliterated ## Returns * `string` or * raises an exception ## Notes * For more information on interval format string see `Cldr.Interval`. * The available predefined formats that can be applied are the keys of the map returned by `Cldr.DateTime.Format.interval_formats("en", :gregorian)` where `"en"` can be replaced by any configured locale name and `:gregorian` is the underlying `CLDR` calendar type. * In the case where `from` and `to` are equal, a single time is formatted instead of an interval ## Examples iex> Cldr.Time.Interval.to_string! ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, format: :short "10 – 10" iex> Cldr.Time.Interval.to_string! ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, format: :medium "10:00 – 10:03" iex> Cldr.Time.Interval.to_string! ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, format: :long "10:00 – 10:03" iex> Cldr.Time.Interval.to_string! ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, ...> format: :long, style: :flex "10:00 – 10:03 in the morning" iex> Cldr.Time.Interval.to_string! ~U[2020-01-01 00:00:00.0Z], ~U[2020-01-01 10:00:00.0Z], ...> MyApp.Cldr, format: :long, style: :flex "12:00 – 10:00 in the morning" iex> Cldr.Time.Interval.to_string! ~U[2020-01-01 00:00:00.0Z], ~U[2020-01-01 10:00:00.0Z], ...> MyApp.Cldr, format: :long, style: :zone "00:00 – 10:00 Etc/UTC" iex> Cldr.Time.Interval.to_string! ~T[10:00:00], ~T[10:03:00], MyApp.Cldr, ...> format: :long, style: :flex, locale: "th" "10:00 – 10:03 ในตอนเช้า" """ def to_string!(from, to, backend, options \\ []) do case to_string(from, to, backend, options) do {:ok, string} -> string {:error, {exception, reason}} -> raise exception, reason end end defp from_less_than_or_equal_to(from, to) do case Time.compare(from, to) do comp when comp in [:eq, :lt] -> {:ok, comp} _other -> {:error, Cldr.Date.Interval.datetime_order_error(from, to)} end end defp resolve_format(from, to, formats, options) do format = Keyword.get(options, :format, @default_format) style = Keyword.get(options, :style, @default_style) with {:ok, style} <- validate_style(style), {:ok, format} <- validate_format(formats, style, format), {:ok, greatest_difference} <- greatest_difference(from, to) do greatest_difference_format(format, greatest_difference) end end defp greatest_difference_format(format, _) when is_binary(format) do {:ok, format} end defp greatest_difference_format(format, :H) do case Map.fetch(format, :h) do :error -> {:error, format_error(format, format)} success -> success end end defp greatest_difference_format(format, :m = difference) do case Map.fetch(format, difference) do :error -> greatest_difference_format(format, :H) success -> success end end defp greatest_difference_format(_format, _difference) do {:error, :no_practical_difference} end defp validate_style(style) when style in @styles, do: {:ok, style} defp validate_style(style), do: {:error, style_error(style)} # Using standard format terms like :short, :medium, :long defp validate_format(formats, style, format) when format in @formats do format_key = styles() |> Map.fetch!(style) |> Map.fetch!(format) Map.fetch(formats, format_key) end # Direct specification of a format defp validate_format(formats, _style, format_key) when is_atom(format_key) do case Map.fetch(formats, format_key) do :error -> 
{:error, format_error(formats, format_key)} success -> success end end # Direct specification of a format as a string defp validate_format(_formats, _style, format) when is_binary(format) do Cldr.DateTime.Format.split_interval(format) end end
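A short usage sketch (not part of the original file); `MyApp.Cldr` is a placeholder backend and the output shown assumes the `:en` locale, following the doctests above:

```elixir
# Same hour, different minutes: the :m interval format is selected.
Cldr.Time.Interval.to_string(~T[10:00:00], ~T[10:45:00], MyApp.Cldr, format: :medium)
#=> {:ok, "10:00 – 10:45"}

# `to` must not come before `from`; the order is validated, not swapped.
{:error, _reason} =
  Cldr.Time.Interval.to_string(~T[10:45:00], ~T[10:00:00], MyApp.Cldr)
```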
lib/cldr/interval/time.ex
0.918361
0.650301
time.ex
starcoder
defmodule Bella.Server.Reconciler do @moduledoc """ Continuously reconciles a set of kubernetes resources. `reconcile/1` will be executed asynchronously with each result returned from `reconcilable_resources/0`. `reconcilable_resources/0` has a default implementation of running `K8s.Client.stream/2` with `reconcile_operation/0`. For a working example of the `Reconciler` see `Bella.Server.Scheduler` ## Examples Print every pod. Not very useful, but a simple copy-paste example. defmodule PodPrinterReconciler do use Bella.Server.Reconciler, frequency: 15 @impl true def reconcile(pod) do IO.inspect(pod) :ok end @impl true def reconcile_operation(), do: K8s.Client.list("v1", :pods, namespace: "default") @impl true def reconcilable_resources() do operation = reconcile_operation() cluster = Bella.Config.cluster_name() K8s.Client.stream(operation, cluster) end end PodPrinterReconciler.start_link() A quick and dirty chaos monkey for pods. 20% chance of eviction every 15 seconds. defmodule ChaosMonkeyReconciler do use Bella.Server.Reconciler, frequency: 15 @percent_chance_evicted 20 @impl true def reconcile(pod) do chance = :rand.uniform(100) if chance < @percent_chance_evicted do my_function_to_evict_pod(pod) end :ok end @impl true def reconcile_operation(), do: K8s.Client.list("v1", :pods, namespace: :all) @impl true def reconcilable_resources() do operation = reconcile_operation() cluster = Bella.Config.cluster_name() K8s.Client.stream(operation, cluster) end end ChaosMonkeyReconciler.start_link() Reconcile a CRD's resources every 15 seconds defmodule MyCustomResourceReconciler do use Bella.Server.Reconciler, frequency: 15 @impl true def reconcile(resource) do # You should do something much cooler than inspect here... IO.inspect(resource) :ok end @impl true def reconcile_operation() do K8s.Client.list("example.com/v1", "MyCustomResourceDef", namespace: :all) end @impl true def reconcilable_resources() do operation = reconcile_operation() cluster = Bella.Config.cluster_name() K8s.Client.stream(operation, cluster) end end MyCustomResourceReconciler.start_link() """ @doc """ Reconciles a resource. This will receive a list of resources from `reconcilable_resources/0`. """ @callback reconcile(map()) :: :ok | {:ok, any()} | {:error, any()} @doc """ [`K8s.Operation`](https://hexdocs.pm/k8s/K8s.Operation.html) to reconcile. ## Examples ```elixir def reconcile_operation() do K8s.Client.list("v1", :pods, namespace: :all) end ``` """ @callback reconcile_operation() :: K8s.Operation.t() @doc """ (Optional) List of resources to be reconciled. Default implementation is to stream all resources (`reconcile_operation/0`) from the cluster (`Bella.Config.cluster_name/0`). 
""" @callback reconcilable_resources() :: {:ok, list(map())} | {:error, any()} defmacro __using__(opts) do quote bind_quoted: [opts: opts] do @behaviour Bella.Server.Reconciler use GenServer @frequency (opts[:frequency] || 30) * 1000 @initial_delay opts[:initial_delay] || 500 @client opts[:client] || K8s.Client def start_link, do: start_link([]) def start_link(opts), do: GenServer.start_link(__MODULE__, :ok, opts) @doc false @spec client() :: any() def client, do: @client @impl GenServer def init(:ok) do Bella.Sys.Event.reconciler_initialized(%{}, %{module: __MODULE__}) Bella.Server.Reconciler.schedule(self(), @initial_delay) {:ok, %{}} end @impl GenServer def handle_info(:run, state) do Bella.Server.Reconciler.run(__MODULE__) Bella.Server.Reconciler.schedule(self(), @frequency) {:noreply, state} end @impl GenServer def handle_info({:DOWN, _ref, :process, _pid, _reason}, state) do Bella.Sys.Event.reconciler_genserver_down(%{}, %{module: __MODULE__}) {:stop, :normal, state} end @impl GenServer def handle_info(_other, state) do {:noreply, state} end @impl Bella.Server.Reconciler def reconcilable_resources do operation = reconcile_operation() cluster = Bella.Config.cluster_name() @client.stream(operation, cluster) end defoverridable reconcilable_resources: 0 end end @doc """ Schedules a run of a started `Reconciler` """ @spec schedule(pid(), pos_integer()) :: reference() def schedule(pid, frequency) do Process.send_after(pid, :run, frequency) end @doc """ Runs a `Reconcilers` `reconcile/1` for each resource return by `reconcilable_resources/0` """ @spec run(module) :: no_return def run(module) do metadata = %{module: module} Bella.Sys.Event.reconciler_run_started(%{}, metadata) {measurements, result} = Bella.Sys.Event.measure(module, :reconcilable_resources, []) case result do {:ok, resources} -> Enum.each(resources, fn resource when is_map(resource) -> Bella.Sys.Event.reconciler_fetch_succeeded(measurements, metadata) reconcile_async(resource, module) {:error, error} -> metadata = Map.put(metadata, :error, error) Bella.Sys.Event.reconciler_fetch_failed(measurements, metadata) end) {:error, error} -> metadata = Map.put(metadata, :error, error) Bella.Sys.Event.reconciler_fetch_failed(measurements, metadata) end nil end @spec reconcile_async(map, module) :: no_return defp reconcile_async(resource, module) do Task.start(fn -> {measurements, result} = Bella.Sys.Event.measure(module, :reconcile, [resource]) metadata = %{ module: module, name: K8s.Resource.name(resource), namespace: K8s.Resource.namespace(resource), kind: K8s.Resource.kind(resource), api_version: resource["apiVersion"] } case result do :ok -> Bella.Sys.Event.reconciler_reconcile_succeeded(measurements, metadata) {:ok, _} -> Bella.Sys.Event.reconciler_reconcile_succeeded(measurements, metadata) {:error, error} -> metadata = Map.put(metadata, :error, error) Bella.Sys.Event.reconciler_reconcile_failed(measurements, metadata) end end) end end
lib/bella/server/reconciler.ex
0.89616
0.671636
reconciler.ex
starcoder
defmodule Calendar do @moduledoc """ This module defines the responsibilities for working with calendars, dates, times and datetimes in Elixir. Currently it defines types and the minimal implementation for a calendar behaviour in Elixir. The goal of the Calendar features in Elixir is to provide a base for interoperability instead of full-featured datetime API. For the actual date, time and datetime structures, see `Date`, `Time`, `NaiveDateTime` and `DateTime`. Note the year, month, day, etc. designations are overspecified (i.e. an integer instead of `1..12` for months) because different calendars may have a different number of days per month, months per year and so on. """ @type year :: integer @type month :: pos_integer @type day :: pos_integer @type week :: pos_integer @type day_of_week :: non_neg_integer @type era :: non_neg_integer @type hour :: non_neg_integer @type minute :: non_neg_integer @type second :: non_neg_integer @typedoc """ The internal time format is used when converting between calendars. It represents time as a fraction of a day (starting from midnight). `parts_in_day` specifies how much of the day is already passed, while `parts_per_day` signifies how many parts there fit in a day. """ @type day_fraction :: {parts_in_day :: non_neg_integer, parts_per_day :: pos_integer} @typedoc """ The internal date format that is used when converting between calendars. This is the number of days including the fractional part that has passed of the last day since 0000-01-01+00:00T00:00.000000 in ISO 8601 notation (also known as midnight 1 January BC 1 of the proleptic Gregorian calendar). """ @type iso_days :: {days :: integer, day_fraction} @typedoc """ Microseconds with stored precision. The precision represents the number of digits that must be used when representing the microseconds to external format. If the precision is 0, it means microseconds must be skipped. """ @type microsecond :: {0..999_999, 0..6} @typedoc "A calendar implementation" @type calendar :: module @typedoc "The time zone ID according to the IANA tz database (e.g. Europe/Zurich)" @type time_zone :: String.t() @typedoc "The time zone abbreviation (e.g. CET or CEST or BST etc.)" @type zone_abbr :: String.t() @typedoc "The time zone UTC offset in seconds" @type utc_offset :: integer @typedoc "The time zone standard offset in seconds (not zero in summer times)" @type std_offset :: integer @typedoc "Any map/struct that contains the date fields" @type date :: %{optional(any) => any, calendar: calendar, year: year, month: month, day: day} @typedoc "Any map/struct that contains the time fields" @type time :: %{ optional(any) => any, hour: hour, minute: minute, second: second, microsecond: microsecond } @typedoc "Any map/struct that contains the naive_datetime fields" @type naive_datetime :: %{ optional(any) => any, calendar: calendar, year: year, month: month, day: day, hour: hour, minute: minute, second: second, microsecond: microsecond } @typedoc "Any map/struct that contains the datetime fields" @type datetime :: %{ optional(any) => any, calendar: calendar, year: year, month: month, day: day, hour: hour, minute: minute, second: second, microsecond: microsecond, time_zone: time_zone, zone_abbr: zone_abbr, utc_offset: utc_offset, std_offset: std_offset } @typedoc """ Specifies the time zone database for calendar operations. Many functions in the `DateTime` module require a time zone database. 
  By default, it uses the default time zone database returned by
  `Calendar.get_time_zone_database/0`, which defaults to
  `Calendar.UTCOnlyTimeZoneDatabase` which only handles "Etc/UTC"
  datetimes and returns `{:error, :utc_only_time_zone_database}`
  for any other time zone.

  Other time zone databases (including ones provided by packages)
  can be configured as default either via configuration:

      config :elixir, :time_zone_database, CustomTimeZoneDatabase

  or by calling `Calendar.put_time_zone_database/1`.

  See `Calendar.TimeZoneDatabase` for more information on custom
  time zone databases.
  """
  @type time_zone_database :: module()

  @doc """
  Returns how many days there are in the given year-month.
  """
  @callback days_in_month(year, month) :: day

  @doc """
  Returns how many months there are in the given year.
  """
  @callback months_in_year(year) :: month

  @doc """
  Returns `true` if the given year is a leap year.

  A leap year is a year of a longer length than normal. The exact meaning
  is up to the calendar. A calendar must return `false` if it does not support
  the concept of leap years.
  """
  @callback leap_year?(year) :: boolean

  @doc """
  Calculates the day of the week from the given `year`, `month`, and `day`.
  """
  @callback day_of_week(year, month, day) :: day_of_week()

  @doc """
  Calculates the day of the year from the given `year`, `month`, and `day`.
  """
  @callback day_of_year(year, month, day) :: non_neg_integer()

  @doc """
  Calculates the quarter of the year from the given `year`, `month`, and `day`.
  """
  @callback quarter_of_year(year, month, day) :: non_neg_integer()

  @doc """
  Calculates the year and era from the given `year`.
  """
  @callback year_of_era(year) :: {year, era}

  @doc """
  Calculates the day and era from the given `year`, `month`, and `day`.
  """
  @callback day_of_era(year, month, day) :: {non_neg_integer(), era}

  @doc """
  Converts the date into a string according to the calendar.
  """
  @callback date_to_string(year, month, day) :: String.t()

  @doc """
  Converts the datetime (without time zone) into a string according to the calendar.
  """
  @callback naive_datetime_to_string(year, month, day, hour, minute, second, microsecond) ::
              String.t()

  @doc """
  Converts the datetime (with time zone) into a string according to the calendar.
  """
  @callback datetime_to_string(
              year,
              month,
              day,
              hour,
              minute,
              second,
              microsecond,
              time_zone,
              zone_abbr,
              utc_offset,
              std_offset
            ) :: String.t()

  @doc """
  Converts the time into a string according to the calendar.
  """
  @callback time_to_string(hour, minute, second, microsecond) :: String.t()

  @doc """
  Converts the given datetime (without time zone) into the `t:iso_days/0` format.
  """
  @callback naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond) ::
              iso_days

  @doc """
  Converts `t:iso_days/0` to the Calendar's datetime format.
  """
  @callback naive_datetime_from_iso_days(iso_days) ::
              {year, month, day, hour, minute, second, microsecond}

  @doc """
  Converts the given time to the `t:day_fraction/0` format.
  """
  @callback time_to_day_fraction(hour, minute, second, microsecond) :: day_fraction

  @doc """
  Converts `t:day_fraction/0` to the Calendar's time format.
  """
  @callback time_from_day_fraction(day_fraction) :: {hour, minute, second, microsecond}

  @doc """
  Define the rollover moment for the given calendar.

  This is the moment, in your calendar, when the current day ends
  and the next day starts.

  The result of this function is used to check if two calendars rollover at
  the same time of day. If they do not, we can only convert datetimes and times
  between them.
If they do, this means that we can also convert dates as well as naive datetimes between them. This day fraction should be in its most simplified form possible, to make comparisons fast. ## Examples * If, in your Calendar, a new day starts at midnight, return {0, 1}. * If, in your Calendar, a new day starts at sunrise, return {1, 4}. * If, in your Calendar, a new day starts at noon, return {1, 2}. * If, in your Calendar, a new day starts at sunset, return {3, 4}. """ @callback day_rollover_relative_to_midnight_utc() :: day_fraction @doc """ Should return `true` if the given date describes a proper date in the calendar. """ @callback valid_date?(year, month, day) :: boolean @doc """ Should return `true` if the given time describes a proper time in the calendar. """ @callback valid_time?(hour, minute, second, microsecond) :: boolean @doc """ Implements inspect for a date. """ @callback inspect_date(year, month, day, Inspect.Opts.t()) :: Inspect.Algebra.t() @doc """ Implements inspect for a time. """ @callback inspect_time(hour, minute, second, microsecond, Inspect.Opts.t()) :: Inspect.Algebra.t() @doc """ Implements inspect for a naive datetime. """ @callback inspect_naive_datetime( year, month, day, hour, minute, second, microsecond, Inspect.Opts.t() ) :: Inspect.Algebra.t() @doc """ Implements inspect for a datetime. """ @callback inspect_datetime( year, month, day, hour, minute, second, microsecond, time_zone, zone_abbr, utc_offset, std_offset, Inspect.Opts.t() ) :: Inspect.Algebra.t() # General Helpers @doc """ Returns `true` if two calendars have the same moment of starting a new day, `false` otherwise. If two calendars are not compatible, we can only convert datetimes and times between them. If they are compatible, this means that we can also convert dates as well as naive datetimes between them. """ @doc since: "1.5.0" @spec compatible_calendars?(Calendar.calendar(), Calendar.calendar()) :: boolean def compatible_calendars?(calendar, calendar), do: true def compatible_calendars?(calendar1, calendar2) do calendar1.day_rollover_relative_to_midnight_utc() == calendar2.day_rollover_relative_to_midnight_utc() end @doc """ Returns a microsecond tuple truncated to a given precision (`:microsecond`, `:millisecond` or `:second`). """ @doc since: "1.6.0" @spec truncate(Calendar.microsecond(), :microsecond | :millisecond | :second) :: Calendar.microsecond() def truncate(microsecond_tuple, :microsecond), do: microsecond_tuple def truncate({microsecond, precision}, :millisecond) do output_precision = min(precision, 3) {div(microsecond, 1000) * 1000, output_precision} end def truncate(_, :second), do: {0, 0} @doc """ Sets the current time zone database. """ @doc since: "1.8.0" @spec put_time_zone_database(time_zone_database()) :: :ok def put_time_zone_database(database) do Application.put_env(:elixir, :time_zone_database, database) end @doc """ Gets the current time zone database. """ @doc since: "1.8.0" @spec get_time_zone_database() :: time_zone_database() def get_time_zone_database() do Application.get_env(:elixir, :time_zone_database, Calendar.UTCOnlyTimeZoneDatabase) end end
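Two small illustrations of the helpers above (not part of the original file); both results follow directly from the implementations shown:

```elixir
# Truncating stored microsecond precision: {value, precision}.
Calendar.truncate({123_456, 6}, :millisecond)
#=> {123_000, 3}

Calendar.truncate({123_456, 6}, :second)
#=> {0, 0}

# Two calendars are compatible when their days roll over at the same moment.
Calendar.compatible_calendars?(Calendar.ISO, Calendar.ISO)
#=> true
```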
lib/elixir/lib/calendar.ex
0.936125
0.74375
calendar.ex
starcoder
defmodule ExCoveralls.Stats do
  @moduledoc """
  Provides calculation logic for coverage stats.
  """
  alias ExCoveralls.Cover
  alias ExCoveralls.Settings

  defmodule Source do
    @moduledoc """
    Stores count information for a file and all source lines.
    """
    defstruct filename: "", coverage: 0, sloc: 0, hits: 0, misses: 0, source: []
  end

  defmodule Line do
    @moduledoc """
    Stores count information and source for a single line.
    """
    defstruct coverage: nil, source: ""
  end

  @doc """
  Reports the statistical information for the specified modules.
  """
  def report(modules) do
    calculate_stats(modules)
    |> generate_coverage
    |> generate_source_info
    |> skip_files
    |> ExCoveralls.StopWords.filter
  end

  @doc """
  Calculate the statistical information for the specified list of modules.
  It uses :cover.analyse for getting the information.
  """
  def calculate_stats(modules) do
    Enum.reduce(modules, Map.new, fn(module, dict) ->
      {:ok, lines} = Cover.analyze(module)
      analyze_lines(lines, dict)
    end)
  end

  defp analyze_lines(lines, module_hash) do
    Enum.reduce(lines, module_hash, fn({{module, line}, count}, module_hash) ->
      add_counts(module_hash, module, line, count)
    end)
  end

  defp add_counts(module_hash, module, line, count) do
    path = Cover.module_path(module)
    count_hash = Map.get(module_hash, path, Map.new)
    Map.put(module_hash, path, Map.put(count_hash, line, max(Map.get(count_hash, line, 0), count)))
  end

  @doc """
  Generate coverage, based on the pre-calculated statistic information.
  """
  def generate_coverage(hash) do
    keys = Map.keys(hash)
    Enum.map(keys, fn(file_path) ->
      total = get_source_line_count(file_path)
      {file_path, do_generate_coverage(Map.fetch!(hash, file_path), total, [])}
    end)
  end

  defp do_generate_coverage(_hash, 0, acc), do: acc
  defp do_generate_coverage(hash, index, acc) do
    count = Map.get(hash, index, nil)
    do_generate_coverage(hash, index - 1, [count | acc])
  end

  @doc """
  Generate objects which store source-file and coverage stats information.
  """
  def generate_source_info(coverage) do
    Enum.map(coverage, fn({file_path, stats}) ->
      [
        name: file_path,
        source: read_source(file_path),
        coverage: stats
      ]
    end)
  end

  @doc """
  Append the name of the sub app to the source info stats.
  """
  def append_sub_app_name(stats, sub_app_name, _apps_path) do
    Enum.map(stats, fn([{:name, name}, {:source, source}, {:coverage, coverage}]) ->
      [{:name, "#{sub_app_name}/#{name}"}, {:source, source}, {:coverage, coverage}]
    end)
  end

  @doc """
  Returns total line counts of the specified source file.
  """
  def get_source_line_count(file_path) do
    read_source(file_path) |> count_lines
  end

  defp count_lines(string) do
    1 + Enum.count(string_to_charlist(string), fn(x) -> x == ?\n end)
  end

  @doc """
  Returns the source file of the specified module.
  """
  def read_module_source(module) do
    Cover.module_path(module) |> read_source
  end

  @doc """
  Wrapper for reading the specified file.
  """
  def read_source(file_path) do
    ExCoveralls.PathReader.expand_path(file_path) |> File.read! |> trim_empty_prefix_and_suffix
  end

  def trim_empty_prefix_and_suffix(string) do
    string = Regex.replace(~r/\n\z/m, string, "")
    Regex.replace(~r/\A\n/m, string, "")
  end

  def skip_files(coverage) do
    skip = Settings.get_skip_files
    Enum.reject(coverage, fn cov ->
      Enum.any?(skip, &Regex.match?(&1, cov[:name]))
    end)
  end

  @doc """
  Summarizes source coverage details.
""" def source(stats, _patterns = nil), do: source(stats) def source(stats, _patterns = []), do: source(stats) def source(stats, patterns) do Enum.filter(stats, fn(stat) -> String.contains?(stat[:name], patterns) end) |> source end def source(stats) do stats = Enum.sort(stats, fn(x, y) -> x[:name] <= y[:name] end) stats |> transform_cov end defp transform_cov(stats) do files = Enum.map(stats, &populate_file/1) {relevant, hits, misses} = Enum.reduce(files, {0,0,0}, &reduce_file_counts/2) covered = relevant - misses %{coverage: get_coverage(relevant, covered), sloc: relevant, hits: hits, misses: misses, files: files} end defp populate_file(stat) do coverage = stat[:coverage] source = map_source(stat[:source], coverage) relevant = Enum.count(coverage, fn e -> e != nil end) hits = Enum.reduce(coverage, 0, fn e, acc -> (e || 0) + acc end) misses = Enum.count(coverage, fn e -> e == 0 end) covered = relevant - misses %Source{filename: stat[:name], coverage: get_coverage(relevant, covered), sloc: relevant, hits: hits, misses: misses, source: source} end defp reduce_file_counts(%{sloc: sloc, hits: hits, misses: misses}, {s,h,m}) do {s+sloc, h+hits, m+misses} end defp get_coverage(relevant, covered) do value = case relevant do 0 -> Settings.default_coverage_value _ -> (covered / relevant) * 100 end if value == trunc(value) do trunc(value) else Float.round(value, 1) end end defp map_source(source, coverage) do source |> String.split("\n") |> Enum.with_index() |> Enum.map(&(populate_source(&1,coverage))) end defp populate_source({line, i}, coverage) do %Line{coverage: Enum.at(coverage, i) , source: line} end @doc """ Exit the process with a status of 1 if coverage is below the minimum. """ def ensure_minimum_coverage(stats) do coverage_options = ExCoveralls.Settings.get_coverage_options minimum_coverage = coverage_options["minimum_coverage"] || 0 if minimum_coverage > 0, do: check_coverage_threshold(stats, minimum_coverage) end defp check_coverage_threshold(stats, minimum_coverage) do result = source(stats) if result.coverage < minimum_coverage do message = "FAILED: Expected minimum coverage of #{minimum_coverage}%, got #{result.coverage}%." IO.puts IO.ANSI.format([:red, :bright, message]) exit({:shutdown, 1}) end end if Version.compare(System.version, "1.3.0") == :lt do defp string_to_charlist(string), do: String.to_char_list(string) else defp string_to_charlist(string), do: String.to_charlist(string) end end
lib/excoveralls/stats.ex
0.797714
0.462473
stats.ex
starcoder
defmodule Yggdrasil.Redis.Application do
  @moduledoc """
  [![Build Status](https://travis-ci.org/gmtprime/yggdrasil_redis.svg?branch=master)](https://travis-ci.org/gmtprime/yggdrasil_redis) [![Hex pm](http://img.shields.io/hexpm/v/yggdrasil_redis.svg?style=flat)](https://hex.pm/packages/yggdrasil_redis) [![hex.pm downloads](https://img.shields.io/hexpm/dt/yggdrasil_redis.svg?style=flat)](https://hex.pm/packages/yggdrasil_redis)

  This project is a Redis adapter for `Yggdrasil` publisher/subscriber.

  ## Small example

  The following example uses the Redis adapter to distribute messages:

  ```elixir
  iex(1)> channel = %Yggdrasil.Channel{name: "some_channel", adapter: :redis}
  iex(2)> Yggdrasil.subscribe(channel)
  iex(3)> flush()
  {:Y_CONNECTED, %Yggdrasil.Channel{(...)}}
  ```

  and to publish a message for the subscribers:

  ```elixir
  iex(4)> Yggdrasil.publish(channel, "message")
  iex(5)> flush()
  {:Y_EVENT, %Yggdrasil.Channel{(...)}, "message"}
  ```

  When the subscriber wants to stop receiving messages, it can unsubscribe
  from the channel:

  ```elixir
  iex(6)> Yggdrasil.unsubscribe(channel)
  iex(7)> flush()
  {:Y_DISCONNECTED, %Yggdrasil.Channel{(...)}}
  ```

  ## Redis adapter

  The Redis adapter has the following rules:
    * The `adapter` name is identified by the atom `:redis`.
    * The channel `name` must be a string.
    * The `transformer` must encode to a string. Of the provided
      `transformer`s, it defaults to `:default`, but `:json` can also be used.
    * Any `backend` can be used (by default it is `:default`).

  The following is an example of a valid channel for both publishers and
  subscribers:

  ```elixir
  %Yggdrasil.Channel{
    name: "redis_channel_name",
    adapter: :redis,
    transformer: :json
  }
  ```

  It will expect valid JSONs from Redis and it will write valid JSONs in Redis.

  ## Redis configuration

  Uses the list of options for `Redix`, but the most relevant options are shown
  below:
    * `hostname` - Redis hostname (defaults to `"localhost"`).
    * `port` - Redis port (defaults to `6379`).
    * `password` - Redis password (defaults to `""`).

  The following shows a configuration with and without namespace:

  ```elixir
  # Without namespace
  config :yggdrasil,
    redis: [hostname: "redis.zero"]

  # With namespace
  config :yggdrasil, RedisOne,
    redis: [
      hostname: "redis.one",
      port: 1234
    ]
  ```

  Also the options can be provided as OS environment variables. The available
  variables are:

    * `YGGDRASIL_REDIS_HOSTNAME` or `<NAMESPACE>_YGGDRASIL_REDIS_HOSTNAME`.
    * `YGGDRASIL_REDIS_PORT` or `<NAMESPACE>_YGGDRASIL_REDIS_PORT`.
    * `YGGDRASIL_REDIS_PASSWORD` or `<NAMESPACE>_YGGDRASIL_REDIS_PASSWORD`.
    * `YGGDRASIL_REDIS_DATABASE` or `<NAMESPACE>_YGGDRASIL_REDIS_DATABASE`.

  where `<NAMESPACE>` is the snakecase of the namespace chosen e.g. for the
  namespace `RedisTwo`, you would use `REDIS_TWO` as namespace in the OS
  environment variable.

  ## Installation

  Using this Redis adapter with `Yggdrasil` is a matter of adding the
  available hex package to your `mix.exs` file e.g:

  ```elixir
  def deps do
    [{:yggdrasil_redis, "~> 4.1"}]
  end
  ```
  """
  use Application

  @impl true
  def start(_type, _args) do
    children = [
      Supervisor.child_spec({Yggdrasil.Adapter.Redis, []}, [])
    ]

    opts = [strategy: :one_for_one, name: Yggdrasil.Redis.Supervisor]
    Supervisor.start_link(children, opts)
  end
end
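# A hedged configuration sketch, not part of the original file: hostname and
# port values are hypothetical. Following the naming scheme in the moduledoc,
# the namespace `RedisOne` maps to the `REDIS_ONE` environment prefix:
#
#     # config/config.exs
#     config :yggdrasil, RedisOne, redis: [hostname: "redis.one", port: 1234]
#
#     # or, equivalently, via the OS environment:
#     #   REDIS_ONE_YGGDRASIL_REDIS_HOSTNAME=redis.one
#     #   REDIS_ONE_YGGDRASIL_REDIS_PORT=1234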
lib/yggdrasil/redis/application.ex
0.901567
0.969785
application.ex
starcoder
defmodule SanbaseWeb.Graphql.ProjectTypes do
  use Absinthe.Schema.Notation

  import Absinthe.Resolution.Helpers
  import SanbaseWeb.Graphql.Cache, only: [cache_resolve: 1, cache_resolve: 2]

  alias SanbaseWeb.Graphql.Resolvers.{
    ClickhouseResolver,
    ProjectResolver,
    ProjectSignalsResolver,
    ProjectMetricsResolver,
    ProjectBalanceResolver,
    ProjectTransfersResolver,
    IcoResolver,
    TwitterResolver
  }

  alias Sanbase.Model.Project
  alias SanbaseWeb.Graphql.SanbaseRepo
  alias SanbaseWeb.Graphql.Complexity
  alias SanbaseWeb.Graphql.Middlewares.AccessControl

  enum :operator_name do
    value(:less_than)
    value(:greater_than)
    value(:greater_than_or_equal_to)
    value(:less_than_or_equal_to)
    value(:inside_channel)
    value(:inside_channel_inclusive)
    value(:inside_channel_exclusive)
    value(:outside_channel_inclusive)
    value(:outside_channel_exclusive)
  end

  enum :direction_type do
    value(:asc)
    value(:desc)
  end

  enum :filters_combinator do
    value(:and)
    value(:or)
  end

  input_object :project_pagination_input_object do
    field(:page, non_null(:integer))
    field(:page_size, non_null(:integer))
  end

  input_object :project_filter_input_object do
    field(:name, :string)
    field(:args, :json)
    field(:metric, :string)
    field(:from, :datetime)
    field(:to, :datetime)
    field(:dynamic_from, :interval_or_now)
    field(:dynamic_to, :interval_or_now)
    field(:aggregation, :aggregation, default_value: nil)
    field(:operator, :operator_name)
    field(:threshold, :float)
  end

  input_object :project_order_input_object do
    field(:metric, non_null(:string))
    field(:from, non_null(:datetime))
    field(:to, non_null(:datetime))
    field(:aggregation, :aggregation, default_value: nil)
    field(:direction, non_null(:direction_type))
  end

  input_object :base_projects_input_object do
    field(:watchlist_id, :integer)
    field(:watchlist_slug, :string)
    field(:slugs, list_of(:string))
  end

  input_object :projects_selector_input_object do
    field(:base_projects, list_of(:base_projects_input_object))
    field(:filters, list_of(:project_filter_input_object))
    field(:filters_combinator, :filters_combinator, default_value: :and)
    field(:order_by, :project_order_input_object)
    field(:pagination, :project_pagination_input_object)
  end

  input_object :aggregated_timeseries_data_selector_input_object do
    field(:label, :string)
    field(:labels, list_of(:string))
    field(:owner, :string)
    field(:owners, list_of(:string))
    field(:holders_count, :integer)
    field(:source, :string)
  end

  object :metric_anomalies do
    field(:metric, :string)
    field(:anomalies, list_of(:string))
  end

  object :project_tag do
    field(:name, non_null(:string))
    field(:type, :string)
  end

  object :projects_object_stats do
    field(:projects_count, non_null(:integer))
  end

  object :projects_object do
    field(:projects, list_of(:project))
    field(:stats, :projects_object_stats)
  end

  # Includes all available fields
  @desc ~s"""
  A type fully describing a project.
  """
  object :project do
    @desc ~s"""
    Returns a list of available signals. Every one of the signals in the list
    can be passed as the `signal` argument of the `getSignal` query.

    For example, any of the signals from the query:
    ```
    {
      projectBySlug(slug: "ethereum"){ availableSignals }
    }
    ```
    can be used like this:
    ```
    {
      getSignal(signal: "<signal>"){
        timeseriesData(
          slug: "ethereum"
          from: "2019-01-01T00:00:00Z"
          to: "2019-02-01T00:00:00Z"
          interval: "1d"){
            datetime
            value
        }
      }
    }
    ```
    """
    field :available_signals, list_of(:string) do
      cache_resolve(&ProjectSignalsResolver.available_signals/3, ttl: 600)
    end

    @desc ~s"""
    Returns a list of available metrics.
Every one of the metrics in the list can be passed as the `metric`
    argument of the `getMetric` query.

    For example, any of the metrics from the query:
    ```
    {
      projectBySlug(slug: "ethereum"){ availableMetrics }
    }
    ```
    can be used like this:
    ```
    {
      getMetric(metric: "<metric>"){
        timeseriesData(
          slug: "ethereum"
          from: "2019-01-01T00:00:00Z"
          to: "2019-02-01T00:00:00Z"
          interval: "1d"){
            datetime
            value
        }
      }
    }
    ```
    or
    ```
    {
      getMetric(metric: "<metric>"){
        histogramData(
          slug: "ethereum"
          from: "2019-01-01T00:00:00Z"
          to: "2019-02-01T00:00:00Z"
          interval: "1d"
          limit: 50){
            datetime
            value
        }
      }
    }
    ```
    The breakdown of the metrics into those fetchable by `timeseriesData` and
    `histogramData` can be obtained with the following fields:
    ```
    {
      projectBySlug(slug: "ethereum"){
        availableTimeseriesMetrics
        availableHistogramMetrics
      }
    }
    ```
    """
    field :available_metrics, list_of(:string) do
      cache_resolve(&ProjectMetricsResolver.available_metrics/3, ttl: 1200)
    end

    @desc ~s"""
    Returns a subset of the availableMetrics that are fetchable by getMetric's
    timeseriesData
    ```
    {
      getMetric(metric: "<metric>"){
        timeseriesData(
          slug: "ethereum"
          from: "2019-01-01T00:00:00Z"
          to: "2019-02-01T00:00:00Z"
          interval: "1d"){
            datetime
            value
        }
      }
    }
    ```
    """
    field :available_timeseries_metrics, list_of(:string) do
      cache_resolve(&ProjectMetricsResolver.available_timeseries_metrics/3, ttl: 1200)
    end

    @desc ~s"""
    Returns a subset of the availableMetrics that are fetchable by getMetric's
    histogramData
    ```
    {
      getMetric(metric: "<metric>"){
        histogramData(
          slug: "ethereum"
          from: "2019-01-01T00:00:00Z"
          to: "2019-02-01T00:00:00Z"
          interval: "1d"
          limit: 50){
            datetime
            value
        }
      }
    }
    ```
    """
    field :available_histogram_metrics, list_of(:string) do
      cache_resolve(&ProjectMetricsResolver.available_histogram_metrics/3, ttl: 1200)
    end

    field :available_table_metrics, list_of(:string) do
      cache_resolve(&ProjectMetricsResolver.available_table_metrics/3, ttl: 1200)
    end

    field :traded_on_exchanges, list_of(:string) do
      cache_resolve(&ProjectResolver.traded_on_exchanges/3)
    end

    field :traded_on_exchanges_count, :integer do
      cache_resolve(&ProjectResolver.traded_on_exchanges_count/3)
    end

    @desc ~s"""
    Returns a list of GraphQL queries that have data for the given slug.

    For example, any of the queries returned from the query:
    ```
    {
      projectBySlug(slug: "ethereum"){ availableQueries }
    }
    ```
    can be executed with the "ethereum" slug as parameter and it will have
    data. The `devActivity` query will be part of the result if that project
    has a known github link.
So the following query will have data: ``` { devActivity( slug: "ethereum" from: "2019-01-01T00:00:00Z" to: "2019-02-01T00:00:00Z" interval: "1d"){ datetime activity } } ``` """ field :available_queries, list_of(:string) do cache_resolve(&ProjectResolver.available_queries/3, ttl: 1800) end field :aggregated_timeseries_data, :float do arg(:selector, :aggregated_timeseries_data_selector_input_object) arg(:metric, non_null(:string)) arg(:from, non_null(:datetime)) arg(:to, non_null(:datetime)) arg(:aggregation, :aggregation, default_value: nil) arg(:include_incomplete_data, :boolean, default_value: false) arg(:caching_params, :caching_params_input_object) complexity(&Complexity.from_to_interval/3) middleware(AccessControl) cache_resolve(&ProjectMetricsResolver.aggregated_timeseries_data/3, ttl: 120, max_ttl_offset: 60 ) end field(:id, non_null(:id)) field(:name, non_null(:string)) field(:slug, :string) field(:ticker, :string) field(:logo_url, :string) field(:dark_logo_url, :string) field(:website_link, :string) field(:email, :string) field(:btt_link, :string) field(:facebook_link, :string) field(:github_link, :string) field(:reddit_link, :string) field(:twitter_link, :string) field(:whitepaper_link, :string) field(:blog_link, :string) field(:telegram_chat_id, :integer) field(:slack_link, :string) field(:discord_link, :string) field(:linkedin_link, :string) field(:telegram_link, :string) field(:token_address, :string) field(:team_token_wallet, :string) field(:description, :string) field(:long_description, :string) field(:token_decimals, :integer) field :main_contract_address, :string do cache_resolve( dataloader(SanbaseRepo, :contract_addresses, callback: fn contract_addresses, _project, _args -> case contract_addresses do [_ | _] -> main = Project.ContractAddress.list_to_main_contract_address(contract_addresses) {:ok, main.address} _ -> {:ok, nil} end end ), fun_name: :project_main_contract_address ) end field :contract_addresses, list_of(:contract_address) do cache_resolve( dataloader(SanbaseRepo), fun_name: :project_contract_addresses ) end field :eth_addresses, list_of(:eth_address) do cache_resolve( dataloader(SanbaseRepo), fun_name: :eth_addresses_resolver_fun ) end field :social_volume_query, :string do cache_resolve( dataloader(SanbaseRepo, :social_volume_query, callback: fn nil, project, _args -> {:ok, Project.SocialVolumeQuery.default_query(project)} svq, _project, _args -> case svq.query do query when query in [nil, ""] -> {:ok, svq.autogenerated_query} _ -> {:ok, svq.query} end end ), fun_name: :social_volume_query ) end field :source_slug_mappings, list_of(:source_slug_mapping) do cache_resolve( dataloader(SanbaseRepo, :source_slug_mappings, callback: fn query, _project, _args -> {:ok, query} end ), fun_name: :source_slug_mappings ) end field :market_segment, :string do # Introduce a different function name so it does not share cache with the # :market_segments as they query the same data cache_resolve( dataloader(SanbaseRepo, :market_segments, callback: fn query, _project, _args -> {:ok, query |> Enum.map(& &1.name) |> List.first()} end ), fun_name: :market_segment ) end field :market_segments, list_of(:string) do cache_resolve( dataloader(SanbaseRepo, :market_segments, callback: fn query, _project, _args -> {:ok, query |> Enum.map(& &1.name)} end ), fun_name: :market_segments ) end field :tags, list_of(:project_tag) do cache_resolve( dataloader(SanbaseRepo, :market_segments, callback: fn query, _project, _args -> {:ok, query} end, fun_name: :project_market_segment_tags ) ) end 
field :is_trending, :boolean do cache_resolve(&ProjectResolver.is_trending/3) end field :github_links, list_of(:string) do cache_resolve(&ProjectResolver.github_links/3) end field :related_posts, list_of(:post) do cache_resolve(&ProjectResolver.related_posts/3) end field :infrastructure, :string do cache_resolve(&ProjectResolver.infrastructure/3) end field :eth_balance, :float do cache_resolve(&ProjectBalanceResolver.eth_balance/3) end field :btc_balance, :float do deprecate("The field btc_balance is deprecated") cache_resolve(&ProjectBalanceResolver.btc_balance/3) end field :usd_balance, :float do cache_resolve(&ProjectBalanceResolver.usd_balance/3) end field :funds_raised_icos, list_of(:currency_amount) do cache_resolve(&ProjectResolver.funds_raised_icos/3, ttl: 600, max_ttl_offset: 600 ) end field :roi_usd, :decimal do cache_resolve(&ProjectResolver.roi_usd/3) end field :coinmarketcap_id, :string do resolve(fn %Project{slug: slug}, _, _ -> {:ok, slug} end) end field :symbol, :string do resolve(&ProjectResolver.symbol/3) end field :rank, :integer do resolve(&ProjectResolver.rank/3) end field :price_usd, :float do resolve(&ProjectResolver.price_usd/3) end field :price_btc, :float do resolve(&ProjectResolver.price_btc/3) end field :price_eth, :float do resolve(&ProjectResolver.price_eth/3) end field :volume_usd, :float do resolve(&ProjectResolver.volume_usd/3) end field :volume_change24h, :float do cache_resolve(&ProjectResolver.volume_change_24h/3, max_ttl_offset: 600 ) end field :average_dev_activity, :float do description("Average dev activity for the last `days` days") arg(:days, :integer, default_value: 30) cache_resolve(&ProjectResolver.average_dev_activity/3, ttl: 600, max_ttl_offset: 600 ) end field :average_github_activity, :float do description("Average github activity for the last `days` days") arg(:days, :integer, default_value: 30) cache_resolve(&ProjectResolver.average_github_activity/3, ttl: 600, max_ttl_offset: 600 ) end field :twitter_data, :twitter_data do cache_resolve(&TwitterResolver.twitter_data/3, ttl: 600, max_ttl_offset: 600 ) end field :marketcap_usd, :float do resolve(&ProjectResolver.marketcap_usd/3) end field :available_supply, :decimal do resolve(&ProjectResolver.available_supply/3) end field :total_supply, :decimal do resolve(&ProjectResolver.total_supply/3) end field :percent_change1h, :decimal do resolve(&ProjectResolver.percent_change_1h/3) end field :percent_change24h, :decimal do resolve(&ProjectResolver.percent_change_24h/3) end field :percent_change7d, :decimal do resolve(&ProjectResolver.percent_change_7d/3) end field :funds_raised_usd_ico_end_price, :float do cache_resolve(&ProjectResolver.funds_raised_usd_ico_end_price/3, ttl: 600, max_ttl_offset: 600 ) end field :funds_raised_eth_ico_end_price, :float do cache_resolve(&ProjectResolver.funds_raised_eth_ico_end_price/3, ttl: 600, max_ttl_offset: 600 ) end field :funds_raised_btc_ico_end_price, :float do cache_resolve(&ProjectResolver.funds_raised_btc_ico_end_price/3, ttl: 600, max_ttl_offset: 600 ) end field :initial_ico, :ico do cache_resolve(&ProjectResolver.initial_ico/3, ttl: 600, max_ttl_offset: 600) end field(:icos, list_of(:ico), resolve: dataloader(SanbaseRepo)) field :ico_price, :float do cache_resolve(&ProjectResolver.ico_price/3) end field :price_to_book_ratio, :float do deprecate("The field price_to_book_ratio is deprecated") cache_resolve(&ProjectResolver.price_to_book_ratio/3) end @desc "Total ETH spent from the project's team wallets for the last `days`" field :eth_spent, :float do 
arg(:days, :integer, default_value: 30)

      cache_resolve(&ProjectTransfersResolver.eth_spent/3,
        ttl: 600,
        max_ttl_offset: 240
      )
    end

    @desc "ETH spent from the project's team wallets for each `interval` in the given time period"
    field :eth_spent_over_time, list_of(:eth_spent_data) do
      arg(:from, non_null(:datetime))
      arg(:to, non_null(:datetime))
      arg(:interval, :interval, default_value: "1d")

      complexity(&Complexity.from_to_interval/3)

      cache_resolve(&ProjectTransfersResolver.eth_spent_over_time/3,
        ttl: 600,
        max_ttl_offset: 240
      )
    end

    @desc "Top ETH transactions for project's team wallets"
    field :eth_top_transactions, list_of(:transaction) do
      arg(:from, non_null(:datetime))
      arg(:to, non_null(:datetime))
      arg(:transaction_type, :transaction_type, default_value: :all)
      arg(:limit, :integer, default_value: 10)

      complexity(&Complexity.from_to_interval/3)
      cache_resolve(&ProjectTransfersResolver.eth_top_transfers/3)
    end

    @desc "Top transactions for the token of a given project"
    field :token_top_transactions, list_of(:transaction) do
      arg(:from, non_null(:datetime))
      arg(:to, non_null(:datetime))
      arg(:limit, :integer, default_value: 10)
      arg(:excluded_addresses, list_of(:string))

      complexity(&Complexity.from_to_interval/3)
      cache_resolve(&ProjectTransfersResolver.token_top_transfers/3)
    end

    @desc "Average daily active addresses for an ERC20 project or Ethereum over the given time period"
    field :average_daily_active_addresses, :float do
      arg(:from, :datetime)
      arg(:to, :datetime)

      cache_resolve(&ClickhouseResolver.average_daily_active_addresses/3,
        ttl: 600,
        max_ttl_offset: 600
      )
    end
  end

  object :contract_address do
    field(:address, non_null(:string))
    field(:decimals, :integer)
    field(:label, :string)
    field(:description, :string)
    field(:inserted_at, :datetime)
    field(:updated_at, :datetime)
  end

  object :source_slug_mapping do
    field(:source, non_null(:string))
    field(:slug, non_null(:string))
  end

  object :eth_address do
    field(:address, non_null(:string))

    field :balance, :float do
      cache_resolve(&ProjectBalanceResolver.eth_address_balance/3)
    end
  end

  object :ico do
    field(:id, non_null(:id))
    field(:start_date, :date)
    field(:end_date, :date)
    field(:token_usd_ico_price, :decimal)
    field(:token_eth_ico_price, :decimal)
    field(:token_btc_ico_price, :decimal)
    field(:tokens_issued_at_ico, :decimal)
    field(:tokens_sold_at_ico, :decimal)

    field :funds_raised_usd_ico_end_price, :float do
      resolve(&IcoResolver.funds_raised_usd_ico_end_price/3)
    end

    field :funds_raised_eth_ico_end_price, :float do
      resolve(&IcoResolver.funds_raised_eth_ico_end_price/3)
    end

    field :funds_raised_btc_ico_end_price, :float do
      resolve(&IcoResolver.funds_raised_btc_ico_end_price/3)
    end

    field(:minimal_cap_amount, :decimal)
    field(:maximal_cap_amount, :decimal)
    field(:contract_block_number, :integer)
    field(:contract_abi, :string)
    field(:comments, :string)

    field :cap_currency, :string do
      resolve(&IcoResolver.cap_currency/3)
    end

    field :funds_raised, list_of(:currency_amount) do
      resolve(&IcoResolver.funds_raised/3)
    end
  end

  object :ico_with_eth_contract_info do
    field(:id, non_null(:id))
    field(:start_date, :date)
    field(:end_date, :date)
    field(:main_contract_address, :string)
    field(:contract_block_number, :integer)
    field(:contract_abi, :string)
  end

  object :currency_amount do
    field(:currency_code, :string)
    field(:amount, :decimal)
  end

  object :eth_spent_data do
    field(:datetime, non_null(:datetime))
    field(:eth_spent, :float)
  end

  object :projects_count do
    field(:erc20_projects_count, non_null(:integer))
    field(:currency_projects_count, non_null(:integer))
    field(:projects_count, non_null(:integer))
  end
end
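# A hedged client-side sketch, not part of the original file: the slug and
# metric values are hypothetical, and camelCase field names assume Absinthe's
# default adapter. The arguments mirror the :aggregated_timeseries_data field
# declared above (only metric/from/to are non-null):
#
#     {
#       projectBySlug(slug: "ethereum") {
#         aggregatedTimeseriesData(
#           metric: "daily_active_addresses"
#           from: "2019-01-01T00:00:00Z"
#           to: "2019-02-01T00:00:00Z")
#       }
#     }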
lib/sanbase_web/graphql/schema/types/project_types.ex
0.777258
0.505554
project_types.ex
starcoder
defmodule Mix.Tasks.Xref do
  use Mix.Task

  import Mix.Compilers.Elixir,
    only: [read_manifest: 1, source: 0, source: 1, source: 2, module: 1]

  @shortdoc "Prints cross reference information"
  @recursive true
  @manifest "compile.elixir"

  @moduledoc """
  Prints cross reference information between modules.

  The `xref` task expects a mode as first argument:

      mix xref MODE

  All available modes are discussed below.

  This task is automatically reenabled, so you can print information
  multiple times in the same Mix invocation.

  ## mix xref callers MODULE

  Prints all callers of the given `MODULE`. Example:

      mix xref callers MyMod

  ## mix xref graph

  Prints a file dependency graph where an edge from `A` to `B` indicates
  that `A` (source) depends on `B` (sink).

      mix xref graph --format stats

  The following options are accepted:

    * `--exclude` - paths to exclude

    * `--label` - only shows relationships with the given label.
      By default, it keeps all labels that are transitive.
      The labels are "compile", "export" and "runtime" and there are two
      additional label modifiers "compile-direct" and "compile-connected".
      See "Dependencies types" section below.

    * `--only-nodes` - only shows the node names (no edges).
      Generally useful with the `--sink` flag

    * `--source` - displays all files that the given source file
      references (directly or indirectly)

    * `--sink` - displays all files that reference the given file
      (directly or indirectly)

    * `--min-cycle-size` - controls the minimum cycle size on formats
      like `stats` and `cycles`

    * `--format` - can be set to one of:

      * `pretty` - prints the graph to the terminal using Unicode
        characters. Each file is printed followed by the files it
        depends on. This is the default except on Windows;

      * `plain` - the same as pretty except ASCII characters are used
        instead of Unicode characters. This is the default on Windows;

      * `stats` - prints general statistics about the graph;

      * `cycles` - prints all cycles in the graph;

      * `dot` - produces a DOT graph description in `xref_graph.dot`
        in the current directory. Warning: this will override any
        previously generated file

  The `--source` and `--sink` options are particularly useful when trying
  to understand how the modules in a particular file interact with the
  whole system. You can combine those options with `--label` and
  `--only-nodes` to get all files that exhibit a certain property, for
  example:

      # To get all files and their direct compile time dependencies
      mix xref graph --label compile-direct

      # To get the tree that depends on lib/foo.ex at compile time
      mix xref graph --label compile --sink lib/foo.ex

      # To get all files that depend on lib/foo.ex at compile time
      mix xref graph --label compile --sink lib/foo.ex --only-nodes

      # To get all paths between two files
      mix xref graph --source lib/foo.ex --sink lib/bar.ex

      # To show general statistics about the graph
      mix xref graph --format stats

      # To limit statistics only to certain labels
      mix xref graph --format stats --label compile

  ### Understanding the printed graph

  When `mix xref graph` runs, it will print a tree of the following
  format. Imagine the following code:

      # lib/a.ex
      defmodule A do
        IO.puts B.hello()
      end

      # lib/b.ex
      defmodule B do
        def hello, do: C.world()
      end

      # lib/c.ex
      defmodule C do
        def world, do: "hello world"
      end

  It will print:

      $ mix xref graph
      lib/a.ex
      `-- lib/b.ex (compile)
          `-- lib/c.ex

  This tree means that `lib/a.ex` depends on `lib/b.ex` at compile
  time which then depends on `lib/c.ex` at runtime.
This is often problematic because if `lib/c.ex` changes, `lib/a.ex`
  also has to recompile due to this indirect compile time dependency.

  The `--source` and `--sink` flags filter the graph but do not
  ultimately change how you read it. For example, if we use the
  `--sink lib/c.ex` flag, we would see the same tree:

      $ mix xref graph --sink lib/c.ex
      lib/a.ex
      `-- lib/b.ex (compile)
          `-- lib/c.ex

  If the `--label compile` flag is given with `--sink`, then `lib/c.ex`
  won't be shown, because no module has a compile time dependency on
  `lib/c.ex` but `lib/a.ex` still has an indirect compile time
  dependency on `lib/c.ex` via `lib/b.ex`:

      $ mix xref graph --sink lib/c.ex --label compile
      lib/a.ex
      `-- lib/b.ex (compile)

  Therefore, using a combination of `--sink` with `--label` is useful
  to find all files that will change once the sink changes, alongside
  the transitive dependencies that will cause said recompilations.

  ### Dependencies types

  Elixir tracks three types of dependencies between modules: compile,
  exports, and runtime. If a module has a compile time dependency on
  another module, the caller module has to be recompiled whenever the
  callee changes. Compile-time dependencies are typically added when
  using macros or when invoking functions in the module body (outside
  of functions).

  Exports dependencies are compile time dependencies on the module API,
  namely structs and its public definitions. For example, if you import
  a module but only use its functions, it is an export dependency. If
  you use a struct, it is an export dependency too. Export dependencies
  are only recompiled if the module API changes. Note, however, that
  compile time dependencies have higher precedence than exports.
  Therefore if you import a module and use its macros, it is a compile
  time dependency.

  Runtime dependencies are added whenever you invoke another module
  inside a function. Modules with runtime dependencies do not have to
  be compiled when the callee changes, unless there is a transitive
  compile or export time dependency between them. The option
  `--label compile-connected` can be used to find such cases.

  Overall, there are two label modifiers: "compile-connected" and
  "compile-direct". The label modifier "compile-connected" can be used
  to find files that have at least one compile dependency between them,
  excluding the compile time dependency itself. "compile-direct" only
  shows direct compile time dependencies, removing the transitive
  aspect.

  ## Shared options

  Those options are shared across all modes:

    * `--include-siblings` - includes dependencies that have
      `:in_umbrella` set to true in the current project in the reports.
This can be used to find callers or to analyze graphs between projects * `--no-compile` - does not compile even if files require compilation * `--no-deps-check` - does not check dependencies * `--no-archives-check` - does not check archives * `--no-elixir-version-check` - does not check the Elixir version from mix.exs """ @switches [ abort_if_any: :boolean, archives_check: :boolean, compile: :boolean, deps_check: :boolean, elixir_version_check: :boolean, exclude: :keep, format: :string, include_siblings: :boolean, label: :string, only_nodes: :boolean, sink: :string, source: :string, min_cycle_size: :integer ] @impl true def run(args) do Mix.Task.run("compile", args) Mix.Task.reenable("xref") {opts, args} = OptionParser.parse!(args, strict: @switches) case args do ["callers", callee] -> callers(callee, opts) ["graph"] -> graph(opts) # TODO: Remove on v2.0 ["deprecated"] -> Mix.shell().error( "The deprecated check has been moved to the compiler and has no effect now" ) # TODO: Remove on v2.0 ["unreachable"] -> Mix.shell().error( "The unreachable check has been moved to the compiler and has no effect now" ) _ -> Mix.raise("xref doesn't support this command. For more information run \"mix help xref\"") end end @doc """ Returns a list of information of all the runtime function calls in the project. Each item in the list is a map with the following keys: * `:callee` - a tuple containing the module, function, and arity of the call * `:line` - an integer representing the line where the function is called * `:file` - a binary representing the file where the function is called * `:caller_module` - the module where the function is called This function returns an empty list when used at the root of an umbrella project because there is no compile manifest to extract the function call information from. To get the function calls of each child in an umbrella, execute the function at the root of each individual application. 
""" # TODO: Deprecate me on v1.14 @doc deprecated: "Use compilation tracers described in the Code module" @spec calls(keyword()) :: [ %{ callee: {module(), atom(), arity()}, line: integer(), file: String.t() } ] def calls(opts \\ []) do for manifest <- manifests(opts), source(source: source, modules: modules) <- read_manifest(manifest) |> elem(1), module <- modules, call <- collect_calls(source, module), do: call end defp collect_calls(source, module) do with [_ | _] = path <- :code.which(module), {:ok, {_, [debug_info: debug_info]}} <- :beam_lib.chunks(path, [:debug_info]), {:debug_info_v1, backend, data} <- debug_info, {:ok, %{definitions: defs}} <- backend.debug_info(:elixir_v1, module, data, []), do: walk_definitions(module, source, defs), else: (_ -> []) end defp walk_definitions(module, file, definitions) do state = %{ file: file, module: module, calls: [] } state = Enum.reduce(definitions, state, &walk_definition/2) state.calls end defp walk_definition({_function, _kind, meta, clauses}, state) do with_file_meta(state, meta, fn state -> Enum.reduce(clauses, state, &walk_clause/2) end) end defp with_file_meta(%{file: original_file} = state, meta, fun) do case Keyword.fetch(meta, :file) do {:ok, {meta_file, _}} -> state = fun.(%{state | file: meta_file}) %{state | file: original_file} :error -> fun.(state) end end defp walk_clause({_meta, args, _guards, body}, state) do state = walk_expr(args, state) walk_expr(body, state) end # &Mod.fun/arity defp walk_expr({:&, meta, [{:/, _, [{{:., _, [module, fun]}, _, []}, arity]}]}, state) when is_atom(module) and is_atom(fun) do add_call(module, fun, arity, meta, state) end # Mod.fun(...) defp walk_expr({{:., _, [module, fun]}, meta, args}, state) when is_atom(module) and is_atom(fun) do state = add_call(module, fun, length(args), meta, state) walk_expr(args, state) end # %Module{...} defp walk_expr({:%, meta, [module, {:%{}, _meta, args}]}, state) when is_atom(module) and is_list(args) do state = add_call(module, :__struct__, 0, meta, state) walk_expr(args, state) end # Function call defp walk_expr({left, _meta, right}, state) when is_list(right) do state = walk_expr(right, state) walk_expr(left, state) end # {x, y} defp walk_expr({left, right}, state) do state = walk_expr(right, state) walk_expr(left, state) end # [...] 
defp walk_expr(list, state) when is_list(list) do Enum.reduce(list, state, &walk_expr/2) end defp walk_expr(_other, state) do state end defp add_call(module, fun, arity, meta, state) do call = %{ callee: {module, fun, arity}, caller_module: state.module, file: state.file, line: meta[:line] } %{state | calls: [call | state.calls]} end ## Modes defp callers(callee, opts) do module = parse_callee(callee) file_callers = for source <- sources(opts), reference = reference(module, source), do: {source(source, :source), reference} for {file, type} <- Enum.sort(file_callers) do Mix.shell().info([file, " (", type, ")"]) end :ok end defp graph(opts) do {direct_filter, transitive_filter} = label_filter(opts[:label]) write_graph(file_references(direct_filter, opts), transitive_filter, opts) :ok end ## Callers defp parse_callee(callee) do case Mix.Utils.parse_mfa(callee) do {:ok, [module]} -> module _ -> Mix.raise("xref callers MODULE expects a MODULE, got: " <> callee) end end defp reference(module, source) do cond do module in source(source, :compile_references) -> "compile" module in source(source, :export_references) -> "export" module in source(source, :runtime_references) -> "runtime" true -> nil end end ## Graph defp excluded(opts) do opts |> Keyword.get_values(:exclude) |> Enum.flat_map(&[{&1, nil}, {&1, :compile}, {&1, :export}]) end defp label_filter(nil), do: {:all, :all} defp label_filter("compile"), do: {:all, :compile} defp label_filter("export"), do: {:all, :export} defp label_filter("runtime"), do: {:all, nil} defp label_filter("compile-connected"), do: {:all, :compile_connected} defp label_filter("compile-direct"), do: {:compile, :all} defp label_filter(other), do: Mix.raise("unknown --label #{other}") defp file_references(filter, opts) do module_sources = for manifest_path <- manifests(opts), {manifest_modules, manifest_sources} = read_manifest(manifest_path), module(module: module, sources: sources) <- manifest_modules, source <- sources, source = Enum.find(manifest_sources, &match?(source(source: ^source), &1)), do: {module, source} all_modules = MapSet.new(module_sources, &elem(&1, 0)) Map.new(module_sources, fn {current, source} -> source( runtime_references: runtime, export_references: exports, compile_references: compile, source: file ) = source compile_references = modules_to_nodes(compile, :compile, current, source, module_sources, all_modules, filter) export_references = modules_to_nodes(exports, :export, current, source, module_sources, all_modules, filter) runtime_references = modules_to_nodes(runtime, nil, current, source, module_sources, all_modules, filter) references = runtime_references |> Map.merge(export_references) |> Map.merge(compile_references) |> Enum.to_list() {file, references} end) end defp modules_to_nodes(_, label, _, _, _, _, filter) when filter != :all and label != filter do %{} end defp modules_to_nodes(modules, label, current, source, module_sources, all_modules, _filter) do for module <- modules, module != current, module in all_modules, module_sources[module] != source, do: {source(module_sources[module], :source), label}, into: %{} end defp write_graph(file_references, filter, opts) do excluded = excluded(opts) source = opts[:source] sink = opts[:sink] if source && is_nil(file_references[source]) do Mix.raise("Source could not be found: #{source}") end if sink && is_nil(file_references[sink]) do Mix.raise("Sink could not be found: #{sink}") end file_references = if sink = opts[:sink] do filter_for_sink(file_references, sink, filter) else 
filter_for_source(file_references, filter) end roots = if source = opts[:source] do %{source => nil} else file_references |> Map.delete(opts[:sink]) |> Enum.map(&{elem(&1, 0), nil}) |> Kernel.--(excluded) |> Map.new() end callback = fn {file, type} -> children = if opts[:only_nodes], do: [], else: Map.get(file_references, file, []) type = type && "(#{type})" {{file, type}, Enum.sort(children -- excluded)} end case opts[:format] do "dot" -> Mix.Utils.write_dot_graph!( "xref_graph.dot", "xref graph", Enum.sort(roots), callback, opts ) """ Generated "xref_graph.dot" in the current directory. To generate a PNG: dot -Tpng xref_graph.dot -o xref_graph.png For more options see http://www.graphviz.org/. """ |> String.trim_trailing() |> Mix.shell().info() "stats" -> print_stats(file_references, opts) "cycles" -> print_cycles(file_references, opts) _ -> Mix.Utils.print_tree(Enum.sort(roots), callback, opts) end end defp connected?([_ | _]), do: true defp connected?(_), do: false defp filter_fn(_file_references, :all), do: fn _ -> true end defp filter_fn(file_references, :compile_connected) do fn {key, type} -> type == :compile && connected?(file_references[key]) end end defp filter_fn(_file_references, filter), do: fn {_, type} -> type == filter end defp filter_for_source(file_references, :all), do: file_references defp filter_for_source(file_references, filter) do fun = filter_fn(file_references, filter) Enum.reduce(file_references, %{}, fn {key, _}, acc -> {children, _} = filter_for_source(file_references, key, %{}, %{}, fun) Map.put(acc, key, children |> Map.delete(key) |> Map.to_list()) end) end defp filter_for_source(references, key, acc, seen, filter_fn) do nodes = references[key] if is_nil(nodes) || seen[key] do {acc, seen} else seen = Map.put(seen, key, true) Enum.reduce(nodes, {acc, seen}, fn {child_key, type} = reference, {acc, seen} -> if filter_fn.(reference) do {Map.put(acc, child_key, type), Map.put(seen, child_key, true)} else filter_for_source(references, child_key, acc, seen, filter_fn) end end) end end defp filter_for_sink(file_references, sink, filter) do fun = filter_fn(file_references, filter) file_references |> invert_references(fn _ -> true end) |> depends_on_sink([{sink, nil}], %{}) |> invert_references(fun) end defp depends_on_sink(file_references, new_nodes, acc) do Enum.reduce(new_nodes, acc, fn {new_node_name, _type}, acc -> new_nodes = file_references[new_node_name] if acc[new_node_name] || !new_nodes do acc else depends_on_sink(file_references, new_nodes, Map.put(acc, new_node_name, new_nodes)) end end) end defp invert_references(file_references, fun) do Enum.reduce(file_references, %{}, fn {file, references}, acc -> Enum.reduce(references, acc, fn {file_reference, type} = reference, acc -> if fun.(reference) do Map.update(acc, file_reference, [{file, type}], &[{file, type} | &1]) else acc end end) end) end defp print_stats(references, opts) do with_digraph(references, fn graph -> shell = Mix.shell() counters = Enum.reduce(references, %{compile: 0, export: 0, nil: 0}, fn {_, deps}, acc -> Enum.reduce(deps, acc, fn {_, value}, acc -> Map.update!(acc, value, &(&1 + 1)) end) end) shell.info("Tracked files: #{map_size(references)} (nodes)") shell.info("Compile dependencies: #{counters.compile} (edges)") shell.info("Exports dependencies: #{counters.export} (edges)") shell.info("Runtime dependencies: #{counters.nil} (edges)") shell.info("Cycles: #{length(cycles(graph, opts))}") outgoing = references |> Enum.map(fn {file, _} -> {:digraph.out_degree(graph, file), file} end) |> 
Enum.sort(:desc) |> Enum.take(10) shell.info("\nTop #{length(outgoing)} files with most outgoing dependencies:") for {count, file} <- outgoing, do: shell.info(" * #{file} (#{count})") incoming = references |> Enum.map(fn {file, _} -> {:digraph.in_degree(graph, file), file} end) |> Enum.sort(:desc) |> Enum.take(10) shell.info("\nTop #{length(incoming)} files with most incoming dependencies:") for {count, file} <- incoming, do: shell.info(" * #{file} (#{count})") end) end defp with_digraph(references, callback) do graph = :digraph.new() try do for {file, _} <- references do :digraph.add_vertex(graph, file) end for {file, deps} <- references, {dep, label} <- deps do :digraph.add_edge(graph, file, dep, label) end callback.(graph) after :digraph.delete(graph) end end defp cycles(graph, opts) do cycles = graph |> :digraph_utils.cyclic_strong_components() |> Enum.reduce([], &inner_cycles(graph, &1, &2)) |> Enum.map(&{length(&1), &1}) if min = opts[:min_cycle_size], do: Enum.filter(cycles, &(elem(&1, 0) > min)), else: cycles end defp inner_cycles(_graph, [], acc), do: acc defp inner_cycles(graph, [v | vertices], acc) do cycle = :digraph.get_cycle(graph, v) inner_cycles(graph, vertices -- cycle, [cycle | acc]) end defp print_cycles(references, opts) do with_digraph(references, fn graph -> shell = Mix.shell() case graph |> cycles(opts) |> Enum.sort(:desc) do [] -> shell.info("No cycles found") cycles -> shell.info("#{length(cycles)} cycles found. Showing them in decreasing size:\n") for {length, cycle} <- cycles do shell.info("Cycle of length #{length}:\n") for node <- cycle do shell.info(" " <> node) end shell.info("") end end end) end ## Helpers defp sources(opts) do for manifest <- manifests(opts), source() = source <- read_manifest(manifest) |> elem(1), do: source end defp manifests(opts) do siblings = if opts[:include_siblings] do for %{scm: Mix.SCM.Path, opts: opts} <- Mix.Dep.cached(), opts[:in_umbrella], do: Path.join([opts[:build], ".mix", @manifest]) else [] end [Path.join(Mix.Project.manifest_path(), @manifest) | siblings] end end
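# A hedged usage sketch, not part of the original file: the concrete values
# are hypothetical, but the result shape follows the typespec of calls/1
# above (note the function is marked as deprecated in favour of tracers).
#
#     # Inside a compiled Mix project:
#     Mix.Tasks.Xref.calls()
#     #=> [%{callee: {Enum, :map, 2}, caller_module: MyApp,
#     #      file: "lib/my_app.ex", line: 10}, ...]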
lib/mix/lib/mix/tasks/xref.ex
0.852844
0.548915
xref.ex
starcoder
defmodule Phoenix.Logger do @moduledoc """ Instrumenter to handle logging of various instrumentation events. ## Parameter filtering When logging parameters, Phoenix can filter out sensitive parameters such as passwords and tokens. Parameters to be filtered can be added via the `:filter_parameters` option: config :phoenix, :filter_parameters, ["password", "secret"] With the configuration above, Phoenix will filter any parameter that contains the terms `password` or `secret`. The match is case sensitive. Phoenix's default is `["password"]`. Phoenix can filter all parameters by default and selectively keep parameters. This can be configured like so: config :phoenix, :filter_parameters, {:keep, ["id", "order"]} With the configuration above, Phoenix will filter all parameters, except those that match exactly `id` or `order`. If a kept parameter matches, all parameters nested under that one will also be kept. """ require Logger @doc false def install do handlers = %{ [:phoenix, :endpoint, :start] => &phoenix_endpoint_start/4, [:phoenix, :endpoint, :stop] => &phoenix_endpoint_stop/4, [:phoenix, :router_dispatch, :start] => &phoenix_router_dispatch_start/4, # [:phoenix, :router_dispatch, :stop] => &phoenix_router_dispatch_stop/4, [:phoenix, :error_rendered] => &phoenix_error_rendered/4, [:phoenix, :socket_connected] => &phoenix_socket_connected/4, [:phoenix, :channel_joined] => &phoenix_channel_joined/4, [:phoenix, :channel_handled_in] => &phoenix_channel_handled_in/4 } for {key, fun} <- handlers do :telemetry.attach({__MODULE__, key}, key, fun, :ok) end end @doc false def duration(duration) do duration = System.convert_time_unit(duration, :native, :microsecond) if duration > 1000 do [duration |> div(1000) |> Integer.to_string(), "ms"] else [Integer.to_string(duration), "µs"] end end @doc false def filter_values(values, params \\ Application.get_env(:phoenix, :filter_parameters, [])) def filter_values(values, {:discard, params}), do: discard_values(values, params) def filter_values(values, {:keep, params}), do: keep_values(values, params) def filter_values(values, params), do: discard_values(values, params) defp discard_values(%{__struct__: mod} = struct, _params) when is_atom(mod) do struct end defp discard_values(%{} = map, params) do Enum.into(map, %{}, fn {k, v} -> if is_binary(k) and String.contains?(k, params) do {k, "[FILTERED]"} else {k, discard_values(v, params)} end end) end defp discard_values([_ | _] = list, params) do Enum.map(list, &discard_values(&1, params)) end defp discard_values(other, _params), do: other defp keep_values(%{__struct__: mod}, _params) when is_atom(mod), do: "[FILTERED]" defp keep_values(%{} = map, params) do Enum.into(map, %{}, fn {k, v} -> if is_binary(k) and k in params do {k, discard_values(v, [])} else {k, keep_values(v, params)} end end) end defp keep_values([_ | _] = list, params) do Enum.map(list, &keep_values(&1, params)) end defp keep_values(_other, _params), do: "[FILTERED]" ## Event: [:phoenix, :endpoint, *] defp phoenix_endpoint_start(_, _, %{conn: conn} = metadata, _) do level = metadata[:options][:log] || :info Logger.log(level, fn -> %{method: method, request_path: request_path} = conn [method, ?\s, request_path] end) end defp phoenix_endpoint_stop(_, %{duration: duration}, %{conn: conn} = metadata, _) do level = metadata[:options][:log] || :info Logger.log(level, fn -> %{status: status, state: state} = conn status = Integer.to_string(status) [connection_type(state), ?\s, status, " in ", duration(duration)] end) end defp 
connection_type(:set_chunked), do: "Chunked" defp connection_type(_), do: "Sent" ## Event: [:phoenix, :error_rendered] defp phoenix_error_rendered(_, _, %{log: false}, _), do: :ok defp phoenix_error_rendered(_, _, %{log: level, status: status, kind: kind, reason: reason}, _) do Logger.log(level, fn -> [ "Converted ", Atom.to_string(kind), ?\s, error_banner(kind, reason), " to ", Integer.to_string(status), " response" ] end) end defp error_banner(:error, %type{}), do: inspect(type) defp error_banner(_kind, reason), do: inspect(reason) ## Event: [:phoenix, :routed, *] defp phoenix_router_dispatch_start(_, _, %{log: false}, _), do: :ok defp phoenix_router_dispatch_start(_, _, metadata, _) do %{log: level, conn: conn, pipe_through: pipe_through, plug: plug, plug_opts: plug_opts} = metadata Logger.log(level, fn -> [ "Processing with ", inspect(plug), maybe_action(plug_opts), ?\n, " Parameters: ", params(conn.params), ?\n, " Pipelines: ", inspect(pipe_through) ] end) end defp maybe_action(action) when is_atom(action), do: [?., Atom.to_string(action), ?/, ?2] defp maybe_action(_), do: [] defp params(%Plug.Conn.Unfetched{}), do: "[UNFETCHED]" defp params(params), do: params |> filter_values() |> inspect() ## Event: [:phoenix, :socket_connected] defp phoenix_socket_connected(_, _, %{log: false}, _), do: :ok defp phoenix_socket_connected(_, %{duration: duration}, %{log: level} = meta, _) do Logger.log(level, fn -> %{ transport: transport, params: params, connect_info: connect_info, user_socket: user_socket, result: result, serializer: serializer } = meta [ connect_result(result), inspect(user_socket), " in ", duration(duration), "\n Transport: ", inspect(transport), "\n Serializer: ", inspect(serializer), "\n Connect Info: ", inspect(connect_info), "\n Parameters: ", inspect(filter_values(params)) ] end) end defp connect_result(:ok), do: "CONNECTED TO " defp connect_result(:error), do: "REFUSED CONNECTION TO " ## Event: [:phoenix, :channel_joined] def phoenix_channel_joined(_, %{duration: duration}, %{socket: socket} = metadata, _) do channel_log(:log_join, socket, fn -> %{result: result, params: params} = metadata [ join_result(result), socket.topic, " in ", duration(duration), "\n Parameters: ", inspect(filter_values(params)) ] end) end defp join_result(:ok), do: "JOINED " defp join_result(:error), do: "REFUSED JOIN " ## Event: [:phoenix, :channel_handle_in] def phoenix_channel_handled_in(_, %{duration: duration}, %{socket: socket} = metadata, _) do channel_log(:log_handle_in, socket, fn -> %{event: event, params: params} = metadata [ "HANDLED ", event, " INCOMING ON ", socket.topic, " (", inspect(socket.channel), ") in ", duration(duration), "\n Parameters: ", inspect(filter_values(params)) ] end) end defp channel_log(_log_option, %{topic: "phoenix" <> _}, _fun), do: :ok defp channel_log(log_option, %{private: private}, fun) do if level = Map.get(private, log_option) do Logger.log(level, fun) end end end
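# A hedged sketch of the parameter filtering implemented above, not part of
# the original file; the parameter names and values are hypothetical:
#
#     Phoenix.Logger.filter_values(%{"password" => "s3cret", "user" => "jane"})
#     #=> %{"password" => "[FILTERED]", "user" => "jane"}
#
#     Phoenix.Logger.filter_values(%{"id" => 1, "token" => "abc"}, {:keep, ["id"]})
#     #=> %{"id" => 1, "token" => "[FILTERED]"}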
lib/phoenix/logger.ex
0.802788
0.52141
logger.ex
starcoder
defmodule Robotica.Scheduler.Classifier do
  @moduledoc """
  Classifies dates for the scheduler using the configured classification rules.
  """

  alias Robotica.Config.Loader
  alias Robotica.Types

  defp is_week_day?(date) do
    case Date.day_of_week(date) do
      dow when dow in 1..5 -> true
      _ -> false
    end
  end

  if Application.compile_env(:robotica_common, :compile_config_files) do
    @filename Application.compile_env(:robotica, :classifications_file)
    @external_resource @filename
    @data Loader.classifications(@filename)
    defp get_data, do: @data
  else
    defp get_data do
      filename = Application.get_env(:robotica, :classifications_file)
      Loader.classifications(filename)
    end
  end

  defp is_date_in_classification?(%Types.Classification{} = classification, date) do
    cond do
      not is_nil(classification.date) ->
        case Date.compare(date, classification.date) do
          :eq -> true
          _ -> false
        end

      true ->
        true
    end
  end

  defp is_date_start_ok?(%Types.Classification{} = classification, date) do
    cond do
      not is_nil(classification.start) ->
        case Date.compare(date, classification.start) do
          a when a in [:gt, :eq] -> true
          _ -> false
        end

      true ->
        true
    end
  end

  defp is_date_stop_ok?(%Types.Classification{} = classification, date) do
    cond do
      not is_nil(classification.stop) ->
        case Date.compare(date, classification.stop) do
          b when b in [:lt, :eq] -> true
          _ -> false
        end

      true ->
        true
    end
  end

  defp is_week_day_in_classification?(%Types.Classification{} = classification, date) do
    cond do
      is_nil(classification.week_day) -> true
      classification.week_day == true -> is_week_day?(date)
      classification.week_day == false -> not is_week_day?(date)
    end
  end

  defp is_day_of_week_in_classification?(%Types.Classification{} = classification, date) do
    cond do
      is_nil(classification.day_of_week) -> true
      classification.day_of_week == Date.day_of_week(date) -> true
      true -> false
    end
  end

  defp is_in_classification?(%Types.Classification{} = classification, date) do
    with true <- is_date_in_classification?(classification, date),
         true <- is_date_start_ok?(classification, date),
         true <- is_date_stop_ok?(classification, date),
         true <- is_week_day_in_classification?(classification, date),
         true <- is_day_of_week_in_classification?(classification, date) do
      true
    else
      false -> false
    end
  end

  defp is_excluded_entry?(classification_names, %Types.Classification{} = classification) do
    exclude_list =
      if is_nil(classification.exclude) do
        []
      else
        classification.exclude
      end

    Enum.any?(exclude_list, fn exclude_name ->
      Enum.member?(classification_names, exclude_name)
    end)
  end

  defp reject_excluded(classifications) do
    classification_names =
      Enum.map(classifications, fn classification -> classification.day_type end)

    Enum.reject(classifications, fn classification ->
      is_excluded_entry?(classification_names, classification)
    end)
  end

  def classify_date(date) do
    get_data()
    |> Enum.filter(fn classification -> is_in_classification?(classification, date) end)
    |> reject_excluded()
    |> Enum.map(fn classification -> classification.day_type end)
  end
end
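# A hedged usage sketch, not part of the original file: the returned day
# types depend entirely on the classifications file configured under
# :robotica, :classifications_file, so the values below are hypothetical.
#
#     Robotica.Scheduler.Classifier.classify_date(~D[2021-12-25])
#     #=> ["holiday", "weekend"]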
robotica/lib/robotica/scheduler/classifier.ex
0.651466
0.537345
classifier.ex
starcoder
defmodule Mux.Video.Assets do
  @moduledoc """
  This module provides functions for managing assets in Mux Video. [API Documentation](https://docs.mux.com/v1/reference#assets)
  """
  alias Mux.{Base, Fixtures}

  @path "/video/v1"

  @doc """
  Create a new asset. Returns `{:ok, asset, %Tesla.Env{}}`.

  ## Examples

      iex> client = Mux.Base.new("my_token_id", "my_token_secret")
      iex> Mux.Video.Assets.create(client, %{input: "https://example.com/video.mp4"})
      {:ok, #{inspect(Fixtures.asset(:create))}, #{inspect(Fixtures.tesla_env({:asset, [:create]}))}}

  """
  def create(client, params) do
    Base.post(client, @path <> "/assets", params)
  end

  @doc """
  List assets. Returns a tuple such as `{:ok, assets, %Tesla.Env{}}`

  ## Examples

      iex> client = Mux.Base.new("my_token_id", "my_token_secret")
      iex> {:ok, assets, _env} = Mux.Video.Assets.list(client)
      iex> assets
      #{inspect([Fixtures.asset(), Fixtures.asset()])}

  """
  def list(client, params \\ []), do: Base.get(client, @path <> "/assets", query: params)

  @doc """
  Retrieve an asset by ID. Returns a tuple such as `{:ok, asset, %Tesla.Env{}}`

  ## Examples

      iex> client = Mux.Base.new("my_token_id", "my_token_secret")
      iex> {:ok, asset, _env} = Mux.Video.Assets.get(client, "00ecNLnqiG8v00TLqqeZ00uCE5wCAaO3kKc")
      iex> asset
      #{inspect(Fixtures.asset())}

  """
  def get(client, asset_id, options \\ []) do
    Base.get(client, @path <> "/assets/" <> asset_id, query: options)
  end

  @doc """
  Delete an asset. Returns a tuple such as `{:ok, %Tesla.Env{}}`

  ## Examples

      iex> client = Mux.Base.new("my_token_id", "my_token_secret")
      iex> {status, "", _env} = Mux.Video.Assets.delete(client, "00ecNLnqiG8v00TLqqeZ00uCE5wCAaO3kKc")
      iex> status
      :ok

  """
  def delete(client, asset_id, params \\ []) do
    Base.delete(client, @path <> "/assets/" <> asset_id, query: params)
  end

  @doc """
  Retrieve the asset's input info. Returns a tuple such as `{:ok, input_info, %Tesla.Env{}}`

  ## Examples

      iex> client = Mux.Base.new("my_token_id", "my_token_secret")
      iex> {:ok, input_info, _env} = Mux.Video.Assets.input_info(client, "00ecNLnqiG8v00TLqqeZ00uCE5wCAaO3kKc")
      iex> input_info
      [#{inspect(Fixtures.input_info())}]

  """
  def input_info(client, asset_id, params \\ []) do
    Base.get(client, @path <> "/assets/" <> asset_id <> "/input-info", query: params)
  end
end
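# A hedged composition sketch, not part of the original file: the token
# values are placeholders and the "id" key on the asset map is an assumption
# based on the fixture payloads used in the doctests above.
#
#     client = Mux.Base.new("my_token_id", "my_token_secret")
#     {:ok, asset, _env} =
#       Mux.Video.Assets.create(client, %{input: "https://example.com/video.mp4"})
#     {:ok, asset, _env} = Mux.Video.Assets.get(client, asset["id"])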
lib/mux/video/assets.ex
0.817684
0.445771
assets.ex
starcoder
defmodule Numerix.Distance do
  @moduledoc """
  Distance functions between two vectors.
  """

  use Numerix.Tensor

  import Numerix.LinearAlgebra

  alias Numerix.{Common, Correlation, Statistics}

  @doc """
  Mean squared error, the average of the squares of the errors
  between two vectors, i.e. the difference between predicted
  and actual values.
  """
  @spec mse(Common.vector(), Common.vector()) :: Common.maybe_float()
  def mse(x = %Tensor{}, y = %Tensor{}) do
    p = pow(x - y, 2)
    Statistics.mean(p.items)
  end

  def mse(vector1, vector2) do
    x = Tensor.new(vector1)
    y = Tensor.new(vector2)
    mse(x, y)
  end

  @doc """
  Root mean square error of two vectors, or simply the square root
  of mean squared error of the same set of values. It is a measure
  of the differences between predicted and actual values.
  """
  @spec rmse(Common.vector(), Common.vector()) :: Common.maybe_float()
  def rmse(vector1, vector2) do
    :math.sqrt(mse(vector1, vector2))
  end

  @doc """
  The Pearson's distance between two vectors.
  """
  @spec pearson(Common.vector(), Common.vector()) :: Common.maybe_float()
  def pearson(vector1, vector2) do
    case Correlation.pearson(vector1, vector2) do
      nil -> nil
      correlation -> 1.0 - correlation
    end
  end

  @doc """
  The Minkowski distance between two vectors.
  """
  @spec minkowski(Common.vector(), Common.vector(), integer) :: Common.maybe_float()
  def minkowski(x, y, p \\ 3)

  def minkowski(x = %Tensor{}, y = %Tensor{}, p) do
    norm(p, x - y)
  end

  def minkowski(vector1, vector2, p) do
    x = Tensor.new(vector1)
    y = Tensor.new(vector2)
    minkowski(x, y, p)
  end

  @doc """
  The Euclidean distance between two vectors.
  """
  @spec euclidean(Common.vector(), Common.vector()) :: Common.maybe_float()
  def euclidean(x = %Tensor{}, y = %Tensor{}) do
    l2_norm(x - y)
  end

  def euclidean(vector1, vector2) do
    x = Tensor.new(vector1)
    y = Tensor.new(vector2)
    euclidean(x, y)
  end

  @doc """
  The Manhattan distance between two vectors.
  """
  @spec manhattan(Common.vector(), Common.vector()) :: Common.maybe_float()
  def manhattan(x = %Tensor{}, y = %Tensor{}) do
    l1_norm(x - y)
  end

  def manhattan(vector1, vector2) do
    x = Tensor.new(vector1)
    y = Tensor.new(vector2)
    manhattan(x, y)
  end

  @doc """
  The Jaccard distance (1 - Jaccard index) between two vectors.
  """
  @spec jaccard(Common.vector(), Common.vector()) :: Common.maybe_float()
  def jaccard(%Tensor{items: []}, %Tensor{items: []}), do: 0.0
  def jaccard(%Tensor{items: []}, _), do: nil
  def jaccard(_, %Tensor{items: []}), do: nil
  def jaccard([], []), do: 0.0
  def jaccard([], _), do: nil
  def jaccard(_, []), do: nil

  def jaccard(vector1, vector2) do
    vector1
    |> Stream.zip(vector2)
    |> Enum.reduce({0, 0}, fn {x, y}, {intersection, union} ->
      case {x, y} do
        {x, y} when x == 0 or y == 0 ->
          {intersection, union}

        {x, y} when x == y ->
          {intersection + 1, union + 1}

        _ ->
          {intersection, union + 1}
      end
    end)
    |> to_jaccard_distance
  end

  defp to_jaccard_distance({_intersection, union}) when union == 0, do: 1.0

  defp to_jaccard_distance({intersection, union}) do
    1 - intersection / union
  end
end
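# A hedged usage sketch, not part of the original file. Plain lists are
# wrapped into tensors by the fallback clauses above, so the functions can
# be called directly on vectors (exact float formatting may vary):
#
#     Numerix.Distance.euclidean([1, 2, 3], [4, 5, 6])
#     #=> 5.196152422706632   # sqrt(27)
#     Numerix.Distance.manhattan([1, 2, 3], [4, 5, 6])
#     #=> 9.0                 # |1-4| + |2-5| + |3-6|
#     Numerix.Distance.jaccard([1, 1, 2], [1, 2, 2])
#     #=> 0.3333333333333333  # 1 - 2/3, per the intersection/union fold above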
lib/distance.ex
0.881242
0.925027
distance.ex
starcoder
defmodule Cassette.Plug do
  @moduledoc """
  A plug to authenticate using Cassette

  When plugged, this will test the session for the presence of the user. When not present it will test for
  presence of a ticket parameter and validate it. If none of those are present, it will redirect the user to
  the cas login.

  To add to your router:

  ```
  defmodule Router do
    use Plug.Router

    plug Cassette.Plug
    plug :match
    plug :dispatch

    (...)
  end
  ```

  Just be sure that your `Plug.Session` is configured and plugged before `Cassette.Plug`

  If you are using this with phoenix, plug into one of your pipelines:

  ```
  defmodule MyApp.Router do
    use MyApp.Web, :router

    pipeline :browser do
      (...)
      plug :fetch_session
      plug Cassette.Plug
      plug :fetch_flash
      (...)
    end
  end
  ```

  Be sure that this module is plugged after the `:fetch_session` plug since this is a requirement

  ## Customizing behaviour

  The behaviour for authentication failures may be customized using your own
  `Cassette.Plug.AuthenticationHandler`. Please refer to the documentation on that module for more details.
  """

  @spec init([]) :: []
  @doc "Initializes this plug"
  def init(options), do: options

  @type options :: [cassette: Cassette.Support, handler: Cassette.Plug.AuthenticationHandler]

  @spec call(Plug.Conn.t, options) :: Plug.Conn.t
  @doc """
  Runs this plug.

  Your custom Cassette module may be provided with the `:cassette` key. It will default to the `Cassette` module.
  """
  def call(conn, options) do
    cassette = Keyword.get(options, :cassette, Cassette)
    handler = Keyword.get(options, :handler, Cassette.Plug.AuthenticationHandler.default)

    case handler.user_or_token(conn, options) do
      {%Cassette.User{}, _} ->
        conn

      {nil, :error} ->
        handler.unauthenticated(conn, options)

      {nil, {:ok, ticket}} ->
        case cassette.validate(ticket, handler.service(conn, options)) do
          {:ok, user} ->
            handler.user_authenticated(conn, user, options)

          _ ->
            handler.invalid_authentication(conn, options)
        end
    end
  end
end
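# A hedged configuration sketch, not part of the original file: the module
# names are hypothetical stand-ins for the options accepted by call/2 above.
#
#     plug Cassette.Plug,
#       cassette: MyApp.Cassette,
#       handler: MyApp.AuthenticationHandler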
lib/cassette/plug.ex
0.816809
0.809201
plug.ex
starcoder
defmodule Drab.Core do
  @moduledoc ~S"""
  Drab module providing the base of communication between the browser and the server.

  `Drab.Core` defines the method to declare client-side events, which are handled server-side in the
  commander module. Also provides basic functions for running JS code directly from Phoenix on the browser.

  ## Commander
  Commander is the module to keep your Drab functions (event handlers) in. See `Drab.Commander` for more
  info, and just for this part of docs let's assume you have the following one defined:

      defmodule DrabExample.PageCommander do
        use Drab.Commander, modules: []

        def button_clicked(socket, payload) do
          socket |> console("You've sent me this: #{payload |> inspect}")
        end
      end

  ## Events
  Events are defined directly in the HTML by adding the `drab` attribute with the following pattern:

      <button drab='event_name#options:event_handler_function_name(argument)'>clickme</button>

  * `event_name` is the DOM event name, eg. "click", "blur"
  * `event_handler_function_name` - the name of the event handler function in the commander on the
    server side
  * `options` - optional, so far the only available option is "debounce(milliseconds)" for "keyup" event
  * `argument` - optional, additional argument to be passed to the event handler function as a third
    argument

  Example:

      <button drab='click:button_clicked'>clickme</button>

  Clicking the button above launches `DrabExample.PageCommander.button_clicked/2` on the server side.

      <button drab='click:button_clicked(42)'>clickme</button>

  Clicking the button above launches `DrabExample.PageCommander.button_clicked/3` on the server side,
  with the third argument of value 42. This is evaluated on the client side, so it could be any valid
  JS expression:

      <button drab='click:button_clicked({the_answer: 42})'>
      <button drab='click:button_clicked(window.location)'>

  You may have multiple events defined for a DOM object, but the specific event may appear there only
  once (can't define two handlers for one event). Separate `event:handler` pairs with whitespaces:

      <button drab='click:button_clicked mouseover:prepare_button'>clickme</button>

  ### Shortcut form
  There are a few shortcuts for the most popular events: `click`, `keyup`, `keydown`, `change`. For
  those events an attribute `drab-EVENTNAME` must be set. The following is an equivalent for the
  previous one:

      <button drab-click='button_clicked'>clickme</button>

  As above, there is a possibility to define multiple event handlers for one DOM object, but only one
  handler per event. The following form is valid:

      <button drab-click='button_clicked' drab-mouseover='prepare_button(42)'>clickme</button>

  But the next one is prohibited:

      <button drab-click='handler1' drab-click='handler2'>INCORRECT</button>

  In this case you may provide options with `drab-options` attribute, but only when you have a single
  event defined.

  There is a possibility to configure the shortcut list:

      config :drab, :events_shorthands, ["click", "keyup", "blur"]

  Please keep this list short, as it affects client script performance.

  ### Long form [deprecated]
  You may also configure drab handler with `drab-event` and `drab-handler` combination, but please
  don't. This is coming from the ancient version of the software and will be removed in the stable
  release.

  #### Defining an optional argument in multiple nodes with `drab-argument` attribute
  If you add a `drab-argument` attribute to any tag, all children of this tag will use this as an
  optional argument.
  Notice that the existing arguments are not overwritten, so this:

      <div drab-argument='42'>
        <button drab-click='button_clicked'>
        <button drab-click='button_clicked(43)'>
      </div>

  is the equivalent to:

      <button drab-click='button_clicked(42)'>
      <button drab-click='button_clicked(43)'>

  ### Handling event in any commander (Shared Commander)

  By default Drab runs the event handler in the commander module corresponding to the controller which
  rendered the current page. But it is possible to choose the module by simply providing the full path
  to the commander:

      <button drab-click='MyAppWeb.MyCommander.button_clicked'>clickme</button>

  Notice that the module must be a commander module, i.e. it must be marked with `use Drab.Commander`,
  and the function must be marked as public with the `Drab.Commander.public/1` macro.

  ### Form values

  If the sender object is inside a `<form>` tag, it sends the "form" map, which contains values of all
  the inputs found within the form. Keys of that map are the "name" attribute of the input or, if not
  found, an "id" attribute. If neither "name" nor "id" is given, the value of the form is not included.

  ## Running Elixir code from the Browser

  There is the Javascript method
  [`Drab.exec_elixir()`](Drab.Client.html#module-drab-exec_elixir-elixir_function_name-argument)
  in the global `Drab` object, which allows you to run the Elixir function defined in the Commander.

  ## Store

  Analogously to Plug, Drab can store the values in its own session. To avoid confusion with the Plug
  Session, it is called a Store. You can use the functions `put_store/3` and `get_store/2` to read and
  write the values in the Store. It works exactly the same way as a "normal", Phoenix session.

  * By default, the Drab Store is kept in browser Local Storage. This means it is gone when you close
    the browser or the tab. You may set up where to keep the data with the `drab_store_storage` config
    entry, see Drab.Config
  * The Drab Store is not the Plug Session! This is a different entity. Anyway, you have access to the
    Plug Session (details below).
  * The Drab Store is stored on the client side and it is signed, but - as the Plug Session cookie -
    not ciphered.

  ## Session

  Although the Drab Store is a different entity than the Plug Session (used in Controllers), there is a
  way to access the Session. First, you need to whitelist the keys you want to access in the
  `access_session/1` macro in the Commander (you may give it a list of atoms or a single atom).
  Whitelisting is there for security: the data is kept in a token on the client side, and it is signed
  but not encrypted.

      defmodule DrabPoc.PageCommander do
        use Drab.Commander

        onload :page_loaded
        access_session :drab_test

        def page_loaded(socket) do
          socket
          |> update(:val, set: get_session(socket, :drab_test), on: "#show_session_test")
        end
      end

  There is no way to update the session from Drab. The session is read-only.

  ## Broadcasting

  Normally Drab operates on the user interface of the browser which generated the event, but you may
  use it for broadcasting changes to all connected browsers. Drab uses a *subject* to distinguish the
  browsers that are allowed to receive the change.

  Broadcasting functions receive `socket` or `subject` as the first argument. If `socket` is used, the
  function derives the `subject` from the commander configuration. See `Drab.Commander.broadcasting/1`
  to learn how to configure the broadcasting options.

  Broadcasting functions may be launched without the `socket` given.
In this case, you need to define it manually, using helper functions: `Drab.Core.same_path/1`, `Drab.Core.same_topic/1` and `Drab.Core.same_controller/1`. See `broadcast_js/3` for more. List of broadcasting functions: * `Drab.Core`: * `Drab.Core.broadcast_js/3` * `Drab.Core.broadcast_js!/3` * `Drab.Element`: * `Drab.Element.broadcast_insert/4` * `Drab.Element.broadcast_prop/3` * `Drab.Query`: * `Drab.Query.delete!/2` * `Drab.Query.execute/2`, `Drab.Query.execute/3` * `Drab.Query.insert!/2`, `Drab.Query.insert!/3` * `Drab.Query.update!/2`, `Drab.Query.update!/3` """ require Logger use DrabModule @typedoc "Returned status of all Core operations" @type status :: :ok | :error | :timeout @typedoc "Types returned from the browser" @type return :: String.t() | map | float | integer | list @typedoc "Return value of `exec_js/2`" @type result :: {status, return} @typedoc "Return value of `broadcast_js/2`" @type bcast_result :: {:ok, term} | {:error, term} @typedoc "Subject for broadcasting" @type subject :: Phoenix.Socket.t() | String.t() | list @impl true def js_templates(), do: ["drab.core.js", "drab.events.js"] @impl true def transform_payload(payload, _state) do case payload["form"] do nil -> payload form -> payload |> Map.put_new(:params, form |> normalize_params()) end end @doc false @spec normalize_params(map) :: map def normalize_params(params) do params |> Enum.reduce("", fn {k, v}, acc -> acc <> k <> "=" <> URI.encode_www_form(v) <> "&" end) |> String.trim_trailing("&") |> Plug.Conn.Query.decode() end @doc """ Synchronously executes the given javascript on the client side. Returns tuple `{status, return_value}`, where status could be `:ok`, `:error` or `:timeout`, and return value contains the output computed by the Javascript or the error message. ### Options * `timeout` in milliseconds ### Examples iex> socket |> exec_js("2 + 2") {:ok, 4} iex> socket |> exec_js("not_existing_function()") {:error, "not_existing_function is not defined"} iex> socket |> exec_js("for(i=0; i<1000000000; i++) {}") {:timeout, "timed out after 5000 ms."} iex> socket |> exec_js("alert('hello from IEx!')", timeout: 500) {:timeout, "timed out after 500 ms."} """ @spec exec_js(Phoenix.Socket.t(), String.t(), Keyword.t()) :: result def exec_js(socket, js, options \\ []) do Drab.push_and_wait_for_response(socket, self(), "execjs", [js: js], options) end @doc """ Exception raising version of `exec_js/2` ### Examples iex> socket |> exec_js!("2 + 2") 4 iex> socket |> exec_js!("nonexistent") ** (Drab.JSExecutionError) nonexistent is not defined (drab) lib/drab/core.ex:100: Drab.Core.exec_js!/2 iex> socket |> exec_js!("for(i=0; i<1000000000; i++) {}") ** (Drab.JSExecutionError) timed out after 5000 ms. (drab) lib/drab/core.ex:100: Drab.Core.exec_js!/2 iex> socket |> exec_js!("for(i=0; i<10000000; i++) {}", timeout: 1000) ** (Drab.JSExecutionError) timed out after 1000 ms. lib/drab/core.ex:114: Drab.Core.exec_js!/3 """ @spec exec_js!(Phoenix.Socket.t(), String.t(), Keyword.t()) :: return | no_return def exec_js!(socket, js, options \\ []) do case exec_js(socket, js, options) do {:ok, result} -> result {:error, message} -> raise Drab.JSExecutionError, message: message end end @doc """ Asynchronously executes the javascript on all the browsers listening on the given subject. The subject is derived from the first argument, which could be: * socket - in this case broadcasting option is derived from the setup in the commander. 
See `Drab.Commander.broadcasting/1` for the broadcasting options * same_path(string) - sends the JS to browsers sharing (and configured as listening to same_path in `Drab.Commander.broadcasting/1`) the same url * same_commander(atom) - broadcast goes to all browsers configured with :same_commander * same_topic(string) - broadcast goes to all browsers listening to this topic; notice: this is internal Drab topic, not a Phoenix Socket topic First argument may be a list of the above. The second argument is a JavaScript string. See `Drab.Commander.broadcasting/1` to find out how to change the listen subject. iex> Drab.Core.broadcast_js(socket, "alert('Broadcasted!')") {:ok, :broadcasted} iex> Drab.Core.broadcast_js(same_path("/drab/live"), "alert('Broadcasted!')") {:ok, :broadcasted} iex> Drab.Core.broadcast_js(same_controller(MyApp.LiveController), "alert('Broadcasted!')") {:ok, :broadcasted} iex> Drab.Core.broadcast_js(same_topic("my_topic"), "alert('Broadcasted!')") {:ok, :broadcasted} iex> Drab.Core.broadcast_js([same_topic("my_topic"), same_path("/drab/live")], "alert('Broadcasted!')") {:ok, :broadcasted} Returns `{:ok, :broadcasted}` """ @spec broadcast_js(subject, String.t(), Keyword.t()) :: bcast_result def broadcast_js(subject, js, _options \\ []) do ret = Drab.broadcast(subject, self(), "broadcastjs", js: js) {ret, :broadcasted} end @doc """ Bang version of `Drab.Core.broadcast_js/3` Returns subject. """ @spec broadcast_js!(subject, String.t(), Keyword.t()) :: return def broadcast_js!(subject, js, _options \\ []) do Drab.broadcast(subject, self(), "broadcastjs", js: js) subject end @doc """ Helper for broadcasting functions, returns topic for a given URL path. iex> same_path("/test/live") "same_path:/test/live" """ @spec same_path(String.t()) :: String.t() def same_path(url), do: "same_path:#{url}" @doc """ Helper for broadcasting functions, returns topic for a given controller. iex> same_controller(DrabTestApp.LiveController) "controller:Elixir.DrabTestApp.LiveController" """ @spec same_controller(String.t() | atom) :: String.t() def same_controller(controller), do: "controller:#{controller}" @doc """ Helper for broadcasting functions, returns topic for a given topic string. iex> same_topic("mytopic") "topic:mytopic" """ @spec same_topic(String.t()) :: String.t() def same_topic(topic), do: "topic:#{topic}" @doc false @spec encode_js(term) :: String.t() | no_return def encode_js(value), do: Jason.encode!(value) @doc false @spec decode_js(iodata) :: term def decode_js(value) do case Jason.decode(value) do {:ok, v} -> v _ -> value end end @doc """ Returns the value of the Drab store represented by the given key. uid = get_store(socket, :user_id) """ @spec get_store(Phoenix.Socket.t(), atom) :: term def get_store(socket, key) do store = Drab.get_store(Drab.pid(socket)) store[key] # store(socket)[key] end @doc """ Returns the value of the Drab store represented by the given key or `default` when key not found counter = get_store(socket, :counter, 0) """ @spec get_store(Phoenix.Socket.t(), atom, term) :: term def get_store(socket, key, default) do get_store(socket, key) || default end @doc """ Saves the key => value in the Store. Returns unchanged socket. 
      put_store(socket, :counter, 1)
  """
  @spec put_store(Phoenix.Socket.t(), atom, term) :: Phoenix.Socket.t()
  def put_store(socket, key, value) do
    store = socket |> store() |> Map.merge(%{key => value})
    {:ok, _} = exec_js(socket, "Drab.set_drab_store_token(\"#{tokenize_store(socket, store)}\")")

    # store the store in the Drab server, to have it available on terminate
    save_store(socket, store)

    socket
  end

  @doc false
  @spec save_store(Phoenix.Socket.t(), map) :: :ok
  def save_store(socket, store) do
    # TODO: too complicated, too many functions
    Drab.set_store(Drab.pid(socket), store)
  end

  @doc false
  @spec save_socket(Phoenix.Socket.t()) :: :ok
  def save_socket(socket) do
    Drab.set_socket(Drab.pid(socket), socket)
  end

  @doc """
  Returns the value of the Plug Session represented by the given key.

      counter = get_session(socket, :userid)

  You must explicitly list the session keys you want to access in the `:access_session` option in
  `use Drab.Commander`.
  """
  @spec get_session(Phoenix.Socket.t(), atom) :: term
  def get_session(socket, key) do
    Drab.get_session(socket.assigns.__drab_pid)[key]
    # session(socket)[key]
  end

  @doc """
  Returns the value of the Plug Session represented by the given key, or `default` when the key is
  not found.

      counter = get_session(socket, :userid, 0)

  You must explicitly list the session keys you want to access in the `:access_session` option in
  `use Drab.Commander`.
  """
  @spec get_session(Phoenix.Socket.t(), atom, term) :: term
  def get_session(socket, key, default) do
    get_session(socket, key) || default
  end

  @doc false
  @spec save_session(Phoenix.Socket.t(), map) :: :ok
  def save_session(socket, session) do
    Drab.set_session(socket.assigns.__drab_pid, session)
  end

  @doc false
  @spec store(Phoenix.Socket.t()) :: map
  def store(socket) do
    # TODO: error {:error, "The operation is insecure."}
    {:ok, store_token} = exec_js(socket, "Drab.get_drab_store_token()")
    detokenize_store(socket, store_token)
  end

  @doc false
  @spec session(Phoenix.Socket.t()) :: map
  def session(socket) do
    {:ok, session_token} = exec_js(socket, "Drab.get_drab_session_token()")
    detokenize_store(socket, session_token)
  end

  @doc false
  @spec tokenize_store(Phoenix.Socket.t() | Plug.Conn.t(), map) :: String.t()
  def tokenize_store(socket, store) do
    Drab.tokenize(socket, store, "drab_store_token")
  end

  @doc false
  @spec detokenize_store(Phoenix.Socket.t() | Plug.Conn.t(), String.t()) :: map
  # empty store
  def detokenize_store(_socket, drab_store_token) when drab_store_token == nil, do: %{}

  def detokenize_store(socket, drab_store_token) do
    # we just ignore a wrong token and default the store to %{};
    # this is because it is read on connect, and raising here would cause infinite reconnects
    case Phoenix.Token.verify(socket, "drab_store_token", drab_store_token, max_age: 86_400) do
      {:ok, drab_store} -> drab_store
      {:error, _reason} -> %{}
    end
  end

  @doc """
  Finds the DOM object which triggered the event. To be used only in event handlers.

      def button_clicked(socket, sender) do
        set_prop socket, this(sender), innerText: "already clicked"
        set_prop socket, this(sender), disabled: true
      end

  Do not use it with broadcast functions (`Drab.Query.update!`, `Drab.Core.broadcast_js`, etc),
  because it returns the *exact* DOM object in the *exact* browser. If you want to broadcast,
  use `this!/1` instead.
  """
  @spec this(map) :: String.t()
  def this(sender) do
    "[drab-id=#{Drab.Core.encode_js(sender["drab_id"])}]"
  end

  @doc """
  Like `this/1`, but returns the object ID, so it may be used with broadcasting functions.
      def button_clicked(socket, sender) do
        socket |> update!(:text, set: "already clicked", on: this!(sender))
        socket |> update!(attr: "disabled", set: true, on: this!(sender))
      end

  Raises an exception when used on an object without an ID.
  """
  @spec this!(map) :: String.t()
  def this!(sender) do
    id = sender["id"]

    unless id,
      do:
        raise(ArgumentError, """
        Tried to use Drab.Core.this!/1 on a DOM object without an ID:
        #{inspect(sender)}
        """)

    "##{id}"
  end
end
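# Usage sketch (assumption, not from the original source): a commander that
# exercises `get_store/3`, `put_store/3`, `this/1` and `exec_js/3` from above.
# `DrabExample.StoreCommander` is a hypothetical module name.
defmodule DrabExample.StoreCommander do
  use Drab.Commander

  def button_clicked(socket, sender) do
    clicks = Drab.Core.get_store(socket, :clicks, 0) + 1
    socket = Drab.Core.put_store(socket, :clicks, clicks)

    # `this/1` selects the exact button that fired the event in this browser
    js =
      "document.querySelector(#{Drab.Core.encode_js(Drab.Core.this(sender))})" <>
        ".innerText = #{Drab.Core.encode_js("clicked #{clicks}x")}"

    {:ok, _} = Drab.Core.exec_js(socket, js)
    socket
  end
end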
lib/drab/core.ex
0.780077
0.540378
core.ex
starcoder
defmodule Rayray.Renderings.SphereShaded do alias Rayray.Canvas alias Rayray.Intersect alias Rayray.Intersection alias Rayray.Lights alias Rayray.Material # alias Rayray.Matrix alias Rayray.Normal alias Rayray.Ray alias Rayray.Sphere alias Rayray.Tuple def do_it(ray_origin \\ Tuple.point(0, 0, -5), wall_z \\ 10, wall_size \\ 7.0) do canvas_pixels = 1000 pixel_size = wall_size / canvas_pixels half = wall_size / 2 canvas = Canvas.canvas(canvas_pixels, canvas_pixels) # color = Tuple.color(1, 0, 0) shape = Sphere.new() material = Material.new() material = %{material | color: Tuple.color(1, 0.2, 1)} shape = %{shape | material: material} # transform = Matrix.multiply(Matrix.shearing(1, 0, 0, 0, 0, 0), Matrix.scaling(0.5, 1, 1)) # shape = %{shape | transform: transform} light_position = Tuple.point(-20_000, 5000, 5000) light_color = Tuple.color(1, 1, 1) light = Lights.point_light(light_position, light_color) # black = Tuple.color(0, 0, 0) coords = for y <- 0..(canvas_pixels - 1), x <- 0..(canvas_pixels - 1) do {x, y} end ppm = coords |> Flow.from_enumerable() |> Flow.map(fn {x, y} -> world_y = half - pixel_size * y world_x = -half + pixel_size * x position = Tuple.point(world_x, world_y, wall_z) ray = Ray.new(ray_origin, Tuple.normalize(Tuple.subtract(position, ray_origin))) xs = Intersect.intersect(shape, ray) if Intersection.hit(xs) do hit = Intersection.hit(xs) point = Ray.position(ray, hit.t) normal = Normal.normal_at(hit.object, point) eye = Tuple.multiply(ray.direction, -1) {x, y, Lights.lighting(hit.object.material, light, point, eye, normal, false)} else {x, y, :black} end end) |> Enum.reduce( canvas, fn {_x, _y, :black}, c -> c {x, y, color}, c -> Canvas.write_pixel(c, x, y, color) end ) |> Canvas.canvas_to_ppm() # ppm = # Enum.reduce(0..(canvas_pixels - 1), canvas, fn y, cacc -> # world_y = half - pixel_size * y # Enum.reduce(0..(canvas_pixels - 1), cacc, fn x, cacc2 -> # world_x = -half + pixel_size * x # position = Tuple.point(world_x, world_y, wall_z) # ray = Ray.new(ray_origin, Tuple.normalize(Tuple.subtract(position, ray_origin))) # xs = Intersect.intersect(shape, ray) # if Intersection.hit(xs) do # hit = Intersection.hit(xs) # point = Ray.position(ray, hit.t) # normal = Normal.normal_at(hit.object, point) # eye = Tuple.multiply(ray.direction, -1) # color = Lights.lighting(hit.object.material, light, point, eye, normal) # Canvas.write_pixel(cacc2, x, y, color) # else # cacc2 # end # end) # end) # |> Canvas.canvas_to_ppm() File.write!("sphere_shaded.ppm", ppm) end end
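# Usage sketch (assumption): both calls write `sphere_shaded.ppm` to the current
# working directory; the second moves the eye and the wall closer than the defaults.
#
#     Rayray.Renderings.SphereShaded.do_it()
#     Rayray.Renderings.SphereShaded.do_it(Rayray.Tuple.point(0, 0, -3), 5, 5.0)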
lib/rayray/renderings/sphere_shaded.ex
0.596316
0.566049
sphere_shaded.ex
starcoder
defmodule SteamEx.ICheatReportingService do
  @moduledoc """
  This service allows your game to report cheats and cheaters to the
  [VAC](https://partner.steamgames.com/doc/features/anticheat#VAC) system and provides the toolset
  behind the [Game Bans](https://partner.steamgames.com/doc/features/anticheat#GameBans) system.

  To use this interface you must first opt in to VAC support. This can be done from the
  [Anti-Cheat Configuration](https://partner.steamgames.com/apps/vac/) page in the App Admin panel.

  **NOTE**: This is a Service interface; methods in this interface should be called with the
  `input_json` parameter.

  For more info on how to use the Steamworks Web API please see the
  [Web API Overview](https://partner.steamgames.com/doc/webapi_overview).
  """
  import SteamEx.API.Base

  @interface "ICheatReportingService"

  @doc """
  Reports cheat data. Only use on a test account that is running the game but not in a multiplayer
  session.

  This is for reporting specific cheats to the VAC system. This is done by running the cheat and the
  game and then calling this web API.

  | Name | Type | Required | Description |
  | --- | --- | --- | --- |
  | key | string | ✔ | Steamworks Web API user authentication key. |
  | steamid | uint64 | ✔ | Steam ID of the user running and reporting the cheat. |
  | appid | uint32 | ✔ | The App ID of the game. |
  | pathandfilename | string | ✔ | Path and file name of the cheat executable. |
  | webcheaturl | string | ✔ | Web url where the cheat was found and downloaded. |
  | time_now | uint64 | ✔ | Local system time now. 64 bit windows system time. |
  | time_started | uint64 | ✔ | Local system time when cheat process started. ( 0 if not yet run ) 64 bit windows system time. |
  | time_stopped | uint64 | ✔ | Local system time when cheat process stopped. ( 0 if still running ) 64 bit windows system time. |
  | cheatname | string | ✔ | Descriptive name for the cheat. |
  | game_process_id | uint32 | ✔ | Process ID of the running game. |
  | cheat_process_id | uint32 | ✔ | Process ID of the cheat process that ran. |
  | cheat_param_1 | uint64 | ✔ | Extra cheat data. |
  | cheat_param_2 | uint64 | ✔ | Extra cheat data. |

  See other: [https://partner.steamgames.com/doc/webapi/ICheatReportingService#ReportCheatData](https://partner.steamgames.com/doc/webapi/ICheatReportingService#ReportCheatData)
  """
  def report_cheat_data(access_key, params \\ %{}, headers \\ %{}) do
    post(@interface <> "/ReportCheatData/v1/", access_key, params, headers)
  end
end
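# Usage sketch (assumption): every value below is a placeholder; the field names
# come from the parameter table in the @doc above. `MyApp.CheatReports` is a
# hypothetical wrapper module.
defmodule MyApp.CheatReports do
  def report_example(web_api_key) do
    SteamEx.ICheatReportingService.report_cheat_data(web_api_key, %{
      steamid: 76_561_197_960_287_930,
      appid: 480,
      pathandfilename: "C:/cheats/speedhack.exe",
      webcheaturl: "https://example.com/speedhack",
      time_now: 132_800_000_000_000_000,
      time_started: 132_799_990_000_000_000,
      time_stopped: 132_800_000_000_000_000,
      cheatname: "example speed hack",
      game_process_id: 4242,
      cheat_process_id: 4243,
      cheat_param_1: 0,
      cheat_param_2: 0
    })
  end
end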
lib/interfaces/i_cheat_reporting_service.ex
0.675551
0.712857
i_cheat_reporting_service.ex
starcoder
defmodule Bunch do
  @moduledoc """
  A bunch of general-purpose helper and convenience functions.
  """

  alias __MODULE__.Type

  @doc """
  Imports a bunch of Bunch macros: `withl/1`, `withl/2`, `~>/2`, `~>>/2`,
  `quote_expr/1`, `quote_expr/2`
  """
  defmacro __using__(_args) do
    quote do
      import unquote(__MODULE__),
        only: [
          withl: 1,
          withl: 2,
          ~>: 2,
          ~>>: 2,
          quote_expr: 1,
          quote_expr: 2
        ]
    end
  end

  @compile {:inline, listify: 1, error_if_nil: 2}

  @doc """
  Extracts the key from a key-value tuple.
  """
  @spec key({key, value}) :: key when key: any, value: any
  def key({key, _value}), do: key

  @doc """
  Extracts the value from a key-value tuple.
  """
  @spec value({key, value}) :: value when key: any, value: any
  def value({_key, value}), do: value

  @doc """
  Creates a short reference.
  """
  @spec make_short_ref() :: Bunch.ShortRef.t()
  defdelegate make_short_ref, to: Bunch.ShortRef, as: :new

  @doc """
  Works like `quote/2`, but doesn't require a do/end block and options are passed
  as the last argument.

  Useful when quoting a single expression.

  ## Examples

      iex> use Bunch
      iex> quote_expr(String.t())
      quote do String.t() end
      iex> quote_expr(unquote(x) + 2, unquote: false)
      quote unquote: false do unquote(x) + 2 end

  ## Nesting

  Nesting calls to `quote` disables unquoting in the inner call, while placing
  `quote_expr` in `quote` or another `quote_expr` does not:

      iex> use Bunch
      iex> quote do quote do unquote(:code) end end == quote do quote do :code end end
      false
      iex> quote do quote_expr(unquote(:code)) end == quote do quote_expr(:code) end
      true

  """
  defmacro quote_expr(code, opts \\ []) do
    {:quote, [], [opts, [do: code]]}
  end

  @doc """
  A labeled version of the `with/1` macro.

  This macro works like `with/1`, but requires the user to mark corresponding
  `withl` and `else` clauses with the same label (atom). If a `withl` clause does
  not match, only the `else` clauses marked with the same label are matched
  against the result.

      iex> use #{inspect(__MODULE__)}
      iex> names = %{1 => "Harold", 2 => "Małgorzata"}
      iex> test = fn id ->
      ...>   withl id: {int_id, _} <- Integer.parse(id),
      ...>         name: {:ok, name} <- Map.fetch(names, int_id) do
      ...>     {:ok, "The name is \#{name}"}
      ...>   else
      ...>     id: :error -> {:error, :invalid_id}
      ...>     name: :error -> {:error, :name_not_found}
      ...>   end
      ...> end
      iex> test.("1")
      {:ok, "The name is Harold"}
      iex> test.("5")
      {:error, :name_not_found}
      iex> test.("something")
      {:error, :invalid_id}

  `withl` clauses using no `<-` operator are supported, but they also have to be
  labeled due to Elixir syntax restrictions.

      iex> use #{inspect(__MODULE__)}
      iex> names = %{1 => "Harold", 2 => "Małgorzata"}
      iex> test = fn id ->
      ...>   withl id: {int_id, _} <- Integer.parse(id),
      ...>         do: int_id = int_id + 1,
      ...>         name: {:ok, name} <- Map.fetch(names, int_id) do
      ...>     {:ok, "The name is \#{name}"}
      ...>   else
      ...>     id: :error -> {:error, :invalid_id}
      ...>     name: :error -> {:error, :name_not_found}
      ...>   end
      ...> end
      iex> test.("0")
      {:ok, "The name is Harold"}

  All the `withl` clauses that use the `<-` operator must have at least one
  corresponding `else` clause.

      iex> use #{inspect(__MODULE__)}
      iex> try do
      ...>   Code.compile_quoted(quote do
      ...>     withl a: a when a > 0 <- 1,
      ...>           b: b when b > 0 <- 2 do
      ...>       {:ok, a + b}
      ...>     else
      ...>       a: _ -> :error
      ...>     end
      ...>   end)
      ...> rescue
      ...>   e -> e.description
      ...> end
      "Label `:b` not present in withl else clauses"

  ## Variable scoping

  Because the labels are resolved at compile time, they make it possible to
  access the results of already-succeeded matches from `else` clauses.
  This may help with handling errors, like below:

      iex> use #{inspect(__MODULE__)}
      iex> names = %{1 => "Harold", 2 => "Małgorzata"}
      iex> test = fn id ->
      ...>   withl id: {int_id, _} <- Integer.parse(id),
      ...>         do: int_id = int_id + 1,
      ...>         name: {:ok, name} <- Map.fetch(names, int_id) do
      ...>     {:ok, "The name is \#{name}"}
      ...>   else
      ...>     id: :error -> {:error, :invalid_id}
      ...>     name: :error -> {:ok, "The name is Defaultius the \#{int_id}th"}
      ...>   end
      ...> end
      iex> test.("0")
      {:ok, "The name is Harold"}
      iex> test.("5")
      {:ok, "The name is Defaultius the 6th"}

  ## Duplicate labels

  `withl` supports marking multiple `withl` clauses with the same label; however,
  in that case all the `else` clauses marked with such a label are simply put
  multiple times into the generated code. Note that this may lead to confusion,
  in particular when variables are rebound in `withl` clauses:

      iex> use #{inspect(__MODULE__)}
      iex> test = fn x ->
      ...>   withl a: x when x > 1 <- x,
      ...>         do: x = x + 1,
      ...>         a: x when x < 4 <- x do
      ...>     :ok
      ...>   else
      ...>     a: x -> {:error, x}
      ...>   end
      ...> end
      iex> test.(2)
      :ok
      iex> test.(1)
      {:error, 1}
      iex> test.(3)
      {:error, 4}

  """
  @spec withl(keyword(with_clause :: term), do: code_block :: term(), else: match_clauses :: term) ::
          term
  defmacro withl(with_clauses, do: block, else: else_clauses) do
    do_withl(with_clauses, block, else_clauses, __CALLER__)
  end

  @doc """
  Works like `withl/2`, but allows shorter syntax.

  ## Examples

      iex> use #{inspect(__MODULE__)}
      iex> x = 1
      iex> y = 2
      iex> withl a: true <- x > 0,
      ...>       b: false <- y |> rem(2) == 0,
      ...>       do: {x, y},
      ...>       else: (a: false -> {:error, :x}; b: true -> {:error, :y})
      {:error, :y}

  For more details and more verbose and readable syntax, check docs for `withl/2`.
  """
  @spec withl(
          keyword :: [
            {key :: atom(), with_clause :: term}
            | {:do, code_block :: term}
            | {:else, match_clauses :: term}
          ]
        ) :: term
  defmacro withl(keyword) do
    {{:else, else_clauses}, keyword} = keyword |> List.pop_at(-1)
    {{:do, block}, keyword} = keyword |> List.pop_at(-1)
    with_clauses = keyword

    do_withl(with_clauses, block, else_clauses, __CALLER__)
  end

  defp do_withl(with_clauses, block, else_clauses, caller) do
    else_clauses =
      else_clauses
      |> Enum.map(fn {:->, meta, [[[{label, left}]], right]} ->
        {label, {:->, meta, [[left], right]}}
      end)
      |> Enum.group_by(fn {k, _v} -> k end, fn {_k, v} -> v end)

    with_clauses
    |> Enum.reverse()
    |> Enum.reduce(block, fn
      {label, {:<-, meta, _args} = clause}, acc ->
        label_else_clauses =
          else_clauses[label] ||
            "Label `#{inspect(label)}` not present in withl else clauses"
            |> raise_compile_error(caller, meta)

        args = [clause, [do: acc] ++ [else: label_else_clauses]]
        {:with, meta, args}

      {_label, clause}, acc ->
        quote do
          unquote(clause)
          unquote(acc)
        end
    end)
  end

  @doc """
  Embeds the argument in a one-element list if it is not a list itself. Otherwise
  works as identity.

  Works similarly to `List.wrap/1`, but treats `nil` as any non-list value,
  instead of returning an empty list in this case.

  ## Examples

      iex> #{inspect(__MODULE__)}.listify(:a)
      [:a]
      iex> #{inspect(__MODULE__)}.listify([:a, :b, :c])
      [:a, :b, :c]
      iex> #{inspect(__MODULE__)}.listify(nil)
      [nil]

  """
  @spec listify(l) :: l when l: list
  @spec listify(a) :: [a] when a: any
  def listify(list) when is_list(list) do
    list
  end

  def listify(non_list) do
    [non_list]
  end

  @doc """
  Returns an `:error` tuple if the given value is `nil`, and an `:ok` tuple otherwise.
  ## Examples

      iex> map = %{:answer => 42}
      iex> #{inspect(__MODULE__)}.error_if_nil(map[:answer], :reason)
      {:ok, 42}
      iex> #{inspect(__MODULE__)}.error_if_nil(map[:invalid], :reason)
      {:error, :reason}

  """
  @spec error_if_nil(value, reason) :: Type.try_t(value) when value: any(), reason: any()
  def error_if_nil(nil, reason), do: {:error, reason}
  def error_if_nil(v, _), do: {:ok, v}

  @doc """
  Returns the given stateful try value along with its status.
  """
  @spec stateful_try_with_status(result) :: {status, result}
        when status: Type.try_t(),
             result:
               Type.stateful_try_t(state :: any) | Type.stateful_try_t(value :: any, state :: any)
  def stateful_try_with_status({:ok, _state} = res), do: {:ok, res}
  def stateful_try_with_status({{:ok, _res}, _state} = res), do: {:ok, res}
  def stateful_try_with_status({{:error, reason}, _state} = res), do: {{:error, reason}, res}

  @doc """
  Helper for writing pipeline-like syntax. Maps the given value using match
  clauses or lambda-like syntax.

  ## Examples

      iex> use #{inspect(__MODULE__)}
      iex> {:ok, 10} ~> ({:ok, x} -> x)
      10
      iex> 5 ~> &1 + 2
      7

  Lambda-like expressions are not converted to lambdas under the hood, but the
  result of `expr` is injected into `&1` at compile time.

  Useful especially when dealing with a pipeline of operations (made up e.g. with
  the pipe (`|>`) operator) some of which are hard to express in such a form:

      iex> use #{inspect(__MODULE__)}
      iex> ["Joe", "truck", "jacket"]
      ...> |> Enum.map(&String.downcase/1)
      ...> |> Enum.filter(& &1 |> String.starts_with?("j"))
      ...> ~> ["Words:" | &1]
      ...> |> Enum.join("\\n")
      "Words:\\njoe\\njacket"

  """
  # Case when the mapper is a list of match clauses
  defmacro expr ~> ([{:->, _, _} | _] = mapper) do
    quote do
      case unquote(expr) do
        unquote(mapper)
      end
    end
  end

  # Case when the mapper is a piece of lambda-like code
  defmacro expr ~> mapper do
    {mapped, arg_present?} =
      mapper
      |> Macro.prewalk(false, fn
        {:&, _meta, [1]}, _acc ->
          quote do: {expr_result, true}

        {:&, _meta, [i]} = node, acc when is_integer(i) ->
          {node, acc}

        {:&, meta, _}, _acc ->
          """
          The `&` (capture) operator is not allowed in lambda-like version of \
          `#{inspect(__MODULE__)}.~>/2`. Use `&1` alone instead.
          """
          |> raise_compile_error(__CALLER__, meta)

        other, acc ->
          {other, acc}
      end)

    if not arg_present? do
      """
      `#{inspect(__MODULE__)}.~>/2` operator requires either match clauses or \
      at least one occurrence of the `&1` argument on the right hand side.
      """
      |> raise_compile_error(__CALLER__)
    end

    quote do
      expr_result = unquote(expr)
      unquote(mapped)
    end
  end

  @doc """
  Works similarly to `~>/2`, but accepts only `->` clauses and appends a default
  identity clause at the end.

  ## Examples

      iex> use #{inspect(__MODULE__)}
      iex> {:ok, 10} ~>> ({:ok, x} -> {:ok, x+1})
      {:ok, 11}
      iex> :error ~>> ({:ok, x} -> {:ok, x+1})
      :error

  """
  defmacro expr ~>> ([{:->, _, _} | _] = mapper_clauses) do
    default =
      quote do
        default_result -> default_result
      end

    quote do
      case unquote(expr) do
        unquote(mapper_clauses ++ default)
      end
    end
  end

  defmacro _expr ~>> _ do
    """
    `#{inspect(__MODULE__)}.~>>/2` operator expects match clauses on the right \
    hand side.
    """
    |> raise_compile_error(__CALLER__)
  end

  defp raise_compile_error(reason, caller, meta \\ []) do
    raise CompileError,
      file: caller.file,
      line: meta |> Keyword.get(:line, caller.line),
      description: reason
  end
end
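# Usage sketch (assumption): `MyApp.BunchDemo` is a hypothetical module showing
# `withl/2` and `~>>/2` from above working together.
defmodule MyApp.BunchDemo do
  use Bunch

  def parse_positive(input) do
    result =
      withl parse: {int, _rest} <- Integer.parse(input),
            sign: true <- int > 0 do
        {:ok, int}
      else
        parse: :error -> {:error, :not_a_number}
        sign: false -> {:error, :not_positive}
      end

    # double a successful result; error tuples fall through unchanged
    result ~>> ({:ok, n} -> {:ok, n * 2})
  end
end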
lib/bunch.ex
0.783326
0.490419
bunch.ex
starcoder
defmodule Saucexages.IO.BinaryReader do
  @moduledoc """
  Reads SAUCE data from binaries according to the SAUCE specification.

  SAUCE data is decoded according to the SAUCE spec. If you need to read files from the local file
  system directly in a more efficient manner, see `Saucexages.IO.FileReader`.

  ## General Usage

  The general use-case for the binary reader is to work with binaries that you can fit in memory.
  You should prefer this use-case whenever possible to free yourself from problems that may arise
  when working directly with another underlying medium such as the file system.

  Most files that were commonly used for SAUCE are very small (ex: ANSi graphics) and are an ideal
  fit for the binary reader. This also implies that this reader is suitable for use in, for example,
  a network service, streaming process, or similar context where loading the entire binary would not
  be a burden on the rest of the system.

  A general usage pattern on a local machine might be as follows:

  ```elixir
  File.read!("LD-ACID2.ANS")
  |> BinaryReader.sauce()
  ```

  ## Layout

  Binaries are generally assumed to take the following forms in pseudo-code below.

  ### SAUCE with no comments

  ```elixir
  <<contents::binary, eof_character::binary, sauce::binary-size(128)>>
  ```

  ### SAUCE with comments

  ```elixir
  <<contents::binary, eof_character::binary, comments::binary-size(line_count), sauce::binary-size(128)>>
  ```

  ### No SAUCE with EOF character

  ```elixir
  <<contents::binary, eof_character::binary>>
  ```

  ### No SAUCE with no EOF character

  ```elixir
  <<contents::binary>>
  ```

  ## Notes

  The operations within this module take the approach of tolerant reading while still following the
  SAUCE spec as closely as possible. For example, comments within a SAUCE are only read according to
  the comment lines value specified in a SAUCE record. A binary may have a comments block buried
  within it, but if the SAUCE record does not agree, no effort is made to find the comments block.

  If you wish to work with SAUCE-related binaries at a lower level or build your own binary reader,
  see `Saucexages.SauceBinary`. This module can also be used to build readers that relax or constrain
  the SAUCE spec, such as in the case of reading comment blocks.
  """

  require Saucexages.Sauce
  alias Saucexages.{SauceBlock}
  alias Saucexages.IO.SauceBinary
  alias Saucexages.Codec.Decoder

  @doc """
  Reads a binary containing a SAUCE record and returns decoded SAUCE information as
  `{:ok, sauce_block}`.

  If the binary does not contain a SAUCE record, `{:error, :no_sauce}` is returned.
  """
  @spec sauce(binary()) ::
          {:ok, SauceBlock.t} | {:error, :no_sauce} | {:error, :invalid_sauce} | {:error, term()}
  def sauce(bin) when is_binary(bin) do
    with {:ok, {sauce_bin, comments_bin}} <- SauceBinary.sauce(bin),
         {:ok, %{comment_lines: comment_lines} = sauce_record} <- Decoder.decode_record(sauce_bin),
         {:ok, comments} <- read_comments(comments_bin, comment_lines) do
      Decoder.decode_sauce(sauce_record, comments)
    else
      {:error, _reason} = err -> err
      err -> {:error, {"Unable to read sauce", err}}
    end
  end

  defp read_comments(comments_bin, comment_lines) do
    case Decoder.decode_comments(comments_bin, comment_lines) do
      {:ok, comments} -> {:ok, comments}
      {:error, :no_comments} -> {:ok, []}
      {:error, _reason} = err -> err
    end
  end

  @doc """
  Reads a binary containing a SAUCE record and returns the raw binary in the form
  `{:ok, {sauce_bin, comments_bin}}`.

  If the binary does not contain a SAUCE record, `{:error, :no_sauce}` is returned.
""" @spec raw(binary()) :: {:ok, {binary(), binary()}} | {:error, :no_sauce} | {:error, term()} def raw(bin) when is_binary(bin) do SauceBinary.sauce(bin) end @doc """ Reads a binary containing a SAUCE record and returns the decoded SAUCE comments. """ @spec comments(binary()) :: {:ok, [String.t]} | {:error, :no_sauce} | {:error, :no_comments} | {:error, term()} def comments(bin) when is_binary(bin) do with {:ok, {comments_bin, line_count}} <- SauceBinary.comments(bin) do Decoder.decode_comments(comments_bin, line_count) end end @doc """ Reads a binary and returns the contents without the SAUCE block. """ @spec contents(binary()) :: {:ok, binary()} | {:error, term()} def contents(bin) when is_binary(bin) do SauceBinary.contents(bin) end @doc """ Reads a binary and returns whether or not a SAUCE comments block exists within the SAUCE block. Will match a comments block only if it a SAUCE record exists. Comment fragments are not considered to be valid without the presence of a SAUCE record. """ @spec comments?(binary()) :: boolean() def comments?(bin) when is_binary(bin) do SauceBinary.comments?(bin) end @doc """ Reads a binary and returns whether or not a SAUCE record exists. Will match both binary that is a SAUCE record and binary that contains a SAUCE record. """ @spec sauce?(binary()) :: boolean() def sauce?(bin) when is_binary(bin) do SauceBinary.sauce?(bin) end end
lib/saucexages/io/binary_reader.ex
0.911293
0.897919
binary_reader.ex
starcoder
defmodule Grizzly.ZWave.Commands.MultiChannelAggregatedMembersReport do
  @moduledoc """
  This command is used to advertise the members of an Aggregated End Point.

  Params:

    * `:aggregated_end_point` - an aggregated end point (required)

    * `:members` - the list of end points that are members of the aggregated end point (required,
      can be an empty list)

  """

  @behaviour Grizzly.ZWave.Command

  alias Grizzly.ZWave.Command
  alias Grizzly.ZWave.CommandClasses.MultiChannel

  @type param ::
          {:aggregated_end_point, MultiChannel.end_point()}
          | {:members, [MultiChannel.end_point()]}

  @impl true
  def new(params) do
    command = %Command{
      name: :multi_channel_aggregated_members_report,
      command_byte: 0x0F,
      command_class: MultiChannel,
      params: params,
      impl: __MODULE__
    }

    {:ok, command}
  end

  @impl true
  def encode_params(command) do
    aggregated_end_point = Command.param!(command, :aggregated_end_point)
    members = Command.param!(command, :members)
    encoded_members = encode_members(members)
    count = byte_size(encoded_members)
    <<0x00::size(1), aggregated_end_point::size(7), count>> <> encoded_members
  end

  @impl true
  def decode_params(
        <<0x00::size(1), aggregated_end_point::size(7), count, bitmasks::binary-size(count)>>
      ) do
    members = decode_members(bitmasks)
    {:ok, [aggregated_end_point: aggregated_end_point, members: members]}
  end

  # an empty member list encodes to zero bitmask bytes (Enum.max/1 would raise)
  defp encode_members([]), do: <<>>

  defp encode_members(members) do
    masks_count = ceil(Enum.max(members) / 8)

    for i <- 0..(masks_count - 1), into: <<>> do
      start = i * 8 + 1

      for j <- (start + 7)..start, into: <<>> do
        if j in members, do: <<0x01::size(1)>>, else: <<0x00::size(1)>>
      end
    end
  end

  # zero bitmask bytes decode to an empty member list
  defp decode_members(<<>>), do: []

  defp decode_members(bitmasks) do
    masks = for byte <- :erlang.binary_to_list(bitmasks), do: <<byte>>

    for i <- 0..(Enum.count(masks) - 1) do
      mask = Enum.at(masks, i)

      indexed_bits =
        for(<<x::1 <- mask>>, do: x)
        |> Enum.reverse()
        |> Enum.with_index(1)

      start = i * 8

      Enum.reduce(indexed_bits, [], fn {bit, index}, acc ->
        if bit == 1, do: [index + start | acc], else: acc
      end)
    end
    |> List.flatten()
    |> Enum.sort()
  end
end
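# Round-trip sketch (assumption): encode a report for members 1, 2 and 9, then
# decode the two generated bitmask bytes back into the same member list.
#
#     alias Grizzly.ZWave.Commands.MultiChannelAggregatedMembersReport, as: Report
#
#     {:ok, command} = Report.new(aggregated_end_point: 5, members: [1, 2, 9])
#     Report.encode_params(command)
#     #=> <<5, 2, 0b00000011, 0b00000001>>
#
#     Report.decode_params(<<5, 2, 0b00000011, 0b00000001>>)
#     #=> {:ok, [aggregated_end_point: 5, members: [1, 2, 9]]}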
lib/grizzly/zwave/commands/multi_channel_aggregated_members_report.ex
0.829008
0.471223
multi_channel_aggregated_members_report.ex
starcoder
defmodule Rules.Poker.Mechanics.SeatedPlayer do
  @moduledoc """
  A seated player status is just a data structure defining the following details
  regarding their play in this turn:

  - status: :active, :all_in, :away, :fold. Other useful statuses:
    - :pending: The [new] hand has not yet started. Non-away users enter this
      stage after a hand ends. Fresh users, when just sitting, enter this stage.
      Starting a new hand will involve moving these users to active.
  - user_id: I don't know yet. Either a unique ID or username. All I require is
    that this user_id is UNIQUE AMONG THE WHOLE SERVER.
  - aggression_order: The aggression order resets each turn (complete hand!) to
    0, both for the table's internal counter and the users. Each time one user
    bets or raises, the table's aggression order increases, and such value is
    set to the user's current status. The active/all_in user with the highest
    aggression_order in one hand will be the first one commanded to start the
    showdown.
  - hole_chips: The chips the user has FOR THIS TABLE. This has nothing to do
    with the chips the user has on its overall account (if such thing ever
    exists!).
  - cards: the user's current cards. Hand deliverers will pick exactly THESE
    cards (alongside any community cards if the game has one).
  - bet_chips: The chips the user is currently betting in this round. This
    constitutes a "personal" pot for bets that lasts until the current betting
    stage gets "cold" (all but one player folded, all active players called the
    last aggressor, just one active and all remaining active players are
    actually all-in...).

  This structure is for regular games, and does not contemplate other modes like
  "Shark's Cage" where you may be forcibly away, and you may pick and send a
  card when being the last aggressor after the river.
  """

  defstruct [:user_id, :status, :hole_chips, :cards, :bet_chips, :aggression_order]

  def new(user_id, hole_chips) do
    %__MODULE__{user_id: user_id, status: :pending, hole_chips: hole_chips,
                cards: nil, bet_chips: 0, aggression_order: 0}
  end

  # When the user retreats
  def fold(%__MODULE__{} = user_status) do
    %{ user_status | status: :fold }
  end

  # When the user places a bet and has more money, they remain active.
  def pay(%__MODULE__{hole_chips: hc, bet_chips: bc} = user_status, amount) when hc > amount do
    %{ user_status | hole_chips: hc - amount, bet_chips: bc + amount }
  end

  # This case goes all-in, in contrast to the former one
  def pay(%__MODULE__{hole_chips: hc, bet_chips: bc} = user_status, _) do
    %{ user_status | hole_chips: 0, bet_chips: bc + hc, status: :all_in }
  end

  # This one is when the engine collects the bet chips from the user.
  # Useful when building the side pots.
  def collect(%__MODULE__{} = user_status) do
    %{ user_status | bet_chips: 0 }
  end

  # This one is when the engine awards the user. Only valid when the
  # user wins a hand.
  def award(%__MODULE__{hole_chips: hc} = user_status, amount) do
    %{ user_status | hole_chips: hc + amount }
  end

  # When the showdown ends and the user is non-away and does not want to be
  # away, or is away and wants to be non-away again.
  def cleanup(%__MODULE__{} = user_status) do
    %{ user_status | status: :pending }
  end

  # When the showdown ends and the user wants to go away.
  def away(%__MODULE__{} = user_status) do
    %{ user_status | status: :away }
  end
end
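# Flow sketch (assumption): a player sits down with 100 chips, bets 40, is then
# forced all-in by a 100-chip raise, and finally wins a 250-chip pot.
#
#     alias Rules.Poker.Mechanics.SeatedPlayer
#
#     player = SeatedPlayer.new("alice", 100)   # status: :pending
#     player = %{player | status: :active}      # hand starts
#     player = SeatedPlayer.pay(player, 40)     # hole: 60, bet: 40, still :active
#     player = SeatedPlayer.pay(player, 100)    # only 60 left -> :all_in, bet: 100
#     player = SeatedPlayer.collect(player)     # engine sweeps the bet into the pot
#     player = SeatedPlayer.award(player, 250)  # hole_chips: 250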
lib/rules/poker/mechanics/seated_player.ex
0.635109
0.503174
seated_player.ex
starcoder
defmodule Seqfuzz do @moduledoc """ Seqfuzz is an implementation of a sequential fuzzy string matching algorithm, similar to those used in code editors like Sublime Text. It is based on <NAME>'s work on [lib_ftps](https://github.com/forrestthewoods/lib_fts/) and his blog post [Reverse Engineering Sublime Text's Fuzzy Match](https://www.forrestthewoods.com/blog/reverse_engineering_sublime_texts_fuzzy_match/). There is an alternate implementation by [@WolfDan](https://github.com/WolfDan) which can be found here: [Fuzzy Match v0.2.0 Elixir](https://github.com/tajmone/fuzzy-search/tree/master/fts_fuzzy_match/0.2.0/elixir). ### Documentation * **GitHub**: [https://github.com/negcx/seqfuzz](https://github.com/negcx/seqfuzz) * **Hexdocs**: [https://hexdocs.pm/seqfuzz](https://hexdocs.pm/seqfuzz) ## Installation The package can be installed by adding `seqfuzz` to your list of dependencies in `mix.exs`: ```elixir def deps do [ {:seqfuzz, "~> 0.2.0"} ] end ``` ## Examples iex> Seqfuzz.match("Hello, world!", "hellw") %{match?: true, matches: [0, 1, 2, 3, 7], score: 202} iex> items = [{1, "Hello Goodbye"}, {2, "Hell on Wheels"}, {3, "Hello, world!"}] iex> Seqfuzz.filter(items, "hellw", &(elem(&1, 1))) [{3, "Hello, world!"}, {2, "Hell on Wheels"}] ## Scoring Scores can be passed as options if you want to override the defaults. I have added additional separators as a default as well as two additional scoring features: case match bonus and string match bonus. Case match bonus provides a small bonus for matching case. String match bonus provides a large bonus when the pattern and the string match exactly (although with different cases) to make sure that those results are always highest. ## Changelog * `0.2.0` - Change scoring to be via options instead of configuration. * `0.1.0` - Initial version. Supports basic algorithm but does not search recursively for better matches. ## Roadmap * Add support for recursive search for better matches. * Add support for asynchronous stream search. """ @type match_metadata :: %{match?: boolean, matches: [integer], score: integer} defp default_options() do [ sequential_bonus: 15, separator_bonus: 30, camel_bonus: 30, first_letter_bonus: 30, leading_letter_penalty: -3, max_leading_letter_penalty: -25, unmatched_letter_penalty: -1, case_match_bonus: 1, string_match_bonus: 20, separators: ["_", " ", ".", "/", ","], initial_score: 100, default_empty_score: -10_000, filter: false, sort: false, metadata: true ] end def match(string, pattern, opts \\ []) def match("", _pattern, opts) do opts = default_options() |> Keyword.merge(opts) %{match?: false, score: opts[:default_empty_score], matches: []} end def match(_string, "", opts) do opts = default_options() |> Keyword.merge(opts) %{match?: true, score: opts[:default_empty_score], matches: []} end @doc """ Determines whether `pattern` is a sequential fuzzy match with `string` and provides a matching score. `matches` is a list of indices within `string` where a match was found. ## Examples iex> Seqfuzz.match("Hello, world!", "hellw") %{match?: true, matches: [0, 1, 2, 3, 7], score: 202} """ @spec match(String.t(), String.t(), keyword) :: match_metadata() def match(string, pattern, opts) do opts = default_options() |> Keyword.merge(opts) match(string, pattern, 0, 0, [], opts) end @doc """ Applies the `match` algorithm to the entire `enumerable` with options to sort and filter. ## Options * `:sort` - Sort the enumerable by score, defaults to `false`. * `:filter` - Filter out elements that don't match, defaults to `false`. 
  * `:metadata` - Include the match metadata map in the result, defaults to `true`.
    When `true` the return value is a tuple `{element, %{...}}`. When `false`, the
    return value is a list of `element`.
  * `:sequential_bonus` Default: 15
  * `:separator_bonus` Default: 30
  * `:camel_bonus` Default: 30
  * `:first_letter_bonus` Default: 30
  * `:leading_letter_penalty` Default: -3
  * `:max_leading_letter_penalty` Default: -25
  * `:unmatched_letter_penalty` Default: -1
  * `:case_match_bonus` Default: 1
  * `:string_match_bonus` Default: 20
  * `:separators` Default: ["_", " ", ".", "/", ","]
  * `:initial_score` Default: 100

  ## Examples

      iex> strings = ["Hello Goodbye", "Hell on Wheels", "Hello, world!"]
      iex> Seqfuzz.matches(strings, "hellw", & &1)
      [
        {"Hello Goodbye", %{match?: false, matches: [0, 1, 2, 3], score: 170}},
        {"Hell on Wheels", %{match?: true, matches: [0, 1, 2, 3, 8], score: 200}},
        {"Hello, world!", %{match?: true, matches: [0, 1, 2, 3, 7], score: 202}}
      ]

      iex> strings = ["Hello Goodbye", "Hell on Wheels", "Hello, world!"]
      iex> Seqfuzz.matches(
      iex>   strings,
      iex>   "hellw",
      iex>   & &1,
      iex>   metadata: false,
      iex>   filter: true,
      iex>   sort: true
      iex> )
      ["Hello, world!", "Hell on Wheels"]

  """
  @spec matches(Enumerable.t(), String.t(), (any -> String.t()), keyword) ::
          Enumerable.t() | [{any, match_metadata}]
  def matches(enumerable, pattern, string_callback, opts \\ []) do
    opts = default_options() |> Keyword.merge(opts)

    enumerable
    |> Enum.map(fn item ->
      {item, match(string_callback.(item), pattern, opts)}
    end)
    |> matches_filter(opts[:filter])
    |> matches_sort(opts[:sort])
    |> matches_metadata(opts[:metadata])
  end

  @doc """
  Matches against a list of strings and returns the list of matches sorted by
  highest score first.

  ## Examples

      iex> strings = ["Hello Goodbye", "Hell on Wheels", "Hello, world!"]
      iex> Seqfuzz.filter(strings, "hellw")
      ["Hello, world!", "Hell on Wheels"]

  """
  @spec filter(Enumerable.t(), String.t()) :: Enumerable.t()
  def filter(enumerable, pattern) do
    enumerable
    |> matches(pattern, & &1, sort: true, filter: true, metadata: false)
  end

  @doc """
  Matches against an enumerable using a callback to access the string to match
  and returns the list of matches sorted by highest score first.

  ## Examples

      iex> items = [{1, "<NAME>"}, {2, "Hell on Wheels"}, {3, "Hello, world!"}]
      iex> Seqfuzz.filter(items, "hellw", &(elem(&1, 1)))
      [{3, "Hello, world!"}, {2, "Hell on Wheels"}]

  """
  @spec filter(Enumerable.t(), String.t(), (any -> String.t())) :: Enumerable.t()
  def filter(enumerable, pattern, string_callback) do
    enumerable
    |> matches(pattern, string_callback, sort: true, filter: true, metadata: false)
  end

  defp matches_metadata(enumerable, true = _metadata?) do
    enumerable
  end

  defp matches_metadata(enumerable, false = _metadata?) do
    enumerable |> Enum.map(fn {item, _} -> item end)
  end

  defp matches_filter(enumerable, true = _filter?) do
    enumerable |> Enum.filter(fn {_, %{match?: match?}} -> match? end)
  end

  defp matches_filter(enumerable, false = _filter?) do
    enumerable
  end

  defp matches_sort(enumerable, true = _sort?) do
    enumerable
    |> Enum.sort_by(
      fn {_, %{score: score}} -> score end,
      :desc
    )
  end

  defp matches_sort(enumerable, false = _sort?) do
    enumerable
  end

  defp match(string, pattern, string_idx, pattern_idx, matches, opts) do
    # We must use String.length and a case statement because
    # byte_size does not properly capture the length of UTF-8 strings.
string_len = String.length(string) pattern_len = String.length(pattern) case {string_len, string_idx, pattern_len, pattern_idx} do # Pattern length is 0 {_, _, 0, _} -> score = opts[:initial_score] |> score_leading_letter( matches, opts[:leading_letter_penalty], opts[:max_leading_letter_penalty] ) |> score_sequential_bonus(matches, opts[:sequential_bonus]) |> score_unmatched_letter_penalty(matches, string, opts[:unmatched_letter_penalty]) |> score_neighbor( matches, string, opts[:camel_bonus], opts[:separator_bonus], opts[:separators] ) |> score_first_letter_bonus(matches, opts[:first_letter_bonus]) |> score_case_match_bonus(matches, string, pattern, opts[:case_match_bonus]) |> score_string_match_bonus(string, pattern, opts[:string_match_bonus]) %{match?: false, score: score, matches: matches} # String length is 0 {0, _, _, _} -> score = opts[:initial_score] |> score_leading_letter( matches, opts[:leading_letter_penalty], opts[:max_leading_letter_penalty] ) |> score_sequential_bonus(matches, opts[:sequential_bonus]) |> score_unmatched_letter_penalty(matches, string, opts[:unmatched_letter_penalty]) |> score_neighbor( matches, string, opts[:camel_bonus], opts[:separator_bonus], opts[:separators] ) |> score_first_letter_bonus(matches, opts[:first_letter_bonus]) |> score_case_match_bonus(matches, string, pattern, opts[:case_match_bonus]) |> score_string_match_bonus(string, pattern, opts[:string_match_bonus]) %{match?: false, score: score, matches: matches} # There is more pattern left than string {string_len, string_idx, pattern_len, pattern_idx} when pattern_len - pattern_idx > string_len - string_idx -> score = opts[:initial_score] |> score_leading_letter( matches, opts[:leading_letter_penalty], opts[:max_leading_letter_penalty] ) |> score_sequential_bonus(matches, opts[:sequential_bonus]) |> score_unmatched_letter_penalty(matches, string, opts[:unmatched_letter_penalty]) |> score_neighbor( matches, string, opts[:camel_bonus], opts[:separator_bonus], opts[:separators] ) |> score_first_letter_bonus(matches, opts[:first_letter_bonus]) |> score_case_match_bonus(matches, string, pattern, opts[:case_match_bonus]) |> score_string_match_bonus(string, pattern, opts[:string_match_bonus]) %{match?: false, score: score, matches: matches} # No more pattern left - this is a match. Go to score. {_, _, pattern_len, pattern_idx} when pattern_len - pattern_idx == 0 -> score = opts[:initial_score] |> score_leading_letter( matches, opts[:leading_letter_penalty], opts[:max_leading_letter_penalty] ) |> score_sequential_bonus(matches, opts[:sequential_bonus]) |> score_unmatched_letter_penalty(matches, string, opts[:unmatched_letter_penalty]) |> score_neighbor( matches, string, opts[:camel_bonus], opts[:separator_bonus], opts[:separators] ) |> score_first_letter_bonus(matches, opts[:first_letter_bonus]) |> score_case_match_bonus(matches, string, pattern, opts[:case_match_bonus]) |> score_string_match_bonus(string, pattern, opts[:string_match_bonus]) %{match?: true, score: score, matches: matches} # If none of the terminating clauses match above, continue # walking the pattern and string. 
      {string_len, string_idx, pattern_len, pattern_idx}
      when pattern_len > pattern_idx and string_len > string_idx ->
        if pattern |> String.at(pattern_idx) |> String.downcase() ==
             string |> String.at(string_idx) |> String.downcase() do
          match(string, pattern, string_idx + 1, pattern_idx + 1, matches ++ [string_idx], opts)
        else
          match(string, pattern, string_idx + 1, pattern_idx, matches, opts)
        end
    end
  end

  # Callers pass the per-letter penalty third and the cap fourth, i.e.
  # score_leading_letter(score, matches, leading_letter_penalty, max_leading_letter_penalty),
  # so the parameters are named in that order here.
  defp score_leading_letter(score, matches, _leading_letter_penalty, max_leading_letter_penalty)
       when length(matches) == 0 do
    score + max_leading_letter_penalty
  end

  defp score_leading_letter(score, matches, leading_letter_penalty, max_leading_letter_penalty)
       when length(matches) > 0 do
    score + max(leading_letter_penalty * Enum.at(matches, 0), max_leading_letter_penalty)
  end

  defp score_sequential_bonus(score, matches, _sequential_bonus) when length(matches) <= 1 do
    score
  end

  defp score_sequential_bonus(score, matches, sequential_bonus) when length(matches) > 1 do
    [_head | tail] = matches

    (matches
     |> Enum.zip(tail)
     |> Enum.count(fn {curr, next} -> next - curr == 1 end)) * sequential_bonus + score
  end

  defp score_unmatched_letter_penalty(score, matches, string, unmatched_letter_penalty)
       when length(matches) > 0 do
    [_head | tail] = matches
    tail = tail ++ [String.length(string) - 1]

    (matches
     |> Enum.zip(tail)
     |> Enum.filter(fn {curr, next} -> next - curr != 1 end)
     |> Enum.map(fn {curr, next} -> next - curr - 1 end)
     |> Enum.sum()) * unmatched_letter_penalty + score
  end

  defp score_unmatched_letter_penalty(score, matches, string, unmatched_letter_penalty)
       when length(matches) == 0 do
    score + String.length(string) * unmatched_letter_penalty
  end

  defp score_neighbor(score, matches, string, camel_bonus, separator_bonus, separators) do
    (matches
     |> Enum.filter(&(&1 > 0))
     |> Enum.map(fn index ->
       curr = String.at(string, index)
       neighbor = String.at(string, index - 1)

       cond do
         neighbor in separators ->
           separator_bonus

         curr == String.upcase(curr) and neighbor == String.downcase(neighbor) ->
           camel_bonus

         true ->
           0
       end
     end)
     |> Enum.sum()) + score
  end

  defp score_first_letter_bonus(score, [0 | _tail] = _matches, first_letter_bonus) do
    first_letter_bonus + score
  end

  defp score_first_letter_bonus(score, _matches, _first_letter_bonus) do
    score
  end

  defp score_case_match_bonus(score, [] = _matches, _, _, _case_match_bonus) do
    score
  end

  defp score_case_match_bonus(score, matches, string, pattern, case_match_bonus) do
    (0..(length(matches) - 1)
     |> Enum.count(fn match_idx ->
       String.at(pattern, match_idx) == String.at(string, Enum.fetch!(matches, match_idx))
     end)) * case_match_bonus + score
  end

  defp score_string_match_bonus(score, string, pattern, string_match_bonus) do
    if String.downcase(string) == String.downcase(pattern) do
      score + string_match_bonus
    else
      score
    end
  end
end
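# Usage sketch (assumption): per-call scoring overrides; both strings match the
# pattern, and the raised separator bonus favors the snake_case candidate over
# the camelCase one.
#
#     Seqfuzz.matches(
#       ["hello_world.ex", "HelloWorld.ex"],
#       "hw",
#       & &1,
#       separator_bonus: 50,
#       sort: true,
#       metadata: false
#     )
#     #=> ["hello_world.ex", "HelloWorld.ex"]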
lib/seqfuzz.ex
0.889054
0.886764
seqfuzz.ex
starcoder
defmodule TeslaMate.Vehicles.Vehicle.Summary do import TeslaMate.Convert, only: [miles_to_km: 2, mph_to_kmh: 1] alias TeslaApi.Vehicle.State.{Drive, Charge} alias TeslaApi.Vehicle defstruct [ :display_name, :state, :since, :battery_level, :ideal_battery_range_km, :est_battery_range_km, :battery_range_km, :charge_energy_added, :speed, :outside_temp, :inside_temp, :locked, :sentry_mode, :plugged_in, :scheduled_charging_start_time, :charge_limit_soc, :charger_power ] def into(:start, _since, nil) do %__MODULE__{state: :unavailable} end def into(state, since, vehicle) do %__MODULE__{format_vehicle(vehicle) | state: format_state(state), since: since} end defp format_state({:charging, "Complete", _process_id}), do: :charging_complete defp format_state({:charging, _state, _process_id}), do: :charging defp format_state({:driving, {:offline, _}, _id}), do: :offline defp format_state({:driving, _state, _id}), do: :driving defp format_state({state, _}) when is_atom(state), do: state defp format_state(state) when is_atom(state), do: state defp format_vehicle(%Vehicle{} = vehicle) do %__MODULE__{ display_name: vehicle.display_name, speed: speed(vehicle), ideal_battery_range_km: get_in_struct(vehicle, [:charge_state, :ideal_battery_range]) |> miles_to_km(1), est_battery_range_km: get_in_struct(vehicle, [:charge_state, :est_battery_range]) |> miles_to_km(1), battery_range_km: get_in_struct(vehicle, [:charge_state, :battery_range]) |> miles_to_km(1), battery_level: get_in_struct(vehicle, [:charge_state, :battery_level]), charge_energy_added: get_in_struct(vehicle, [:charge_state, :charge_energy_added]), charger_power: get_in_struct(vehicle, [:charge_state, :charger_power]), plugged_in: plugged_in(vehicle), scheduled_charging_start_time: get_in_struct(vehicle, [:charge_state, :scheduled_charging_start_time]) |> to_datetime(), charge_limit_soc: get_in_struct(vehicle, [:charge_state, :charge_limit_soc]), outside_temp: get_in_struct(vehicle, [:climate_state, :outside_temp]), inside_temp: get_in_struct(vehicle, [:climate_state, :inside_temp]), locked: get_in_struct(vehicle, [:vehicle_state, :locked]), sentry_mode: get_in_struct(vehicle, [:vehicle_state, :sentry_mode]) } end defp speed(%Vehicle{drive_state: %Drive{speed: s}}) when not is_nil(s), do: mph_to_kmh(s) defp speed(_vehicle), do: nil defp plugged_in(%Vehicle{charge_state: nil}), do: nil defp plugged_in(%Vehicle{vehicle_state: nil}), do: nil defp plugged_in(%Vehicle{ charge_state: %Charge{charge_port_latch: "Engaged", charge_port_door_open: true} }) do true end defp plugged_in(_vehicle), do: false defp to_datetime(nil), do: nil defp to_datetime(ts), do: DateTime.from_unix!(ts) defp get_in_struct(struct, keys) do Enum.reduce(keys, struct, fn key, acc -> if acc, do: Map.get(acc, key) end) end end
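# Sketch (assumption): with no vehicle data the summary is :unavailable; tuple
# states collapse to their leading atom via format_state/1.
#
#     alias TeslaMate.Vehicles.Vehicle.Summary
#
#     Summary.into(:start, nil, nil)
#     #=> %Summary{state: :unavailable}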
lib/teslamate/vehicles/vehicle/summary.ex
0.684686
0.541773
summary.ex
starcoder
defmodule Grizzly.CommandClass.NetworkManagementInclusion.NodeAddDSKSet do
  @moduledoc """
  Command module for working with the NETWORK_MANAGEMENT_INCLUSION NODE_ADD_DSK_SET command.

  Command options:

    * `:accept` - Boolean that indicates if S2 requested keys should be granted
    * `:input_dsk` - The 5 digit pin code found on the device
    * `:input_dsk_length` - the length of the DSK
    * `:seq_number` - The sequence number for the Z/IP Packet
    * `:retries` - The number of times to resend the command (default 2)
  """
  @behaviour Grizzly.Command

  @typedoc """
  The `input_dsk` field is the 5 digit pin found on the joining node, normally near the QR code.
  """
  @type t :: %__MODULE__{
          seq_number: Grizzly.seq_number(),
          accept: boolean(),
          input_dsk_length: non_neg_integer(),
          retries: non_neg_integer(),
          input_dsk: non_neg_integer()
        }

  alias Grizzly.Packet
  alias Grizzly.Command.{EncodeError, Encoding}
  alias Grizzly.CommandClass.NetworkManagementInclusion

  defstruct seq_number: nil,
            accept: true,
            input_dsk_length: nil,
            input_dsk: 0,
            retries: 2

  def init(opts) do
    {:ok, struct(__MODULE__, opts)}
  end

  @spec encode(t) :: {:ok, binary} | {:error, EncodeError.t()}
  def encode(%__MODULE__{seq_number: seq_number, input_dsk_length: 0} = command) do
    with {:ok, encoded} <-
           Encoding.encode_and_validate_args(command, %{
             accept: {:encode_with, NetworkManagementInclusion, :encode_accept}
           }) do
      {:ok,
       Packet.header(seq_number) <>
         <<0x34, 0x14, seq_number, encoded.accept::size(1), 0::size(7), 0>>}
    end
  end

  def encode(
        %__MODULE__{
          seq_number: seq_number,
          input_dsk_length: input_dsk_length,
          input_dsk: input_dsk
        } = command
      ) do
    with {:ok, encoded} <-
           Encoding.encode_and_validate_args(command, %{
             accept: {:encode_with, NetworkManagementInclusion, :encode_accept},
             input_dsk_length: {:bits, 4}
           }) do
      dsk = <<input_dsk::size(input_dsk_length)-unit(8)>>

      {:ok,
       Packet.header(seq_number) <>
         <<0x34, 0x14, seq_number, encoded.accept::size(1), input_dsk_length::size(7),
           dsk::binary>>}
    end
  end

  def handle_response(%__MODULE__{seq_number: seq_number}, %Packet{
        seq_number: seq_number,
        types: [:ack_response]
      }) do
    {:done, :ok}
  end

  def handle_response(%__MODULE__{seq_number: seq_number, retries: 0}, %Packet{
        seq_number: seq_number,
        types: [:nack_response]
      }) do
    {:done, {:error, :nack_response}}
  end

  def handle_response(%__MODULE__{seq_number: seq_number, retries: n} = command, %Packet{
        seq_number: seq_number,
        types: [:nack_response]
      }) do
    {:retry, %{command | retries: n - 1}}
  end

  def handle_response(command, _packet) do
    {:continue, command}
  end
end
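# Usage sketch (assumption): granting keys with a 5-digit DSK pin packed into
# two bytes; seq_number and pin values are placeholders.
#
#     alias Grizzly.CommandClass.NetworkManagementInclusion.NodeAddDSKSet
#
#     {:ok, command} =
#       NodeAddDSKSet.init(seq_number: 0x01, input_dsk_length: 2, input_dsk: 12_345)
#
#     {:ok, binary} = NodeAddDSKSet.encode(command)
#     # the DSK is packed into `input_dsk_length` bytes: <<48, 57>> == <<12_345::16>>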
lib/grizzly/command_class/network_management_inclusion/node_add_dsk_set.ex
0.869763
0.418786
node_add_dsk_set.ex
starcoder
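A quick sketch of how this command would be built and encoded; the sequence number and pin are made-up values, and in practice `Grizzly` drives `init/1` and `encode/1` itself during S2 inclusion:

```elixir
alias Grizzly.CommandClass.NetworkManagementInclusion.NodeAddDSKSet

# Grant the requested S2 keys and supply the 5-digit pin. The pin is the
# decimal rendering of the first two DSK bytes, hence input_dsk_length: 2.
{:ok, command} =
  NodeAddDSKSet.init(seq_number: 0x01, accept: true, input_dsk_length: 2, input_dsk: 12345)

# encode/1 builds the Z/IP packet binary; with input_dsk_length: 0 the
# second encode/1 clause is skipped and no DSK bytes are appended.
{:ok, _binary} = NodeAddDSKSet.encode(command)
```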
defmodule Unicode.Transform.Rule.Conversion do
  @moduledoc """
  #### 10.3.9 [Conversion Rules](https://unicode.org/reports/tr35/tr35-general.html#Conversion_Rules)

  Conversion rules can be forward, backward, or double. The complete conversion rule syntax is described below:

  **Forward**

  > A forward conversion rule is of the following form:
  > ```
  > before_context { text_to_replace } after_context → completed_right | right_to_revisit ;
  > ```
  > If there is no before_context, then the "{" can be omitted. If there is no after_context, then the "}" can be omitted. If there is no right_to_revisit, then the "|" can be omitted. A forward conversion rule is only executed for the normal transform and is ignored when generating the inverse transform.

  **Backward**

  > A backward conversion rule is of the following form:
  > ```
  > completed_right | right_to_revisit ← before_context { text_to_replace } after_context ;
  > ```
  > The same omission rules apply as in the case of forward conversion rules. A backward conversion rule is only executed for the inverse transform and is ignored when generating the normal transform.

  **Dual**

  > A dual conversion rule combines a forward conversion rule and a backward conversion rule into one, as discussed above. It is of the form:
  >
  > ```
  > a { b | c } d ↔ e { f | g } h ;
  > ```
  >
  > When generating the normal transform and the inverse, the revisit mark "|" and the before and after contexts are ignored on the sides where they do not belong. Thus, the above is exactly equivalent to the sequence of the following two rules:
  >
  > ```
  > a { b c } d → f | g ;
  > b | c ← e { f g } h ;
  > ```

  #### 10.3.10 <a name="Intermixing_Transform_Rules_and_Conversion_Rules" href="#Intermixing_Transform_Rules_and_Conversion_Rules">Intermixing Transform Rules and Conversion Rules</a>

  Transform rules and conversion rules may be freely intermixed. Inserting a transform rule into the middle of a set of conversion rules has an important side effect.

  Normally, conversion rules are considered together as a group. The only time their order in the rule set is important is when more than one rule matches at the same point in the string. In that case, the one that occurs earlier in the rule set wins. In all other situations, when multiple rules match overlapping parts of the string, the one that matches earlier wins.

  Transform rules apply to the whole string. If you have several transform rules in a row, the first one is applied to the whole string, then the second one is applied to the whole string, and so on. To reconcile this behavior with the behavior of conversion rules, transform rules have the side effect of breaking a surrounding set of conversion rules into two groups: First all of the conversion rules before the transform rule are applied as a group to the whole string in the usual way, then the transform rule is applied to the whole string, and then the conversion rules after the transform rule are applied as a group to the whole string. For example, consider the following rules:

  ```
  abc → xyz;
  xyz → def;
  ::Upper;
  ```

  If you apply these rules to “abcxyz”, you get “XYZDEF”. If you move the “::Upper;” to the middle of the rule set and change the cases accordingly, then applying this to “abcxyz” produces “DEFDEF”.

  ```
  abc → xyz;
  ::Upper;
  XYZ → DEF;
  ```

  This is because “::Upper;” causes the transliterator to reset to the beginning of the string. The first rule turns the string into “xyzxyz”, the second rule upper cases the whole thing to “XYZXYZ”, and the third rule turns this into “DEFDEF”.

  This can be useful when a transform naturally occurs in multiple “passes.” Consider this rule set:

  ```
  [:Separator:]* → ' ';
  'high school' → 'H.S.';
  'middle school' → 'M.S.';
  'elementary school' → 'E.S.';
  ```

  If you apply this rule to “high school”, you get “H.S.”, but if you apply it to “high school” (with two spaces), you just get “high school” (with one space). To have “high school” (with two spaces) turn into “H.S.”, you'd either have to have the first rule back up some arbitrary distance (far enough to see “elementary”, if you want all the rules to work), or you have to include the whole left-hand side of the first rule in the other rules, which can make them hard to read and maintain:

  ```
  $space = [:Separator:]*;
  high $space school → 'H.S.';
  middle $space school → 'M.S.';
  elementary $space school → 'E.S.';
  ```

  Instead, you can simply insert “ `::Null;` ” in order to get things to work right:

  ```
  [:Separator:]* → ' ';
  ::Null;
  'high school' → 'H.S.';
  'middle school' → 'M.S.';
  'elementary school' → 'E.S.';
  ```

  The “::Null;” has no effect of its own (the null transform, by definition, does not do anything), but it splits the other rules into two “passes”: The first rule is applied to the whole string, normalizing all runs of white space into single spaces, and then we start over at the beginning of the string to look for the phrases. “high school” (with four spaces) gets correctly converted to “H.S.”.

  This can also sometimes be useful with rules that have overlapping domains. Consider this rule set:

  ```
  sch → sh ;
  ss → z ;
  ```

  Applying this rule to “bassch” results in “bazch” because “ss” matches earlier in the string than “sch”. If you really wanted “bassh”—that is, if you wanted the first rule to win even when the second rule matches earlier in the string, you'd either have to add another rule for this special case...

  ```
  sch → sh ;
  ssch → ssh;
  ss → z ;
  ```

  ...or you could use a transform rule to apply the conversions in two passes:

  ```
  sch → sh ;
  ::Null;
  ss → z ;
  ```
  """

  @direction_symbols ["→", "←", "↔"]
  @directions ~r/(?<![\\'])[#{Enum.join(@direction_symbols)}]/u
  @fields [:direction, :left, :right, :comment]

  defstruct @fields

  alias Unicode.Transform.Rule.Comment

  def parse(rule) when is_binary(rule) do
    if Regex.match?(@directions, rule) do
      parse_binary(rule)
    else
      nil
    end
  end

  defp parse_binary(rule) do
    [rule, comment] =
      case String.split(rule, ~r/(?<!')#/, parts: 2) do
        [rule] -> [rule, nil]
        [rule, comment] -> [rule, String.trim(comment)]
      end

    parsed =
      rule
      |> String.trim()
      |> String.split(~r/(?<!')[;]/u, include_captures: true)
      |> parse_rule()

    struct(__MODULE__, Enum.zip(@fields, parsed) |> Keyword.put(:comment, comment))
  end

  defp parse_rule(rule) when is_binary(rule) do
    rule
    |> String.trim()
    |> String.split(@directions, include_captures: true)
    |> parse_rule()
  end

  defp parse_rule([rule]) do
    rule
    |> String.trim()
    |> String.replace_trailing(";", "")
    |> parse_rule()
  end

  defp parse_rule([rule, ";", ""]) do
    rule
    |> String.trim()
    |> parse_rule()
  end

  # Forward rule
  defp parse_rule([left, "→", right]) do
    parse_rule(left, right, :forward)
  end

  # Backward rule
  defp parse_rule([right, "←", left]) do
    parse_rule(left, right, :backward)
  end

  # Both rule
  defp parse_rule([left, "↔", right]) do
    parse_rule(left, right, :both)
  end

  defp parse_rule(left, right, direction) do
    left = parse_side(left)
    right = parse_side(right)
    [direction, left, right]
  end

  defp parse_side(side) when is_binary(side) do
    side
    |> String.trim()
    |> split_at_syntax()
    |> parse_side()
    |> trim()
  end

  defp parse_side([before_context, "{", replace, "|", revisit, "}", after_context]) do
    [before_context, replace, revisit, after_context]
  end

  defp parse_side([before_context, "{", replace, "}", after_context]) do
    [before_context, replace, nil, after_context]
  end

  defp parse_side([before_context, "{", replace, "|", revisit]) do
    [before_context, replace, revisit, nil]
  end

  defp parse_side([before_context, "{", replace]) do
    [before_context, replace, nil, nil]
  end

  defp parse_side([replace, "|", revisit, "}", after_context]) do
    [nil, replace, revisit, after_context]
  end

  defp parse_side([replace, "}", after_context]) do
    [nil, replace, nil, after_context]
  end

  # A side with a revisit mark but no contexts, e.g. the right side of
  # `a { b c } d → f | g ;` from the moduledoc above.
  defp parse_side([replace, "|", revisit]) do
    [nil, replace, revisit, nil]
  end

  defp parse_side([replace]) do
    [nil, replace, nil, nil]
  end

  # Splits on syntax chars except within string
  # between ' (single quote) characters
  defp split_at_syntax(string, acc \\ [""])

  defp split_at_syntax("", acc) do
    Enum.reverse(acc)
  end

  defp split_at_syntax(<<"\\u", hex::binary-4>> <> string, [head | rest]) do
    split_at_syntax(string, [head <> "\\u" <> hex | rest])
  end

  defp split_at_syntax(<<"\\", char::utf8>> <> string, [head | rest]) do
    split_at_syntax(string, [head <> "\\" <> <<char::utf8>> | rest])
  end

  defp split_at_syntax(<<"'">> <> string, [head | rest]) do
    {quoted_string, remainder} = extract_quoted(string)
    split_at_syntax(remainder, [head <> quoted_string | rest])
  end

  defp split_at_syntax(<<"{">> <> string, [head | rest]) do
    split_at_syntax(string, ["", "{", head | rest])
  end

  defp split_at_syntax(<<"}">> <> string, [head | rest]) do
    split_at_syntax(string, ["", "}", head | rest])
  end

  defp split_at_syntax(<<"|">> <> string, [head | rest]) do
    split_at_syntax(string, ["", "|", head | rest])
  end

  defp split_at_syntax(<<"[">> <> string, [head | rest]) do
    {class, string} = Unicode.Transform.Utils.extract_character_class(<<"[">> <> string)
    split_at_syntax(string, [head <> class | rest])
  end

  defp split_at_syntax(<<char::utf8>> <> string, [head | rest]) do
    split_at_syntax(string, [head <> <<char::utf8>> | rest])
  end

  defp extract_quoted(string, acc \\ "")

  defp extract_quoted("", acc) do
    {acc, ""}
  end

  defp extract_quoted(<<"\\'">> <> rest, acc) do
    extract_quoted(rest, acc <> "'")
  end

  defp extract_quoted(<<"'">> <> rest, acc) do
    {acc, rest}
  end

  defp extract_quoted(<<char::utf8>> <> rest, acc) do
    extract_quoted(rest, acc <> <<char::utf8>>)
  end

  defp trim(list) do
    Enum.map(list, fn
      nil -> nil
      "" -> nil
      other -> String.trim(other)
    end)
  end

  @doc false
  def code_options(nil, nil), do: []
  def code_options(before_context, nil), do: [preceeded_by: before_context]
  def code_options(nil, after_context), do: [followed_by: after_context]

  def code_options(before_context, after_context),
    do: [preceeded_by: before_context, followed_by: after_context]

  @doc false
  def code_args(from, nil), do: from
  def code_args(from, extra), do: from <> extra

  defimpl Unicode.Transform.Rule do
    def to_forward_code(%{left: left, right: right} = rule) do
      [before_context, from, extra, after_context] = left
      [_, to, revisit, _] = right

      from = Unicode.Transform.Rule.Conversion.code_args(from, extra)
      options = Unicode.Transform.Rule.Conversion.code_options(before_context, after_context)
      options = if revisit, do: Keyword.put(options, :revisit, revisit), else: options

      base_code = [
        Comment.comment_from(rule),
        "replace(",
        wrap(from),
        ", ",
        wrap(to)
      ]

      if options == [] do
        [base_code, ")", "\n"]
      else
        [base_code, ", ", inspect(options), ")", "\n"]
      end
    end

    def to_backward_code(_rule) do
      []
    end

    defp wrap(nil) do
      "\"\""
    end

    defp wrap(string) do
      "\"" <> string <> "\""
    end
  end
end
lib/unicode/transform/rule/conversion.ex
0.887571
0.886322
conversion.ex
starcoder
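Tracing `parse/1` on a small forward rule shows how each side is decomposed into `[before_context, replace, revisit, after_context]`; this walkthrough follows the clauses in the record above:

```elixir
alias Unicode.Transform.Rule.Conversion

# A forward rule with both contexts, a revisit mark, and a trailing comment.
%Conversion{direction: :forward, left: left, right: right, comment: "demo"} =
  Conversion.parse("ab { cd | ef } gh → ij ; # demo")

left   #=> ["ab", "cd", "ef", "gh"]  (before-context, replace, revisit, after-context)
right  #=> [nil, "ij", nil, nil]

# Strings without a direction arrow are not conversion rules:
nil = Conversion.parse("$space = [:Separator:]*;")
```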
defmodule Bitcoinex.Segwit do
  @moduledoc """
  SegWit address serialization.
  """

  alias Bitcoinex.Bech32

  use Bitwise

  @valid_witness_program_length_range 2..40
  @valid_witness_version 0..16
  @supported_network [:mainnet, :testnet, :regtest]

  @type hrp :: String.t()
  @type data :: list(integer)

  # there seems to be no way to use a list-of-atoms module attribute in a type spec
  @type network :: :testnet | :mainnet | :regtest

  @type witness_version :: 0..16
  @type witness_program :: list(integer)

  @type error :: atom()

  @doc """
  Decodes an address and returns its network, witness version, and witness program.
  """
  @spec decode_address(String.t()) ::
          {:ok, {network, witness_version, witness_program}} | {:error, error}
  def decode_address(address) when is_binary(address) do
    with {_, {:ok, {encoding_type, hrp, data}}} <- {:decode_bech32, Bech32.decode(address)},
         {_, {:ok, network}} <- {:parse_network, parse_network(hrp |> String.to_charlist())},
         {_, {:ok, {version, program}}} <- {:parse_segwit_data, parse_segwit_data(data)} do
      case witness_version_to_bech_encoding(version) do
        ^encoding_type ->
          {:ok, {network, version, program}}

        _ ->
          # the encoding type derived from the witness version (first byte of
          # data) is different from the one derived from bech32 decoding
          {:error, :invalid_checksum}
      end
    else
      {_, {:error, error}} ->
        {:error, error}
    end
  end

  @doc """
  Encodes an address string.
  """
  @spec encode_address(network, witness_version, witness_program) ::
          {:ok, String.t()} | {:error, error}
  def encode_address(network, _, _) when not (network in @supported_network) do
    {:error, :invalid_network}
  end

  def encode_address(_, witness_version, _) when not (witness_version in @valid_witness_version) do
    {:error, :invalid_witness_version}
  end

  def encode_address(network, version, program) do
    with {:ok, converted_program} <- Bech32.convert_bits(program, 8, 5),
         {:is_program_length_valid, true} <-
           {:is_program_length_valid, is_program_length_valid?(version, program)} do
      hrp =
        case network do
          :mainnet -> "bc"
          :testnet -> "tb"
          :regtest -> "bcrt"
        end

      Bech32.encode(hrp, [version | converted_program], witness_version_to_bech_encoding(version))
    else
      {:is_program_length_valid, false} ->
        {:error, :invalid_program_length}

      error ->
        error
    end
  end

  @doc """
  Simpler interface to check whether an address is valid.
  """
  @spec is_valid_segwit_address?(String.t()) :: boolean
  def is_valid_segwit_address?(address) when is_binary(address) do
    case decode_address(address) do
      {:ok, _} -> true
      _ -> false
    end
  end

  @spec get_segwit_script_pubkey(witness_version, witness_program) :: String.t()
  def get_segwit_script_pubkey(version, program) do
    # OP_0 is encoded as 0x00, but OP_1 through OP_16 are encoded as 0x51 through 0x60
    wit_version_adjusted = if(version == 0, do: 0, else: version + 0x50)

    [
      wit_version_adjusted,
      Enum.count(program) | program
    ]
    |> :erlang.list_to_binary()
    # to hex and all lower case for better readability
    |> Base.encode16(case: :lower)
  end

  defp parse_segwit_data([]) do
    {:error, :empty_segwit_data}
  end

  defp parse_segwit_data([version | encoded]) when version in @valid_witness_version do
    case Bech32.convert_bits(encoded, 5, 8, false) do
      {:ok, program} ->
        if is_program_length_valid?(version, program) do
          {:ok, {version, program}}
        else
          {:error, :invalid_program_length}
        end

      {:error, error} ->
        {:error, error}
    end
  end

  defp parse_segwit_data(_), do: {:error, :invalid_witness_version}

  defp is_program_length_valid?(version, program)
       when length(program) in @valid_witness_program_length_range do
    case {version, length(program)} do
      # BIP141 specifies: if the version byte is 0, but the witness program
      # is neither 20 nor 32 bytes, the script must fail.
      {0, program_length} when program_length == 20 or program_length == 32 -> true
      {0, _} -> false
      _ -> true
    end
  end

  defp is_program_length_valid?(_, _), do: false

  defp parse_network('bc'), do: {:ok, :mainnet}
  defp parse_network('tb'), do: {:ok, :testnet}
  defp parse_network('bcrt'), do: {:ok, :regtest}
  defp parse_network(_), do: {:error, :invalid_network}

  defp witness_version_to_bech_encoding(0), do: :bech32
  defp witness_version_to_bech_encoding(witver) when witver in 1..16, do: :bech32m
end
server/bitcoinex/lib/segwit.ex
0.843219
0.411406
segwit.ex
starcoder
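The decode/encode pair round-trips the well-known BIP173 P2WPKH test vector, so the expected values in this sketch are not invented:

```elixir
alias Bitcoinex.Segwit

{:ok, {:mainnet, 0, program}} =
  Segwit.decode_address("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4")

length(program)  #=> 20 witness-program bytes, a valid size for version 0

# Version 0 with a 20-byte program yields an OP_0 <20-byte push> script:
"0014" <> _key_hash = Segwit.get_segwit_script_pubkey(0, program)

{:ok, address} = Segwit.encode_address(:mainnet, 0, program)
Segwit.is_valid_segwit_address?(address)  #=> true
```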
defmodule NebulexEcto.Repo do
  @moduledoc """
  Wrapper/Facade on top of `Nebulex.Cache` and `Ecto.Repo`.

  This module encapsulates the access to the Ecto repo and Nebulex cache,
  providing a set of functions compliant with the `Ecto.Repo` API.

  For retrieve-like functions, the wrapper accesses the cache first: if the
  requested data is found, it is returned right away; otherwise, the wrapper
  tries to retrieve the data from the repo (database), and if the data is
  found, it is cached so that next time it can be retrieved directly from
  the cache.

  For write functions (insert, update, delete, ...), the wrapper runs the
  eviction logic, which will either delete the data from the cache or
  replace it, depending on the `:nbx_evict` option.

  When used, `NebulexEcto.Repo` expects `:repo` and `:cache` as options.
  For example:

      defmodule MyApp.CacheableRepo do
        use NebulexEcto.Repo,
          cache: MyApp.Cache,
          repo: MyApp.Repo
      end

  The cache and repo respectively:

      defmodule MyApp.Cache do
        use Nebulex.Cache,
          otp_app: :my_app,
          adapter: Nebulex.Adapters.Local
      end

      defmodule MyApp.Repo do
        use Ecto.Repo,
          otp_app: :my_app,
          adapter: Ecto.Adapters.Postgres
      end

  And this is an example of what their configuration would look like:

      config :my_app, MyApp.Cache,
        gc_interval: 3600

      config :my_app, MyApp.Repo,
        database: "ecto_simple",
        username: "postgres",
        password: "<PASSWORD>",
        hostname: "localhost"

  ## Compile-time configuration options

    * `:cache` - a compile-time option that specifies the Nebulex cache to be
      used by the wrapper.

    * `:repo` - a compile-time option that specifies the Ecto repo to be used
      by the wrapper.

  To configure the cache and repo, see `Nebulex` and `Ecto` documentation
  respectively.

  ## Shared options

  Almost all of the operations below accept the following options:

    * `:nbx_key` - specifies the key to be used for cache access. By default
      is set to `{Ecto.Schema.t, id :: term}`, assuming the schema has a field
      `id` which is the primary key; if this is not your case, you must
      provide the `:nbx_key`.

    * `:nbx_evict` - specifies the eviction strategy. If it is set to
      `:delete` (the default), the key is removed from the cache; if it is
      set to `:replace`, the key is replaced with the new value in the cache.
  """

  @doc false
  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      alias NebulexEcto.Repo, as: CacheableRepo

      {cache, repo} = CacheableRepo.compile_config(__MODULE__, opts)

      @cache cache
      @repo repo

      def __cache__, do: @cache

      def __repo__, do: @repo

      def get(queryable, id, opts \\ []) do
        do_get(queryable, id, opts, &@repo.get/3)
      end

      def get!(queryable, id, opts \\ []) do
        do_get(queryable, id, opts, &@repo.get!/3)
      end

      def get_by(queryable, clauses, opts \\ []) do
        do_get(queryable, clauses, opts, &@repo.get_by/3)
      end

      def get_by!(queryable, clauses, opts \\ []) do
        do_get(queryable, clauses, opts, &@repo.get_by!/3)
      end

      def insert(struct_or_changeset, opts \\ []) do
        execute(&@repo.insert/2, struct_or_changeset, opts)
      end

      def insert!(struct_or_changeset, opts \\ []) do
        execute!(&@repo.insert!/2, struct_or_changeset, opts)
      end

      def update(changeset, opts \\ []) do
        execute(&@repo.update/2, changeset, opts)
      end

      def update!(changeset, opts \\ []) do
        execute!(&@repo.update!/2, changeset, opts)
      end

      def delete(struct_or_changeset, opts \\ []) do
        execute(&@repo.delete/2, struct_or_changeset, Keyword.put(opts, :nbx_evict, :delete))
      end

      def delete!(struct_or_changeset, opts \\ []) do
        execute!(&@repo.delete!/2, struct_or_changeset, Keyword.put(opts, :nbx_evict, :delete))
      end

      def insert_or_update(struct_or_changeset, opts \\ []) do
        execute(&@repo.insert_or_update/2, struct_or_changeset, opts)
      end

      def insert_or_update!(struct_or_changeset, opts \\ []) do
        execute!(&@repo.insert_or_update!/2, struct_or_changeset, opts)
      end

      ## Helpers

      def key(%Ecto.Query{from: %{source: {_tablename, schema}}}, key), do: {schema, key}
      def key(%Ecto.Query{from: {_tablename, schema}}, key), do: {schema, key}
      def key(%{__struct__: struct}, key), do: {struct, key}
      def key(struct, key) when is_atom(struct), do: {struct, key}

      defp do_get(queryable, key, opts, fallback) do
        {nbx_key, opts} = Keyword.pop(opts, :nbx_key)
        cache_key = nbx_key || key(queryable, key)

        cond do
          value = @cache.get(cache_key) ->
            value

          value = fallback.(queryable, key, opts) ->
            @cache.set(cache_key, value)

          true ->
            nil
        end
      end

      defp execute(fun, struct_or_changeset, opts) do
        {nbx_key, opts} = Keyword.pop(opts, :nbx_key)
        {nbx_evict, opts} = Keyword.pop(opts, :nbx_evict, :delete)

        case fun.(struct_or_changeset, opts) do
          {:ok, schema} = res ->
            cache_key = nbx_key || key(schema, schema.id)
            _ = cache_evict(nbx_evict, cache_key, schema)
            res

          error ->
            error
        end
      end

      defp execute!(fun, struct_or_changeset, opts) do
        {nbx_key, opts} = Keyword.pop(opts, :nbx_key)
        {nbx_evict, opts} = Keyword.pop(opts, :nbx_evict, :delete)

        schema = fun.(struct_or_changeset, opts)
        cache_key = nbx_key || key(schema, schema.id)
        _ = cache_evict(nbx_evict, cache_key, schema)
        schema
      end

      defp cache_evict(:delete, key, _), do: @cache.delete(key)
      defp cache_evict(:replace, key, value), do: @cache.set(key, value)
    end
  end

  @doc """
  Retrieves the compile time configuration.
  """
  def compile_config(facade, opts) do
    unless cache = Keyword.get(opts, :cache) do
      raise ArgumentError, "missing :cache option in #{facade}"
    end

    unless repo = Keyword.get(opts, :repo) do
      raise ArgumentError, "missing :repo option in #{facade}"
    end

    {cache, repo}
  end
end
lib/nebulex_ecto/repo.ex
0.85561
0.488527
repo.ex
starcoder
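To make the read-through and eviction behaviour concrete, here is a sketch against the `MyApp.CacheableRepo` facade from the moduledoc; `MyApp.User` is a hypothetical schema:

```elixir
defmodule MyApp.User do
  use Ecto.Schema

  schema "users" do
    field :name, :string
  end
end

# First call hits MyApp.Repo and caches the result under {MyApp.User, 1};
# subsequent calls are served straight from MyApp.Cache.
user = MyApp.CacheableRepo.get(MyApp.User, 1)

# :nbx_evict controls what happens to the cached entry on writes. With
# :replace the fresh schema is written back instead of being evicted.
{:ok, _user} =
  user
  |> Ecto.Changeset.change(name: "new name")
  |> MyApp.CacheableRepo.update(nbx_evict: :replace)
```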
defmodule Erlef.Schema do
  @moduledoc """
  Imports all functionality for an Ecto schema

  ### Usage

  ```
  defmodule Erlef.Schema.MySchema do
    use Erlef.Schema

    schema "my_schemas" do
      # Fields
    end
  end
  ```
  """

  alias Erlef.Inputs

  import Ecto.Changeset

  defmacro __using__(_opts) do
    quote do
      alias __MODULE__
      use Ecto.Schema
      import Ecto
      import Ecto.Changeset
      import Erlef.Schema
      import Ecto.Query, only: [from: 1, from: 2]
      @primary_key {:id, :binary_id, autogenerate: true}
      @foreign_key_type :binary_id
      @timestamps_opts [type: :utc_datetime]
    end
  end

  @spec validate_email(Ecto.Changeset.t(), atom()) :: Ecto.Changeset.t()
  def validate_email(cs, field) do
    case Map.get(cs.changes, field) do
      nil ->
        cs

      _ ->
        case Inputs.is_email?(Map.get(cs.changes, field)) do
          true -> cs
          false -> add_error(cs, field, "#{Atom.to_string(field)} is invalid.")
        end
    end
  end

  @spec validate_url(Ecto.Changeset.t(), atom()) :: Ecto.Changeset.t()
  def validate_url(cs, field) do
    case Map.get(cs.changes, field) do
      nil ->
        cs

      _ ->
        case Inputs.is_url?(Map.get(cs.changes, field)) do
          true ->
            cs

          false ->
            msg = """
            #{humanize(field)} is invalid. Be sure the url consists of a scheme,
            host, and path parts.
            """

            add_error(cs, field, msg)
        end
    end
  end

  @spec validate_uuid(Ecto.Changeset.t(), atom()) :: Ecto.Changeset.t()
  def validate_uuid(cs, field) do
    case Map.get(cs.changes, field) do
      nil ->
        cs

      _ ->
        case Inputs.is_uuid?(Map.get(cs.changes, field)) do
          true ->
            cs

          false ->
            msg = "#{humanize(field)} is invalid. Not a valid UUID."
            add_error(cs, field, msg)
        end
    end
  end

  @spec humanize(atom | String.t()) :: String.t()
  def humanize(atom) when is_atom(atom), do: humanize(Atom.to_string(atom))

  def humanize(bin) when is_binary(bin) do
    bin =
      if String.ends_with?(bin, "_id") do
        binary_part(bin, 0, byte_size(bin) - 3)
      else
        bin
      end

    bin
    |> String.replace("_", " ")
    |> String.capitalize()
  end
end
lib/erlef/schema.ex
0.719581
0.55929
schema.ex
starcoder
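A sketch of the validators in use, with a hypothetical `Contact` schema; the error message format comes from `validate_email/2` in the record above:

```elixir
defmodule Erlef.Schema.Contact do
  use Erlef.Schema

  schema "contacts" do
    field(:email, :string)
    field(:site, :string)
  end

  def changeset(struct, params) do
    struct
    |> cast(params, [:email, :site])
    |> validate_email(:email)
    |> validate_url(:site)
  end
end

# An invalid email produces an error keyed on the field:
# errors: [email: {"email is invalid.", []}]
Erlef.Schema.Contact.changeset(%Erlef.Schema.Contact{}, %{"email" => "nope"})
```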
defmodule Mxpanel.Groups do
  @options_schema [
    time: [
      type: :pos_integer,
      doc:
        "Specific timestamp in seconds of the event. Defaults to `System.os_time(:second)`."
    ]
  ]

  @moduledoc """
  Functions to manipulate group profiles.

  ## Shared Options

  All of the functions in this module accept the following options:

  #{NimbleOptions.docs(@options_schema)}
  """

  alias Mxpanel.Operation

  @doc """
  Updates or adds properties to a group profile.

  The profile is created if it does not exist.

      properties = %{"Address" => "1313 Mockingbird Lane"}

      "Company"
      |> Mxpanel.Groups.set("Mixpanel", properties)
      |> Mxpanel.deliver(client)

  """
  @spec set(String.t(), String.t(), map(), Keyword.t()) :: Operation.t()
  def set(group_key, group_id, properties, opts \\ [])
      when is_binary(group_key) and is_binary(group_id) and is_map(properties) and is_list(opts) do
    payload = build_payload(group_key, group_id, "$set", properties, opts)

    %Operation{endpoint: :groups, payload: payload}
  end

  @doc """
  Adds properties to a group only if the property is not already set.

  The profile is created if it does not exist.

      properties = %{"Address" => "1313 Mockingbird Lane"}

      "Company"
      |> Mxpanel.Groups.set_once("Mixpanel", properties)
      |> Mxpanel.deliver(client)

  """
  @spec set_once(String.t(), String.t(), map(), Keyword.t()) :: Operation.t()
  def set_once(group_key, group_id, properties, opts \\ [])
      when is_binary(group_key) and is_binary(group_id) and is_map(properties) and is_list(opts) do
    payload = build_payload(group_key, group_id, "$set_once", properties, opts)

    %Operation{endpoint: :groups, payload: payload}
  end

  @doc """
  Unsets specific properties on the group profile.

      property_names = ["Items purchased"]

      "Company"
      |> Mxpanel.Groups.unset("Mixpanel", property_names)
      |> Mxpanel.deliver(client)

  """
  @spec unset(String.t(), String.t(), [String.t()], Keyword.t()) :: Operation.t()
  def unset(group_key, group_id, property_names, opts \\ [])
      when is_binary(group_key) and is_binary(group_id) and is_list(property_names) do
    payload = build_payload(group_key, group_id, "$unset", property_names, opts)

    %Operation{endpoint: :groups, payload: payload}
  end

  @doc """
  Removes a specific value in a list property.

      "Company"
      |> Mxpanel.Groups.remove_item("Mixpanel", "Items purchased", "t-shirt")
      |> Mxpanel.deliver(client)

  """
  @spec remove_item(String.t(), String.t(), String.t(), String.t(), Keyword.t()) :: Operation.t()
  def remove_item(group_key, group_id, property, item, opts \\ [])
      when is_binary(group_key) and is_binary(group_id) and is_binary(property) and
             is_binary(item) and is_list(opts) do
    payload = build_payload(group_key, group_id, "$remove", %{property => item}, opts)

    %Operation{endpoint: :groups, payload: payload}
  end

  @doc """
  Deletes a group profile from Mixpanel.

      "Company"
      |> Mxpanel.Groups.delete("Mixpanel")
      |> Mxpanel.deliver(client)

  """
  @spec delete(String.t(), String.t(), Keyword.t()) :: Operation.t()
  def delete(group_key, group_id, opts \\ [])
      when is_binary(group_key) and is_binary(group_id) and is_list(opts) do
    payload = build_payload(group_key, group_id, "$delete", "", opts)

    %Operation{endpoint: :groups, payload: payload}
  end

  @doc """
  Adds the specified values to a list property on a group profile and ensures
  that those values only appear once.

      properties = %{"Items purchased" => ["socks", "shirts"], "Browser" => "ie"}

      "Company"
      |> Mxpanel.Groups.union("Mixpanel", properties)
      |> Mxpanel.deliver(client)

  """
  @spec union(String.t(), String.t(), map(), Keyword.t()) :: Operation.t()
  def union(group_key, group_id, properties, opts \\ [])
      when is_binary(group_key) and is_binary(group_id) and is_map(properties) and is_list(opts) do
    payload = build_payload(group_key, group_id, "$union", properties, opts)

    %Operation{endpoint: :groups, payload: payload}
  end

  defp build_payload(group_key, group_id, operation, properties, opts) do
    opts = validate_options!(opts)

    %{
      "$group_key" => group_key,
      "$group_id" => group_id,
      "$time" => Keyword.get(opts, :time, System.os_time(:second)),
      operation => properties
    }
  end

  defp validate_options!(opts) do
    case NimbleOptions.validate(opts, @options_schema) do
      {:ok, options} -> options
      {:error, %NimbleOptions.ValidationError{message: message}} -> raise ArgumentError, message
    end
  end
end
lib/mxpanel/groups.ex
0.823009
0.433322
groups.ex
starcoder
defmodule GenSpoxy.Stores.Ets do
  @moduledoc """
  Implements the `GenSpoxy.Store` behaviour.

  It stores its data in `ets` tables and manages them using sharded
  `GenServer` partitions.
  """

  use GenServer
  use GenSpoxy.Partitionable

  @behaviour GenSpoxy.Store

  alias GenSpoxy.Defaults

  @total_partitions Defaults.total_partitions() * 10

  # API
  def start_link(opts \\ []) do
    {:ok, partition} = Keyword.fetch(opts, :partition)
    opts = Keyword.put(opts, :name, partition_server(partition))

    GenServer.start_link(__MODULE__, partition, opts)
  end

  @impl true
  def lookup_req(table_name, req_key) do
    partition = calc_req_partition(table_name)

    case :ets.lookup(ets_partition_table(partition), req_key) do
      [{^req_key, {resp, metadata}}] -> {resp, metadata}
      _ -> nil
    end
  end

  @impl true
  def store_req!(table_name, {req, req_key, resp, metadata}, opts) do
    partition = calc_req_partition(table_name)

    GenServer.call(
      partition_server(partition),
      {:store_req!, partition, req, req_key, resp, metadata, opts}
    )
  end

  @impl true
  def invalidate!(table_name, req_key) do
    partition = calc_req_partition(table_name)
    server = partition_server(partition)

    GenServer.call(server, {:invalidate!, partition, req_key})
  end

  @doc """
  used for testing
  """
  def reset_partition!(partition) do
    server = partition_server(partition)
    GenServer.call(server, {:reset!, partition})
  end

  @doc """
  used for testing
  """
  def reset_all! do
    tasks =
      Enum.map(1..@total_partitions, fn partition ->
        Task.async(fn -> reset_partition!(partition) end)
      end)

    Enum.each(tasks, &Task.await/1)
  end

  # callbacks
  @impl true
  def init(partition) do
    :ets.new(ets_partition_table(partition), [
      :set,
      :protected,
      :named_table,
      {:read_concurrency, true}
    ])

    {:ok, []}
  end

  @impl true
  def handle_call({:store_req!, partition, _req, req_key, resp, metadata, opts}, _from, state) do
    uuid = UUID.uuid1()
    now = System.system_time(:millisecond)
    {:ok, ttl_ms} = Keyword.fetch(opts, :ttl_ms)
    expires_at = now + ttl_ms
    metadata = Map.merge(metadata, %{uuid: uuid, expires_at: expires_at})

    :ets.insert(ets_partition_table(partition), {req_key, {resp, metadata}})

    {:reply, :ok, state}
  end

  @impl true
  def handle_call({:invalidate!, partition, req_key}, _from, state) do
    :ets.delete(ets_partition_table(partition), req_key)
    {:reply, :ok, state}
  end

  @impl true
  def handle_call({:reset!, partition}, _from, state) do
    :ets.delete_all_objects(ets_partition_table(partition))
    {:reply, :ok, state}
  end

  @impl true
  def total_partitions do
    @total_partitions
  end

  @impl true
  def calc_req_partition(table_name) do
    1 + :erlang.phash2(table_name, @total_partitions)
  end

  defp ets_partition_table(partition) do
    String.to_atom("ets-#{partition}")
  end
end
lib/store/ets/ets.ex
0.800692
0.405625
ets.ex
starcoder
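A rough interaction sketch, assuming the partition server for the table has been started (in a real application the partitions would normally be supervised):

```elixir
alias GenSpoxy.Stores.Ets

partition = Ets.calc_req_partition("recent-posts")
{:ok, _pid} = Ets.start_link(partition: partition)

req = %{query: :recent_posts}
resp = [:post_1, :post_2]

# The write goes through the partition-owning GenServer; the TTL is stamped
# into the metadata as `expires_at`.
:ok = Ets.store_req!("recent-posts", {req, "req-key-1", resp, %{}}, ttl_ms: 5_000)

# Reads bypass the server and hit the ETS table directly.
{^resp, %{uuid: _, expires_at: _}} = Ets.lookup_req("recent-posts", "req-key-1")

:ok = Ets.invalidate!("recent-posts", "req-key-1")
```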
defmodule Stripe.BalanceTransaction do
  @moduledoc """
  Work with [Stripe `balance_transaction` objects](https://stripe.com/docs/api#balance_transactions/object).

  You can:
  - [Retrieve a balance transaction](https://stripe.com/docs/api/balance_transactions/retrieve)
  - [List all balance history](https://stripe.com/docs/api/balance_transactions/list)
  """

  use Stripe.Entity
  import Stripe.Request

  @type t :: %__MODULE__{
          id: Stripe.id(),
          object: String.t(),
          amount: integer,
          available_on: Stripe.timestamp(),
          created: Stripe.timestamp(),
          currency: String.t(),
          description: String.t() | nil,
          exchange_rate: integer | nil,
          fee: integer,
          fee_details: list(Stripe.Types.fee()) | [],
          net: integer,
          reporting_category: String.t(),
          source: Stripe.id() | Stripe.Source.t() | nil,
          status: String.t(),
          type: String.t()
        }

  defstruct [
    :id,
    :object,
    :amount,
    :available_on,
    :created,
    :currency,
    :description,
    :exchange_rate,
    :fee,
    :fee_details,
    :net,
    :reporting_category,
    :source,
    :status,
    :type
  ]

  @endpoint "balance/history"

  @doc """
  Retrieves the balance transaction with the given ID.

  Requires the ID of the balance transaction to retrieve and takes no other parameters.

  See the [Stripe docs](https://stripe.com/docs/api/balance_transactions/retrieve).
  """
  @spec retrieve(Stripe.id(), Stripe.options()) :: {:ok, t} | {:error, Stripe.Error.t()}
  def retrieve(id, opts \\ []) do
    new_request(opts)
    |> put_endpoint(@endpoint <> "/#{id}")
    |> put_method(:get)
    |> make_request()
  end

  @doc """
  Returns a list of transactions that have contributed to the Stripe account balance.

  Examples of such transactions are charges, transfers, and so forth. The
  transactions are returned in sorted order, with the most recent transactions
  appearing first.

  See `t:Stripe.BalanceTransaction.All.t/0` or the
  [Stripe docs](https://stripe.com/docs/api/balance_transactions/list) for
  parameter structure.
  """
  @spec all(params, Stripe.options()) :: {:ok, Stripe.List.t(t)} | {:error, Stripe.Error.t()}
        when params: %{
               optional(:available_on) => String.t() | Stripe.date_query(),
               optional(:created) => String.t() | Stripe.date_query(),
               optional(:currency) => String.t(),
               optional(:ending_before) => Stripe.id() | Stripe.BalanceTransaction.t(),
               optional(:limit) => 1..100,
               optional(:payout) => Stripe.id() | Stripe.Payout.t(),
               optional(:source) => Stripe.id() | Stripe.Source.t(),
               optional(:starting_after) => Stripe.id() | Stripe.BalanceTransaction.t(),
               optional(:type) => String.t()
             }
  def all(params \\ %{}, opts \\ []) do
    new_request(opts)
    |> put_endpoint(@endpoint)
    |> put_method(:get)
    |> put_params(params)
    |> cast_to_id([:ending_before, :payout, :source, :starting_after])
    |> make_request()
  end
end
lib/stripe/core_resources/balance_transaction.ex
0.859943
0.420213
balance_transaction.ex
starcoder
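Typical calls look like the following; the transaction and payout ids are placeholders, and the API key is assumed to be configured for the library:

```elixir
# Fetch a single transaction by id:
{:ok, %Stripe.BalanceTransaction{net: _net, fee: _fee}} =
  Stripe.BalanceTransaction.retrieve("txn_123")

# List recent transactions for one payout; struct arguments to :payout and
# friends are reduced to their ids by cast_to_id/2 before the request.
{:ok, %Stripe.List{data: _transactions}} =
  Stripe.BalanceTransaction.all(%{limit: 10, payout: "po_123"})
```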
defmodule Prog do
  @moduledoc """
  Documentation for `Prog`.
  """

  @doc """
  Day 11
  """
  def solve do
    {:ok, raw} = File.read("data/day_11")

    # raw = "L.LL.LL.LL
    # LLLLLLL.LL
    # L.L.L..L..
    # LLLL.LL.LL
    # L.LL.LL.LL
    # L.LLLLL.LL
    # ..L.L.....
    # LLLLLLLLLL
    # L.LLLLLL.L
    # L.LLLLL.LL"

    data =
      String.split(raw, "\n", trim: true)
      |> Enum.map(&extract/1)

    column_size = length(List.first(data)) - 1
    row_size = length(data) - 1

    as_map =
      convert_to_map(data)
      |> Enum.reduce(Map.new(), fn row, acc ->
        Enum.reduce(row, acc, &put_stuff/2)
      end)

    part_1 =
      find_occupied_seats_after_stabilizing(
        as_map,
        row_size,
        column_size,
        &occupied_adjacent_seats/3,
        4
      )

    IO.inspect part_1, label: "Part 1"

    part_2 =
      find_occupied_seats_after_stabilizing(
        as_map,
        row_size,
        column_size,
        &occupied_adjacent_seats_line_of_sight/3,
        5
      )

    IO.inspect part_2, label: "Part 2"
  end

  def find_occupied_seats_after_stabilizing(as_map, row_size, column_size, finder, adjacent_count) do
    updates =
      apply_rules(as_map, row_size, column_size, finder, adjacent_count)
      |> Enum.reject(&is_nil/1)

    updated_map =
      Enum.reduce(updates, as_map, fn {coords, entry}, acc ->
        Map.put(acc, coords, entry)
      end)

    if updated_map == as_map do
      # print_waiting_room(updated_map, row_size, column_size)
      count_occupied_seats(updated_map, row_size, column_size)
    else
      find_occupied_seats_after_stabilizing(updated_map, row_size, column_size, finder, adjacent_count)
    end
  end

  def put_stuff({coords, entry}, acc) do
    Map.put(acc, coords, entry)
  end

  def convert_to_map(data) do
    data
    |> Enum.with_index
    |> Enum.map(fn {row, y} ->
      row
      |> Enum.with_index
      |> Enum.map(fn {cell, x} -> {{y, x}, cell} end)
    end)
  end

  def apply_rules(data, row_size, column_size, finder, adjacent_count) do
    Range.new(0, row_size)
    |> Enum.flat_map(fn row_index ->
      Range.new(0, column_size)
      |> Enum.map(fn column_index ->
        apply_rules_to_cell(row_index, column_index, data, finder, adjacent_count)
      end)
    end)
  end

  def apply_rules_to_cell(y, x, data, finder, adjacent_count) do
    adjacent_seats = finder.(y, x, data)

    case Map.get(data, {y, x}) do
      :empty ->
        if adjacent_seats == 0 do
          {{y, x}, :occupied}
        end

      :occupied ->
        if adjacent_seats >= adjacent_count do
          {{y, x}, :empty}
        end

      _ ->
        nil
    end
  end

  def occupied_adjacent_seats(y, x, data) do
    [
      Map.get(data, {y + 1, x - 1}),
      Map.get(data, {y + 1, x}),
      Map.get(data, {y + 1, x + 1}),
      Map.get(data, {y, x - 1}),
      Map.get(data, {y, x + 1}),
      Map.get(data, {y - 1, x - 1}),
      Map.get(data, {y - 1, x}),
      Map.get(data, {y - 1, x + 1})
    ]
    |> Enum.map(&occupied/1)
    |> Enum.sum()
  end

  def occupied_adjacent_seats_line_of_sight(y, x, data) do
    [
      occupied_seat_line_of_sight(y, fn y -> (y + 1) end, x, fn x -> (x - 1) end, data),
      occupied_seat_line_of_sight(y, fn y -> (y + 1) end, x, fn x -> x end, data),
      occupied_seat_line_of_sight(y, fn y -> (y + 1) end, x, fn x -> (x + 1) end, data),
      occupied_seat_line_of_sight(y, fn y -> y end, x, fn x -> (x - 1) end, data),
      occupied_seat_line_of_sight(y, fn y -> y end, x, fn x -> (x + 1) end, data),
      occupied_seat_line_of_sight(y, fn y -> (y - 1) end, x, fn x -> (x - 1) end, data),
      occupied_seat_line_of_sight(y, fn y -> (y - 1) end, x, fn x -> x end, data),
      occupied_seat_line_of_sight(y, fn y -> (y - 1) end, x, fn x -> (x + 1) end, data)
    ]
    |> Enum.sum()
  end

  def occupied_seat_line_of_sight(y, y_fn, x, x_fn, data) do
    next_y = y_fn.(y)
    next_x = x_fn.(x)

    case Map.get(data, {next_y, next_x}) do
      nil -> 0
      :occupied -> 1
      :empty -> 0
      :floor -> occupied_seat_line_of_sight(next_y, y_fn, next_x, x_fn, data)
    end
  end

  def occupied(seat) do
    case seat do
      :occupied -> 1
      _ -> 0
    end
  end

  def extract(row) do
    String.split(row, "", trim: true)
    |> Enum.map(&extract_cell/1)
  end

  def extract_cell(cell) do
    case cell do
      "L" -> :empty
      "." -> :floor
      "#" -> :occupied
    end
  end

  def print_waiting_room(data, row_size, column_size) do
    IO.puts(
      Range.new(0, row_size)
      |> Enum.map(fn row_index ->
        Range.new(0, column_size)
        |> Enum.map(fn column_index ->
          print_cell(Map.get(data, {row_index, column_index}))
        end)
        |> Enum.join("")
      end)
      |> Enum.join("\n")
    )
  end

  def print_cell(cell) do
    case cell do
      :empty -> "L"
      :floor -> "."
      :occupied -> "#"
    end
  end

  def count_occupied_seats(data, row_size, column_size) do
    Range.new(0, row_size)
    |> Enum.map(fn row_index ->
      Range.new(0, column_size)
      |> Enum.map(fn column_index ->
        occupied(Map.get(data, {row_index, column_index}))
      end)
      |> Enum.sum()
    end)
    |> Enum.sum()
  end
end

Prog.solve()
lib/days/day_11.ex
0.548915
0.403273
day_11.ex
starcoder
defmodule ExqLimit.GCRA do
  @moduledoc """
  This module implements the
  [GCRA](https://en.wikipedia.org/wiki/Generic_cell_rate_algorithm) algorithm.
  Check the [blog post](https://brandur.org/rate-limiting) by brandur for an
  excellent introduction.

  ### Example

      {ExqLimit.GCRA, period: :hour, rate: 60}

  The above config can be used to run 60 jobs per hour. The algorithm only
  allows jobs at a regular interval, so if 10 jobs get enqueued at once, each
  one will get processed at 1-minute intervals.

      {ExqLimit.GCRA, period: :hour, rate: 60, burst: 5}

  The `burst` option provides the ability to overshoot the limit for a short
  period of time. It can be compared to a battery. With the above config, when
  it's fully charged, it can process 5 jobs immediately, but from the 6th job
  onwards, it will process jobs at 1-minute intervals. The battery gets
  recharged 1 point every minute and maxes out at 5.

  ### Options

  - `period` (atom | integer) - Can be either `:second`, `:minute`, `:hour`,
    `:day`. In case of an integer, it will be considered as seconds. Required
    field.
  - `burst` (integer) - Number of jobs allowed over the limit. Defaults to 0.
  - `local` (boolean) - if set to true, the rate limiting will apply to the
    local worker node. Otherwise, the rate limiting will apply to all worker
    nodes. Defaults to `false`.
  - `node_id` (string) - Unique id of the worker node. Defaults to the Exq
    node identifier. This will be used only if the local option is set to
    true.

  ### Credits

  The lua script used in this module is a slightly modified version of
  [redis-gcra](https://github.com/rwz/redis-gcra)
  """
  require Logger

  alias ExqLimit.Redis.Script
  require ExqLimit.Redis.Script

  @behaviour Exq.Dequeue.Behaviour

  Script.compile(:lease)

  defmodule State do
    @moduledoc false
    defstruct redis: nil,
              running: 0,
              emission_interval: nil,
              burst: 0,
              queue: nil,
              available?: false,
              tat_key: nil,
              last_synced: nil,
              reset_after: -1,
              retry_after: 0
  end

  @version "limit_gcra_v1"

  @impl true
  def init(%{queue: queue}, options) do
    namespace =
      Keyword.get_lazy(options, :namespace, fn -> Exq.Support.Config.get(:namespace) end)

    period =
      case Keyword.fetch!(options, :period) do
        :second -> 1
        :minute -> 60
        :hour -> 60 * 60
        :day -> 60 * 60 * 24
        seconds when is_integer(seconds) and seconds > 0 -> seconds
      end

    burst =
      case Keyword.get(options, :burst) do
        nil -> 1
        burst when is_integer(burst) and burst >= 0 -> burst + 1
      end

    prefix =
      if Keyword.get(options, :local) do
        node_id =
          Keyword.get_lazy(options, :node_id, fn ->
            Exq.Support.Config.node_identifier().node_id()
          end)

        "#{namespace}:#{@version}:#{queue}:#{node_id}:"
      else
        "#{namespace}:#{@version}:#{queue}:"
      end

    state = %State{
      redis:
        Keyword.get_lazy(options, :redis, fn ->
          Exq.Support.Config.get(:name)
          |> Exq.Support.Opts.redis_client_name()
        end),
      queue: queue,
      emission_interval: period / Keyword.fetch!(options, :rate),
      burst: burst,
      tat_key: prefix <> "tat",
      last_synced: System.monotonic_time(:millisecond) / 1000
    }

    {:ok, state}
  end

  @impl true
  def stop(_state) do
    :ok
  end

  @impl true
  def available?(state) do
    state = sync(state)
    {:ok, state.available?, state}
  end

  @impl true
  def dispatched(state), do: {:ok, %{state | running: state.running + 1, available?: false}}

  @impl true
  def processed(state), do: {:ok, %{state | running: state.running - 1}}

  @impl true
  def failed(state), do: {:ok, %{state | running: state.running - 1}}

  defp sync(state) do
    diff = System.monotonic_time(:millisecond) / 1000 - state.last_synced

    state =
      if (state.available? && diff < state.reset_after) ||
           (!state.available? && diff < state.retry_after) do
        state
      else
        lease(state)
      end

    :telemetry.execute(
      [:exq_limit, :gcra],
      %{running: state.running},
      %{queue: state.queue}
    )

    state
  end

  defp lease(state) do
    case Script.eval!(
           state.redis,
           @lease,
           [
             state.tat_key
           ],
           [state.emission_interval, state.burst]
         ) do
      {:ok, [limited, retry_after, reset_after]} ->
        {retry_after, ""} = Float.parse(retry_after)
        {reset_after, ""} = Float.parse(reset_after)
        available? = limited != 1

        %{
          state
          | available?: available?,
            retry_after: retry_after,
            reset_after: reset_after,
            last_synced: System.monotonic_time(:millisecond) / 1000
        }

      error ->
        Logger.error(
          "Failed to run lease script. Unexpected error from redis: #{inspect(error)}"
        )

        %{
          state
          | available?: false,
            retry_after: 5,
            reset_after: -1,
            last_synced: System.monotonic_time(:millisecond) / 1000
        }
    end
  end
end
lib/exq_limit/gcra.ex
0.846847
0.739681
gcra.ex
starcoder
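The arithmetic in `init/2` is easy to check by hand: with `period: :hour, rate: 60` the emission interval is `3600 / 60 = 60.0` seconds, and `burst: 5` is stored internally as `6`. A sketch (`:namespace` and `:redis` are passed explicitly so the example does not reach for a running Exq configuration; `MyApp.Redix` is an assumed connection name):

```elixir
{:ok, state} =
  ExqLimit.GCRA.init(
    %{queue: "default"},
    namespace: "exq",
    redis: MyApp.Redix,
    period: :hour,
    rate: 60,
    burst: 5
  )

state.emission_interval  #=> 60.0  (one job per minute once the burst is spent)
state.burst              #=> 6    (user burst + 1, see init/2 above)
state.tat_key            #=> "exq:limit_gcra_v1:default:tat"
```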
defmodule Jeeves.Pooled do
  @moduledoc """
  Implement a singleton (global) named pool of services.

  Creates a dynamic pool of worker services. Each service shares an initial
  state, and each invocation of a service is independent from the previous
  one (so there is no concept of claiming a service for your dedicated use).

  ### Prerequisites

  You'll need to add poolboy to your project dependencies.

  ### Usage

  To create the service:

  * Create a module that implements the API you want. This API will be
    expressed as a set of public functions. Each function will automatically
    receive the current state in a variable (by default named `state`). There
    is no need to declare this as a
    parameter.[<small>why?</small>](#why-magic-state). If a function wants to
    change the state, it must end with a call to the
    `Jeeves.Common.update_state/2` function (which will have been imported
    into your module automatically).

    For this example, we'll call the module `PooledService`.

  * Add the line `use Jeeves.Pooled` to the top of this module.

  * Adjust the other options if required.

  To start the pool:

      PooledJeeves.run()

  or

      PooledJeeves.run(initial_state)

  To consume the service:

  * Call the API functions in the service.

  ### Example

      defmodule FaceDetector do
        use Jeeves.Pooled,
            state: %{ algorithm: ViolaJones },
            state_name: :options,
            pool:  [ min: 3, max: 10 ]

        def recognize(image) do
          # calls to OpenCV or whatever...
        end
      end

  ### Options

  You can pass a keyword list to `use Jeeves.Pooled`:

  * `state:` _value_

    The default value for the initial state of all workers. Can be overridden
    (again for all workers) by passing a value to `run()`

  * `state_name:` _atom_

    The default name for the state variable is (unimaginatively) `state`.
    Use `state_name` to override this. For example, the previous example named
    the state `options`, and inside the `recognize` function you could write
    `options.algorithm` to look up the algorithm to use.

  * `name:` _atom_

    The default name for the pool is the name of the module that defines it.
    Use `name:` to change this.

  * `pool: [ ` _options_ ` ]`

    Set options for the service pool. One or more of:

    * `min: n`

      The minimum number of workers that should be active, and by extension
      the number of workers started when the pool is run. Default is 2.

    * `max: n`

      The maximum number of workers. If all workers are busy and a new
      request arrives, a new worker will be started to handle it if the
      current worker count is less than `max`. Excess idle workers will be
      quietly killed off in the background. Default value is `(min+1)*2`.

  * `showcode:` _boolean_

    If truthy, dump a representation of the generated code to STDOUT during
    compilation.

  * `timeout:` integer or float

    Specify the timeout to be used when the client calls workers in the pool.
    If all workers are busy, and none becomes free in that time, an OTP
    exception is raised. An integer specifies the timeout in milliseconds,
    and a float in seconds (so 1.5 is the same as 1500).
  """

  alias Jeeves.Util.PreprocessorState, as: PS

  @doc false
  defmacro __using__(opts \\ []) do
    generate_pooled_service(__CALLER__.module, opts)
  end

  @doc false
  def generate_pooled_service(caller, opts) do
    name = Keyword.get(opts, :service_name, :no_name)
    state = Keyword.get(opts, :state, :no_state)

    PS.start_link(caller, opts)

    quote do
      import Kernel, except: [ def: 2 ]
      import Jeeves.Common, only: [ def: 2, set_state: 1, set_state: 2 ]

      @before_compile { unquote(__MODULE__), :generate_code }

      @name unquote(name)

      def run() do
        run(unquote(state))
      end

      def run(state) do
        Jeeves.Scheduler.start_new_pool(
          worker_module: __MODULE__.Worker,
          pool_opts: unquote(opts[:pool] || [ min: 1, max: 4 ]),
          name: @name,
          state: state
        )
      end
    end
    |> Jeeves.Common.maybe_show_generated_code(opts)
  end

  @doc false
  defmacro generate_code(_) do
    { options, apis, handlers, implementations, delegators } =
      Jeeves.Common.create_functions_from_originals(__CALLER__.module, __MODULE__)

    PS.stop(__CALLER__.module)

    quote do
      unquote_splicing(delegators)

      defmodule Worker do
        use GenServer

        def start_link(args) do
          GenServer.start_link(__MODULE__, args)
        end

        unquote_splicing(apis)
        unquote_splicing(handlers)

        defmodule Implementation do
          unquote_splicing(implementations)
        end
      end
    end
    |> Jeeves.Common.maybe_show_generated_code(options)
  end

  @doc false
  defdelegate generate_api_call(options, function), to: Jeeves.Named

  @doc false
  defdelegate generate_handle_call(options, function), to: Jeeves.Named

  @doc false
  defdelegate generate_implementation(options, function), to: Jeeves.Named

  @doc false
  def generate_delegator(options, {call, _body}) do
    quote do
      def unquote(call), do: unquote(delegate_body(options, call))
    end
  end

  @doc false
  def delegate_body(options, call) do
    timeout = options[:timeout] || 5000
    request = Jeeves.Named.call_signature(call)

    quote do
      Jeeves.Scheduler.run(@name, unquote(request), unquote(timeout))
    end
  end
end
lib/jeeves/pooled.ex
0.87215
0.835416
pooled.ex
starcoder
defmodule Nesty do
  @moduledoc """
  Convenient helpers when dealing with nested keywords and maps.
  """

  @type key :: Keyword.key | Map.key
  @type default :: any
  @type value :: Keyword.value | Map.value

  @doc """
  Get the value deeply nested at the specified key path. If a key does not
  exist, a default value will be returned (`nil` if one is not provided).

  A default value can be applied on a per key and general basis. Per key
  defaults take precedence, whereas if a per key default does not exist for
  the given key, it will then fallback to using the general default value.

  The general default value is by default `nil`, but can be set by changing
  the third argument of the function. Per key default values can be set by
  including a named tuple in the key list `{ key, default }`, as opposed to
  the regular `key` entry when no per key default is required.

  Example
  -------
      iex> Nesty.get([a: [b: %{ c: 55 }]], [:a, :b, :c])
      55

      iex> Nesty.get([a: [b: %{ c: 55 }]], [:a, :d])
      nil

      iex> Nesty.get([a: [b: %{ c: 55 }]], [:a, :d], 123)
      123

      iex> Nesty.get([a: [b: %{ c: 55 }]], [:a, { :d, 1000 }, :c])
      1000
  """
  @spec get(any, [key | { key, default }, ...], default) :: value | default
  def get(data, keys, default \\ nil)
  def get(data, [{ _, default }|_], _) when not is_map(data) and not is_list(data), do: default
  def get(data, [_|_], default) when not is_map(data) and not is_list(data), do: default

  def get(data, [{ key, default }], _) do
    case data[key] do
      nil -> default
      data -> data
    end
  end

  def get(data, [key], default) do
    case data[key] do
      nil -> default
      data -> data
    end
  end

  def get(data, [{ key, default }|keys], default_value) do
    case data[key] do
      nil -> default
      data -> get(data, keys, default_value)
    end
  end

  def get(data, [key|keys], default) do
    case data[key] do
      nil -> default
      data -> get(data, keys, default)
    end
  end
end
lib/nesty.ex
0.847826
0.431464
nesty.ex
starcoder
defmodule Advent.Y2021.D14 do
  @moduledoc """
  https://adventofcode.com/2021/day/14
  """

  @typep rules :: %{charlist() => {charlist(), charlist()}}

  @doc """
  Apply 10 steps of pair insertion to the polymer template and find the most
  and least common elements in the result. What do you get if you take the
  quantity of the most common element and subtract the quantity of the least
  common element?
  """
  @spec part_one(Enumerable.t()) :: non_neg_integer()
  def part_one(input) do
    input
    |> parse_input()
    |> solution(10)
  end

  @doc """
  Apply 40 steps of pair insertion to the polymer template and find the most
  and least common elements in the result. What do you get if you take the
  quantity of the most common element and subtract the quantity of the least
  common element?
  """
  @spec part_two(Enumerable.t()) :: non_neg_integer()
  def part_two(input) do
    input
    |> parse_input()
    |> solution(40)
  end

  @spec parse_input(Enumerable.t()) :: {map(), rules()}
  defp parse_input(input) do
    template =
      input
      |> Enum.at(0)
      |> String.to_charlist()
      |> Enum.chunk_every(2, 1, :discard)
      |> Enum.frequencies()

    rules =
      input
      |> Stream.drop(2)
      |> Stream.map(fn <<pair::binary-size(2), " -> ", insert>> ->
        pair = [a, b] = String.to_charlist(pair)
        {pair, {[a, insert], [insert, b]}}
      end)
      |> Map.new()

    {template, rules}
  end

  @spec solution({map(), rules()}, non_neg_integer()) :: non_neg_integer()
  defp solution({template, rules}, steps) do
    {{_, min}, {_, max}} =
      Stream.iterate(template, fn counts ->
        Enum.reduce(counts, %{}, fn {poly, count}, acc ->
          case Map.fetch(rules, poly) do
            {:ok, {a, b}} ->
              acc
              |> Map.update(a, count, &(&1 + count))
              |> Map.update(b, count, &(&1 + count))

            :error ->
              Map.update(acc, poly, count, &(&1 + count))
          end
        end)
      end)
      |> Enum.at(steps)
      |> Enum.reduce(%{}, fn {[_a, b], count}, acc ->
        # NOTE: I have a feeling while this solves the puzzle, it isn't
        # technically correct
        Map.update(acc, b, count, &(&1 + count))
      end)
      |> Enum.min_max_by(&elem(&1, 1))

    max - min
  end
end
lib/advent/y2021/d14.ex
0.725649
0.618492
d14.ex
starcoder
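The pair-frequency trick in `solution/2` can be checked by hand on the puzzle's `NNCB` example; here is a standalone sketch of one expansion step, where the rules mirror `NN -> C` and `NC -> B`:

```elixir
step = fn counts, rules ->
  Enum.reduce(counts, %{}, fn {pair, n}, acc ->
    case Map.fetch(rules, pair) do
      {:ok, {a, b}} -> acc |> Map.update(a, n, &(&1 + n)) |> Map.update(b, n, &(&1 + n))
      :error -> Map.update(acc, pair, n, &(&1 + n))
    end
  end)
end

rules = %{'NN' => {'NC', 'CN'}, 'NC' => {'NB', 'BC'}}

step.(%{'NN' => 1}, rules)
#=> %{'CN' => 1, 'NC' => 1}

%{'NN' => 1} |> step.(rules) |> step.(rules)
#=> %{'BC' => 1, 'CN' => 1, 'NB' => 1}
```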
defmodule Zaryn.Reward do
  @moduledoc """
  Module which handles the rewards and transfer scheduling
  """

  alias Zaryn.OracleChain

  alias Zaryn.P2P
  alias Zaryn.P2P.Message.GetTransactionChain
  alias Zaryn.P2P.Message.GetUnspentOutputs
  alias Zaryn.P2P.Message.TransactionList
  alias Zaryn.P2P.Message.UnspentOutputList
  alias Zaryn.P2P.Node

  alias Zaryn.Replication

  alias __MODULE__.NetworkPoolScheduler

  alias Zaryn.TransactionChain
  alias Zaryn.TransactionChain.Transaction
  alias Zaryn.TransactionChain.TransactionData.ZARYNLedger.Transfer

  @doc """
  Get the minimum rewards for validation nodes
  """
  @spec min_validation_nodes_reward() :: float()
  def min_validation_nodes_reward do
    zaryn_eur_price =
      DateTime.utc_now()
      |> OracleChain.get_zaryn_price()
      |> Keyword.get(:eur)

    zaryn_eur_price * 50
  end

  @doc """
  Return the list of transfers to reward the validation nodes which received
  less than the minimum validation node reward.

  This gets all the unspent outputs received after the last reward date,
  determines which of them were mining rewards, and compares their sum with
  the minimum reward for a validation node.
  """
  @spec get_transfers_for_in_need_validation_nodes(last_reward_date :: DateTime.t()) ::
          reward_transfers :: list(Transfer.t())
  def get_transfers_for_in_need_validation_nodes(last_date = %DateTime{}) do
    min_validation_nodes_reward = min_validation_nodes_reward()

    Task.async_stream(P2P.authorized_nodes(), fn node = %Node{reward_address: reward_address} ->
      mining_rewards =
        reward_address
        |> get_transactions_after(last_date)
        |> Task.async_stream(&get_reward_unspent_outputs/1, timeout: 500, on_timeout: :kill_task)
        |> Stream.filter(&match?({:ok, _}, &1))
        |> Enum.flat_map(fn {:ok, unspent_outputs} -> unspent_outputs end)

      # Sum the amounts of the reward unspent outputs collected by this node
      {node, Enum.reduce(mining_rewards, 0.0, &(&1.amount + &2))}
    end)
    |> Stream.map(fn {:ok, res} -> res end)
    |> Enum.filter(fn {_, balance} -> balance < min_validation_nodes_reward end)
    |> Enum.map(fn {%Node{reward_address: address}, amount} ->
      %Transfer{to: address, amount: min_validation_nodes_reward - amount}
    end)
  end

  defp get_transactions_after(address, date) do
    last_address = TransactionChain.resolve_last_address(address, DateTime.utc_now())

    {:ok, %TransactionList{transactions: chain}} =
      last_address
      |> Replication.chain_storage_nodes()
      |> P2P.reply_first(%GetTransactionChain{address: last_address, after: date})

    chain
  end

  defp get_reward_unspent_outputs(%Transaction{address: address}) do
    {:ok, %UnspentOutputList{unspent_outputs: unspent_outputs}} =
      address
      |> Replication.chain_storage_nodes()
      |> P2P.reply_first(%GetUnspentOutputs{address: address})

    Enum.filter(unspent_outputs, &(&1.type == :reward))
  end

  def load_transaction(_), do: :ok

  @doc """
  Returns the last date of the rewards scheduling from the network pool
  """
  @spec last_scheduling_date() :: DateTime.t()
  defdelegate last_scheduling_date, to: NetworkPoolScheduler, as: :last_date

  def config_change(changed_conf) do
    changed_conf
    |> Keyword.get(NetworkPoolScheduler)
    |> NetworkPoolScheduler.config_change()
  end
end
lib/zaryn/reward.ex
0.795221
0.456591
reward.ex
starcoder
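As a back-of-the-envelope check of the reward floor: if the oracle chain quoted 0.10 EUR per ZARYN, `min_validation_nodes_reward/0` would return `0.10 * 50 = 5.0`, and a node whose reward UTXOs only summed to 3.0 would be topped up with a transfer like the one below (the address is a placeholder):

```elixir
alias Zaryn.TransactionChain.TransactionData.ZARYNLedger.Transfer

reward_address = <<0::256>>  # placeholder node reward address

%Transfer{to: reward_address, amount: 5.0 - 3.0}
#=> %Transfer{to: <<0::256>>, amount: 2.0}
```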
defmodule RemoteDockers.ContainerConfig do
  alias RemoteDockers.MountPoint

  @enforce_keys [:Image]
  @derive Jason.Encoder
  defstruct [
    :Env,
    :HostConfig,
    :Image
  ]

  @doc """
  Build a container configuration with a specified `image_name`.

  ## Example:
  ```elixir
  iex> ContainerConfig.new("hello-world")
  %ContainerConfig{
    :Image => "hello-world",
    :Env => [],
    :HostConfig => %{}
  }
  ```
  """
  def new(image_name) do
    %RemoteDockers.ContainerConfig{
      Env: [],
      HostConfig: %{},
      Image: image_name
    }
  end

  @doc """
  Add an environment variable to the specified container configuration.

  ## Example:
  ```elixir
  iex> ContainerConfig.new("hello-world")
  ...> |> ContainerConfig.add_env("TOTO", "/path/to/toto")
  %ContainerConfig{
    :Image => "hello-world",
    :Env => ["TOTO=/path/to/toto"],
    :HostConfig => %{},
  }
  ```
  """
  @spec add_env(RemoteDockers.ContainerConfig, bitstring, bitstring) ::
          RemoteDockers.ContainerConfig
  def add_env(%RemoteDockers.ContainerConfig{} = container_config, key, value) do
    env =
      Map.get(container_config, :Env)
      |> List.insert_at(-1, key <> "=" <> value)

    container_config
    |> Map.put(:Env, env)
  end

  @doc """
  Add a volume mount point description to the container host configuration.

  ## Example:
  ```elixir
  mount_point = %MountPoint{
    "Source": "/path/to/host/mount/point",
    "Target": "/path/to/container/directory",
    "Type": "bind"
  }

  ContainerConfig.new("MyImage")
  |> ContainerConfig.add_mount_point(mount_point)
  ```
  """
  @spec add_mount_point(RemoteDockers.ContainerConfig, MountPoint) ::
          RemoteDockers.ContainerConfig
  def add_mount_point(
        %RemoteDockers.ContainerConfig{} = container_config,
        %MountPoint{} = mount_point
      ) do
    mount_points =
      container_config
      |> Map.get(:HostConfig, %{})
      |> Map.get(:Mounts, [])
      |> List.insert_at(-1, mount_point)

    update_host_config(container_config, :Mounts, mount_points)
  end

  @doc """
  Add a volume mount point binding (i.e. mount type is `bind`) to the container
  configuration.

  `source` and `target` values are respectively mapped to the `"Source"` and
  `"Target"` mount point description fields.

  See `add_mount_point/2`

  ## Example:
  ```elixir
  iex> ContainerConfig.new("image_name")
  ...> |> ContainerConfig.add_mount_point("/path/to/a/host/mount/point", "/path/to/a/container/directory")
  ...> |> ContainerConfig.add_mount_point("/path/to/another/host/mount/point", "/path/to/another/container/directory")
  %ContainerConfig{
    :Image => "image_name",
    :Env => [],
    :HostConfig => %{
      :Mounts => [
        %MountPoint{
          :Source => "/path/to/a/host/mount/point",
          :Target => "/path/to/a/container/directory",
          :Type => "bind"
        },
        %MountPoint{
          :Source => "/path/to/another/host/mount/point",
          :Target => "/path/to/another/container/directory",
          :Type => "bind"
        }
      ]
    }
  }
  ```
  """
  @spec add_mount_point(RemoteDockers.ContainerConfig, bitstring, bitstring, bitstring) ::
          RemoteDockers.ContainerConfig
  def add_mount_point(
        %RemoteDockers.ContainerConfig{} = container_config,
        source,
        target,
        type \\ "bind"
      ) do
    add_mount_point(container_config, MountPoint.new(source, target, type))
  end

  @doc """
  Add a DNS option to the container configuration.

  See `add_dns_option/2`

  ## Example:
  ```elixir
  iex> ContainerConfig.new("image_name")
  ...> |> ContainerConfig.add_dns_option("dns option")
  %ContainerConfig{
    :Image => "image_name",
    :Env => [],
    :HostConfig => %{
      :DnsOptions => ["dns option"],
    }
  }
  ```
  """
  @spec add_dns_option(RemoteDockers.ContainerConfig, bitstring) ::
          RemoteDockers.ContainerConfig
  def add_dns_option(
        %RemoteDockers.ContainerConfig{} = container_config,
        dns_option
      ) do
    dns_options =
      container_config
      |> Map.get(:HostConfig, %{})
      |> Map.get(:DnsOptions, [])
      |> List.insert_at(-1, dns_option)

    update_host_config(container_config, :DnsOptions, dns_options)
  end

  @doc """
  Add an extra Host to the container configuration.

  See `add_extra_host/3`

  ## Example:
  ```elixir
  iex> ContainerConfig.new("image_name")
  ...> |> ContainerConfig.add_extra_host("my_host", "192.168.10.10")
  %ContainerConfig{
    :Image => "image_name",
    :Env => [],
    :HostConfig => %{
      :ExtraHosts => ["my_host:192.168.10.10"],
    }
  }
  ```
  """
  @spec add_extra_host(RemoteDockers.ContainerConfig, bitstring, bitstring) ::
          RemoteDockers.ContainerConfig
  def add_extra_host(
        %RemoteDockers.ContainerConfig{} = container_config,
        hostname,
        ip
      ) do
    extra_hosts =
      container_config
      |> Map.get(:HostConfig, %{})
      |> Map.get(:ExtraHosts, [])
      |> List.insert_at(-1, hostname <> ":" <> ip)

    update_host_config(container_config, :ExtraHosts, extra_hosts)
  end

  @doc """
  Add parameter to the HostConfig element.

  See `update_host_config/3`

  ## Example:
  ```elixir
  iex> ContainerConfig.new("image_name")
  ...> |> ContainerConfig.update_host_config(:my_custom_key, :my_custom_value)
  %ContainerConfig{
    :Image => "image_name",
    :Env => [],
    :HostConfig => %{
      :my_custom_key => :my_custom_value,
    }
  }
  ```
  """
  @spec update_host_config(RemoteDockers.ContainerConfig, bitstring, any) ::
          RemoteDockers.ContainerConfig
  def update_host_config(
        %RemoteDockers.ContainerConfig{} = container_config,
        key,
        value
      ) do
    host_config =
      container_config
      |> Map.get(:HostConfig, %{})
      |> Map.put(key, value)

    container_config
    |> Map.put(:HostConfig, host_config)
  end
end
lib/container_config.ex
0.86712
0.598459
container_config.ex
starcoder
defmodule Godfist.DataDragon.Data do
  @moduledoc """
  Interact with the data endpoints from DataDragon.

  Each function must be passed a locale as an atom; you can consult the available
  languages on `Godfist.Static.languages/1`. The default language is English
  (United States) if none is specified.

  These are all available languages in a list.

  `[:argentina, :australia, :china, :czech, :french, :german, :greek, :hungarian,
  :indonesian, :italian, :japanese, :korean, :malay, :malaysia, :mexico, :philippines,
  :poland, :polish, :portuguese, :romanian, :russian, :singapore, :spain, :taiwan,
  :thai, :turkish, :uk, :us, :vietnamese]`

  Almost every locale has the name of its country because there are variations
  of each language from country to country.
  """

  alias Godfist.LeagueRates

  @dragon :dragon
  @endpoint "/7.24.2/data"

  @languages %{
    czech: "cs_CZ",
    german: "de_DE",
    greek: "el_GR",
    australia: "en_AU",
    uk: "en_GB",
    philippines: "en_PH",
    poland: "en_PL",
    singapore: "en_SG",
    us: "en_US",
    argentina: "es_AR",
    spain: "es_ES",
    mexico: "es_MX",
    french: "fr_FR",
    hungarian: "hu_HU",
    indonesian: "id_ID",
    italian: "it_IT",
    japanese: "ja_JP",
    korean: "ko_KR",
    malay: "ms_MY",
    polish: "pl_PL",
    portuguese: "pt_PR",
    romanian: "ro_RO",
    russian: "ru_RU",
    thai: "th_TH",
    turkish: "tr_TR",
    vietnamese: "vn_VN",
    china: "zh_CN",
    malaysia: "zh_MY",
    taiwan: "zh_TW"
  }

  @doc """
  Get information about profile icons.

  ## Example

  ```elixir
  iex> Godfist.DataDragon.Data.icons(:greek)
  ```
  """
  @spec icons(atom) :: {:ok, map} | {:error, String.t()}
  def icons(locale \\ :us) do
    lang = get_loc(locale)

    rest = @endpoint <> "/#{lang}/profileicon.json"

    LeagueRates.handle_rate(@dragon, rest, :other)
  end

  @doc """
  Get information about champions.

  ## Example

  ```elixir
  iex> Godfist.DataDragon.Data.champions(:us)
  ```
  """
  @spec champions(atom) :: {:ok, map} | {:error, String.t()}
  def champions(locale \\ :us) do
    lang = get_loc(locale)

    rest = @endpoint <> "/#{lang}/champion.json"

    LeagueRates.handle_rate(@dragon, rest, :other)
  end

  @doc """
  Get information about a single champion.

  ## Example

  ```elixir
  iex> Godfist.DataDragon.Data.single_champ("Aatrox", :japanese)
  ```
  """
  @spec single_champ(String.t(), atom) :: {:ok, map} | {:error, String.t()}
  def single_champ(name, locale \\ :us) do
    lang = get_loc(locale)

    rest = @endpoint <> "/#{lang}/champion/#{name}.json"

    LeagueRates.handle_rate(@dragon, rest, :other)
  end

  @doc """
  Get information about the items.

  ## Example

  ```elixir
  iex> Godfist.DataDragon.Data.items(:spain)
  ```
  """
  @spec items(atom) :: {:ok, map} | {:error, String.t()}
  def items(locale \\ :us) do
    lang = get_loc(locale)

    rest = @endpoint <> "/#{lang}/item.json"

    LeagueRates.handle_rate(@dragon, rest, :other)
  end

  @doc """
  Get information about summoner spells.

  ## Example

  ```elixir
  iex> Godfist.DataDragon.Data.summ_spells()
  ```
  """
  @spec summ_spells(atom) :: {:ok, map} | {:error, String.t()}
  def summ_spells(locale \\ :us) do
    lang = get_loc(locale)

    rest = @endpoint <> "/#{lang}/summoner.json"

    LeagueRates.handle_rate(@dragon, rest, :other)
  end

  # priv to get locale
  defp get_loc(locale), do: Map.get(@languages, locale)
end
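# Illustrative usage sketch (not part of the library): fetching a single
# champion and reading its title. This performs a live HTTP request through
# LeagueRates, and the "data"/"title" keys are assumptions about the
# DataDragon response shape.
defmodule Godfist.DataDragon.DataExample do
  @moduledoc false
  alias Godfist.DataDragon.Data

  def aatrox_title do
    with {:ok, %{"data" => champs}} <- Data.single_champ("Aatrox", :us) do
      get_in(champs, ["Aatrox", "title"])
    end
  end
end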
lib/godfist/requests/data_dragon/data.ex
0.803637
0.832373
data.ex
starcoder
defmodule Mnemonix.Supervision do
  @moduledoc """
  Functions to start a store server.

  Using this module will define `start_link` functions that allow your module to offer an API
  for booting up a `Mnemonix.Store.Server`.

  Providing a `:default` option will allow you to override the configuration described in
  `Mnemonix.Application.default/0` with your own defaults that are used to expand the arguments
  given to `start_link/0` and `start_link/1` into a fully-specified `start_link/2` call.
  """

  defmacro __using__(opts \\ []) do
    {singleton, opts} = Mnemonix.Singleton.Behaviour.establish_singleton(__CALLER__.module, opts)

    store =
      if singleton,
        do:
          Mnemonix.Singleton.Behaviour.determine_singleton(
            __CALLER__.module,
            Keyword.get(opts, :singleton)
          )

    quote location: :keep do
      alias Mnemonix.Store

      @doc false
      def defaults do
        {default_impl, default_opts} = Mnemonix.Application.specification()

        # A bare module overrides just the implementation; a bare keyword list
        # overrides just the options; a tuple overrides both.
        case Keyword.get(unquote(opts), :default, Mnemonix.Application.specification()) do
          impl when is_atom(impl) -> {impl, default_opts}
          opts when is_list(opts) -> {default_impl, opts}
          {impl, opts} -> {impl, opts}
        end
      end

      @doc """
      Starts a new store using the default store implementation and options.

      The returned `t:GenServer.server/0` reference can be used in the `Mnemonix` API.
      """
      @spec start_link :: GenServer.on_start()
      def start_link do
        {implementation, options} = defaults()
        start_link(implementation, options)
      end

      @doc """
      Starts a new store using the default store implementation and provided `options`.

      ## Examples

          iex> {:ok, store} = Mnemonix.start_link(Mnemonix.Stores.Map)
          iex> Mnemonix.put(store, :foo, :bar)
          iex> Mnemonix.get(store, :foo)
          :bar
      """
      @spec start_link(Store.Server.options()) :: GenServer.on_start()
      def start_link(options) when is_list(options) do
        {implementation, default_options} = defaults()
        start_link(implementation, Keyword.merge(default_options, options))
      end

      @doc """
      Starts a new store using the provided store `implementation` and default options.

      The returned `t:GenServer.server/0` reference can be used in the `Mnemonix` API.

      ## Examples

          iex> {:ok, store} = Mnemonix.start_link(Mnemonix.Stores.Map)
          iex> Mnemonix.put(store, :foo, :bar)
          iex> Mnemonix.get(store, :foo)
          :bar
      """
      @spec start_link(Store.Behaviour.t()) :: GenServer.on_start()
      def start_link(implementation) do
        {_implementation, default_options} = defaults()
        start_link(implementation, default_options)
      end

      @doc """
      Starts a new store using the provided store `implementation` and `options`.

      The returned `t:GenServer.server/0` reference can be used in the `Mnemonix` API.

      ## Examples

          iex> options = [initial: %{foo: :bar}, name: NamedStore]
          iex> {:ok, _store} = Mnemonix.start_link(Mnemonix.Stores.Map, options)
          iex> Mnemonix.get(NamedStore, :foo)
          :bar
      """
      @spec start_link(Store.Behaviour.t(), Store.Server.options()) :: GenServer.on_start()
      def start_link(implementation, options) do
        implementation.start_link(Keyword.put_new(options, :name, unquote(store)))
      end
    end
  end
end
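# Illustrative usage sketch (not part of the library): a module adopting the
# start_link API defined above. Assumes the singleton helpers accept a module
# with no special singleton options; Mnemonix.Stores.Map is the in-memory
# store shipped with Mnemonix.
defmodule Mnemonix.Supervision.Example do
  @moduledoc false
  use Mnemonix.Supervision, default: Mnemonix.Stores.Map

  # Callers now get the generated functions:
  #
  #     {:ok, store} = Mnemonix.Supervision.Example.start_link()
  #     Mnemonix.put(store, :foo, :bar)
end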
lib/mnemonix/supervision.ex
0.913493
0.424412
supervision.ex
starcoder
defmodule Comeonin.Bcrypt do
  @moduledoc """
  Module to handle bcrypt authentication.

  Bcrypt is a key derivation function for passwords designed by Niels Provos
  and David Mazières. Bcrypt is an adaptive function, which means that it can
  be configured to remain slow and resistant to brute-force attacks even as
  computational power increases.

  This bcrypt implementation is based on the latest OpenBSD version, which
  fixed a small issue that affected some passwords longer than 72 characters.

  The computationally intensive code is run in C, using Erlang NIFs. One
  concern about NIFs is that they block the Erlang VM, and so it is better
  to make sure these functions do not run for too long. This bcrypt
  implementation has been adapted so that each NIF runs for as short a time
  as possible.
  """

  use Bitwise
  alias Comeonin.BcryptBase64
  alias Comeonin.Config
  alias Comeonin.Tools

  @on_load {:init, 0}

  def init do
    path = :filename.join(:code.priv_dir(:comeonin), 'bcrypt_nif')
    :ok = :erlang.load_nif(path, 0)
  end

  @doc """
  Generate a salt for use with the `hashpass` function.

  The log_rounds parameter determines the computational complexity
  of the generation of the password hash. Its default is 12, the minimum is 4,
  and the maximum is 31.
  """
  def gen_salt(log_rounds) when log_rounds in 4..31 do
    Tools.random_bytes(16)
    |> :binary.bin_to_list
    |> fmt_salt(zero_str(log_rounds))
  end

  def gen_salt(_), do: gen_salt(Config.bcrypt_log_rounds)
  def gen_salt, do: gen_salt(Config.bcrypt_log_rounds)

  @doc """
  Hash the password using bcrypt.
  """
  def hashpass(password, salt) when is_binary(salt) and is_binary(password) do
    if byte_size(salt) == 29 do
      hashpw(:binary.bin_to_list(password), :binary.bin_to_list(salt))
    else
      raise ArgumentError, message: "The salt is the wrong length."
    end
  end

  def hashpass(_password, _salt) do
    raise ArgumentError, message: "Wrong type. The password and salt need to be strings."
  end

  @doc """
  Hash the password with a salt which is randomly generated.

  To change the complexity (and the time taken) of the password hash
  calculation, you need to change the value for `bcrypt_log_rounds`
  in the config file.
  """
  def hashpwsalt(password) do
    hashpass(password, gen_salt(Config.bcrypt_log_rounds))
  end

  @doc """
  Check the password.

  The check is performed in constant time to avoid timing attacks.
  """
  def checkpw(password, hash) when is_binary(password) and is_binary(hash) do
    hashpw(:binary.bin_to_list(password), :binary.bin_to_list(hash))
    |> Tools.secure_check(hash)
  end

  def checkpw(_password, _hash) do
    raise ArgumentError, message: "Wrong type. The password and hash need to be strings."
  end

  @doc """
  Perform a dummy check for a user that does not exist.

  This always returns false. The reason for implementing this check is
  in order to make user enumeration by timing responses more difficult.
  """
  def dummy_checkpw do
    hashpwsalt("password")
    false
  end

  @doc """
  Initialize the P-box and S-box tables with the digits of Pi,
  and then start the key expansion process.
  """
  def bf_init(key, key_len, salt)
  def bf_init(_, _, _), do: exit(:nif_library_not_loaded)

  @doc """
  The main key expansion function. This function is called
  2^log_rounds times.
  """
  def bf_expand(state, key, key_len, salt)
  def bf_expand(_, _, _, _), do: exit(:nif_library_not_loaded)

  @doc """
  Encrypt and return the hash.
  """
  def bf_encrypt(state)
  def bf_encrypt(_), do: exit(:nif_library_not_loaded)

  defp hashpw(password, salt) do
    [prefix, log_rounds, salt] = Enum.take(salt, 29) |> :string.tokens('$')
    bcrypt(password, salt, prefix, log_rounds) |> fmt_hash(salt, prefix, zero_str(log_rounds))
  end

  defp bcrypt(key, salt, prefix, log_rounds) do
    key_len = length(key) + 1
    # `prefix` is a charlist (it comes from `:string.tokens/2`), and a rebinding
    # inside `if` does not escape its scope, so rebind explicitly here.
    key_len = if prefix == '2b' and key_len > 73, do: 73, else: key_len
    {salt, rounds} = prepare_keys(salt, List.to_integer(log_rounds))

    bf_init(key, key_len, salt)
    |> expand_keys(key, key_len, salt, rounds)
    |> bf_encrypt
  end

  defp prepare_keys(salt, log_rounds) when log_rounds in 4..31 do
    {BcryptBase64.decode(salt), bsl(1, log_rounds)}
  end

  defp prepare_keys(_, _) do
    raise ArgumentError, message: "Wrong number of rounds."
  end

  defp expand_keys(state, _key, _key_len, _salt, 0), do: state

  defp expand_keys(state, key, key_len, salt, rounds) do
    bf_expand(state, key, key_len, salt)
    |> expand_keys(key, key_len, salt, rounds - 1)
  end

  defp zero_str(log_rounds) do
    if log_rounds < 10, do: "0#{log_rounds}", else: "#{log_rounds}"
  end

  defp fmt_salt(salt, log_rounds) do
    "$2b$#{log_rounds}$#{BcryptBase64.encode(salt)}"
  end

  defp fmt_hash(hash, salt, prefix, log_rounds) do
    "$#{prefix}$#{log_rounds}$#{salt}#{BcryptBase64.encode(hash)}"
  end
end
deps/comeonin/lib/comeonin/bcrypt.ex
0.792986
0.549157
bcrypt.ex
starcoder
defmodule OMG.Burner.ThresholdAgent do
  @moduledoc """
  ThresholdAgent is a background task that periodically fetches the current gas
  price and checks whether any accumulated-fee thresholds have been met. If so,
  it triggers a fee exit.
  """
  use AdjustableServer

  alias OMG.Burner.HttpRequester, as: Requester
  alias OMG.Burner.State

  require Logger

  def start_link(args \\ %{}) do
    GenServer.start_link(__MODULE__, args, name: __MODULE__)
  end

  # GenServer

  def init(args) do
    casual_period = Map.get(args, :casual_period) || Application.get_env(:omg_burner, :casual_period)

    short_period =
      Map.get(args, :short_period) || Application.get_env(:omg_burner, :short_period) ||
        casual_period

    max_gas_price = Map.get(args, :max_gas_price) || Application.get_env(:omg_burner, :max_gas_price)

    state = %{
      casual_period: casual_period,
      short_period: short_period,
      max_gas_price: max_gas_price,
      active_period: casual_period
    }

    schedule_work(state)
    {:ok, state}
  end

  # handlers

  def handle_info(:loop, state) do
    new_state = state |> do_work()
    schedule_work(new_state)
    {:noreply, new_state}
  end

  # private

  defp schedule_work(state) do
    Process.send_after(self(), :loop, state.active_period)
    :ok
  end

  defp do_work(state) do
    with :ok <- check_gas_price(state) do
      check_thresholds()
      state
    else
      :error -> Map.put(state, :active_period, state.short_period)
    end
  end

  defp check_gas_price(%{max_gas_price: max_gas_price} = _state) do
    with {:ok, current_price} <- Requester.get_gas_price(),
         true <- current_price <= max_gas_price do
      Logger.info("Current gas price: #{current_price}")
      :ok
    else
      :error ->
        Logger.error("A problem with the gas station occurred. Check the connection or API changes")
        :error

      false ->
        Logger.info("Gas price exceeds maximum value")
        :error
    end
  end

  defp check_thresholds() do
    pending_tokens = State.get_pending_fees() |> Enum.map(fn {token, _, _} -> token end)
    accumulated_tokens = State.get_accumulated_fees() |> Enum.map(fn {token, _} -> token end)

    tokens_to_check = accumulated_tokens -- pending_tokens
    Enum.each(tokens_to_check, &check_threshold/1)
  end

  defp check_threshold(token) do
    threshold_info =
      Application.get_env(:omg_burner, :thresholds)
      |> Map.get(token)

    with :ready <- do_check_threshold(token, threshold_info) do
      # TODO: do not check gas price twice
      {:ok, current_gas_price} = OMG.Burner.HttpRequester.get_gas_price()
      OMG.Burner.start_fee_exit(token, %{gas_price: current_gas_price})
    else
      :unsupported_token -> Logger.error("Missing configuration for #{token}")
    end

    :ok
  end

  defp do_check_threshold(_token, nil), do: :unsupported_token

  defp do_check_threshold(token, info) do
    token_id = Map.fetch!(info, :coinmarketcap_id)
    decimals = Map.fetch!(info, :decimals)
    currency = Map.fetch!(info, :currency)
    threshold_value = Map.fetch!(info, :value)

    {:ok, price} = Requester.get_token_price(token_id, currency)
    {:ok, accumulated} = State.get_accumulated_fees(token)

    check_ready(accumulated, price, threshold_value, decimals)
  end

  defp check_ready(accumulated, price, threshold, decimals) do
    case accumulated / :math.pow(10, decimals) * price >= threshold do
      true -> :ready
      false -> :not_ready
    end
  end
end
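# Illustrative usage sketch (not part of the library): starting the agent with
# explicit settings instead of relying on :omg_burner application config. The
# numbers are hypothetical (periods in milliseconds, gas price in wei).
defmodule OMG.Burner.ThresholdAgentExample do
  @moduledoc false

  def start do
    OMG.Burner.ThresholdAgent.start_link(%{
      casual_period: 60_000,
      short_period: 5_000,
      max_gas_price: 20_000_000_000
    })
  end
end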
apps/omg_burner/lib/omg_burner/threshold_agent.ex
0.657978
0.401834
threshold_agent.ex
starcoder
defmodule Benchee.Formatters.Console.Memory do @moduledoc false # This deals with just the formatting of the run time results. They are similar # to the way the memory results are formatted, but different enough to where the # abstractions start to break down pretty significantly, so I wanted to extract # these two things into separate modules to avoid confusion. alias Benchee.{ Conversion, Conversion.Count, Conversion.Memory, Conversion.Unit, Formatters.Console.Helpers, Scenario, Statistics } @type unit_per_statistic :: %{atom => Unit.t()} # Length of column header @average_width 15 @deviation_width 11 @median_width 15 @percentile_width 15 @minimum_width 15 @maximum_width 15 @sample_size_width 15 @mode_width 25 @doc """ Formats the memory statistics to a report suitable for output on the CLI. If all memory measurements are the same and we have a standard deviation of 0.0 for each scenario, we don't show the statistics and report just on the single measured memory usage. """ @spec format_scenarios([Scenario.t()], map) :: [String.t(), ...] def format_scenarios(scenarios, config) do if memory_measurements_present?(scenarios) do render(scenarios, config) else [] end end defp memory_measurements_present?(scenarios) do Enum.any?(scenarios, fn scenario -> scenario.memory_usage_data.statistics.sample_size > 0 end) end defp render(scenarios, config) do scaling_strategy = config.unit_scaling units = Conversion.units(scenarios, scaling_strategy) label_width = Helpers.label_width(scenarios) hide_statistics = all_have_deviation_of_0?(scenarios) List.flatten([ "\nMemory usage statistics:\n", column_descriptors(label_width, hide_statistics), scenario_reports(scenarios, units, label_width, hide_statistics), comparison_report(scenarios, units, label_width, config, hide_statistics), extended_statistics_report(scenarios, units, label_width, config, hide_statistics) ]) end defp all_have_deviation_of_0?(scenarios) do Enum.all?(scenarios, fn scenario -> scenario.memory_usage_data.statistics.std_dev == 0.0 end) end defp column_descriptors(label_width, hide_statistics) defp column_descriptors(label_width, false) do "\n~*s~*s~*s~*s~*s\n" |> :io_lib.format([ -label_width, "Name", @average_width, "average", @deviation_width, "deviation", @median_width, "median", @percentile_width, "99th %" ]) |> to_string end defp column_descriptors(label_width, true) do "\n~*s~*s\n" |> :io_lib.format([ -label_width, "Name", @average_width, "Memory usage" ]) |> to_string end @spec scenario_reports([Scenario.t()], unit_per_statistic, integer, boolean) :: [String.t()] defp scenario_reports(scenarios, units, label_width, hide_statistics) defp scenario_reports([scenario | other_scenarios], units, label_width, true) do [ reference_report(scenario, units, label_width), comparisons(other_scenarios, units, label_width), "\n**All measurements for memory usage were the same**\n" ] end defp scenario_reports(scenarios, units, label_width, hide_statistics) do Enum.map(scenarios, fn scenario -> format_scenario(scenario, units, label_width, hide_statistics) end) end @na "N/A" @spec format_scenario(Scenario.t(), unit_per_statistic, integer, boolean) :: String.t() defp format_scenario(scenario, units, label_width, hide_statistics) defp format_scenario( scenario = %Scenario{memory_usage_data: %{statistics: %{sample_size: 0}}}, _, label_width, _ ) do warning = "WARNING the scenario \"#{scenario.name}\" has no memory measurements!" 
<> " This is probably a bug please report it!\n" <> "https://github.com/PragTob/benchee/issues/new" data = "~*ts~*ts\n" |> :io_lib.format([ -label_width, scenario.name, @average_width, @na ]) |> to_string warning <> "\n" <> data end defp format_scenario(scenario, %{memory: memory_unit}, label_width, false) do %Scenario{ name: name, memory_usage_data: %{ statistics: %Statistics{ average: average, std_dev_ratio: std_dev_ratio, median: median, percentiles: %{99 => percentile_99} } } } = scenario "~*ts~*ts~*ts~*ts~*ts\n" |> :io_lib.format([ -label_width, name, @average_width, memory_output(average, memory_unit), @deviation_width, Helpers.deviation_output(std_dev_ratio), @median_width, memory_output(median, memory_unit), @percentile_width, memory_output(percentile_99, memory_unit) ]) |> to_string end defp format_scenario(scenario, %{memory: memory_unit}, label_width, true) do %Scenario{ name: name, memory_usage_data: %{ statistics: %Statistics{ average: average } } } = scenario "~*ts~*ts\n" |> :io_lib.format([ -label_width, name, @average_width, memory_output(average, memory_unit) ]) |> to_string end @spec comparison_report([Scenario.t()], unit_per_statistic, integer, map, boolean) :: [ String.t() ] defp comparison_report(scenarios, units, label_width, config, hide_statistics) # No need for a comparison when only one benchmark was run defp comparison_report([_scenario], _, _, _, _), do: [] defp comparison_report(_, _, _, %{comparison: false}, _), do: [] defp comparison_report(_, _, _, _, true), do: [] defp comparison_report([scenario | other_scenarios], units, label_width, _, _) do [ Helpers.descriptor("Comparison"), reference_report(scenario, units, label_width) | comparisons(other_scenarios, units, label_width) ] end defp reference_report(scenario, %{memory: memory_unit}, label_width) do %Scenario{name: name, memory_usage_data: %{statistics: %Statistics{median: median}}} = scenario "~*s~*s\n" |> :io_lib.format([ -label_width, name, @median_width, memory_output(median, memory_unit) ]) |> to_string end @spec comparisons([Scenario.t()], unit_per_statistic, integer) :: [String.t()] defp comparisons(scenarios_to_compare, units, label_width) do Enum.map( scenarios_to_compare, fn scenario -> statistics = scenario.memory_usage_data.statistics memory_format = memory_output(statistics.average, units.memory) Helpers.format_comparison( scenario.name, statistics, memory_format, "memory usage", units.memory, label_width, @median_width ) end ) end defp memory_output(nil, _unit), do: "N/A" defp memory_output(memory, unit) do Memory.format({Memory.scale(memory, unit), unit}) end defp extended_statistics_report(scenarios, units, label_width, config, hide_statistics) defp extended_statistics_report(_, _, _, _, true), do: [] defp extended_statistics_report(scenarios, units, label_width, %{extended_statistics: true}, _) do [ Helpers.descriptor("Extended statistics"), extended_column_descriptors(label_width) | extended_statistics(scenarios, units, label_width) ] end defp extended_statistics_report(_, _, _, _, _), do: [] defp extended_column_descriptors(label_width) do "\n~*s~*s~*s~*s~*s\n" |> :io_lib.format([ -label_width, "Name", @minimum_width, "minimum", @maximum_width, "maximum", @sample_size_width, "sample size", @mode_width, "mode" ]) |> to_string end defp extended_statistics(scenarios, units, label_width) do Enum.map(scenarios, fn scenario -> format_scenario_extended(scenario, units, label_width) end) end defp format_scenario_extended(scenario, %{memory: memory_unit}, label_width) do %Scenario{ name: name, 
memory_usage_data: %{ statistics: %Statistics{ minimum: minimum, maximum: maximum, sample_size: sample_size, mode: mode } } } = scenario "~*s~*ts~*ts~*ts~*ts\n" |> :io_lib.format([ -label_width, name, @minimum_width, Helpers.count_output(minimum, memory_unit), @maximum_width, Helpers.count_output(maximum, memory_unit), @sample_size_width, Count.format(sample_size), @mode_width, Helpers.mode_out(mode, memory_unit) ]) |> to_string end end
lib/benchee/formatters/console/memory.ex
0.82379
0.524029
memory.ex
starcoder
defmodule Ig do
  @moduledoc """
  Public interface for using IG Api's wrapper.

  It expects following config:

  ```
  config :ig,
    users: %{
      user_name: %{
        identifier: "...",
        password: "...",
        api_key: "...",
        demo: false
      }
    }
  ```

  where:

  - `user_name` is a human readable reference that will be used to point
    which user should be used (it can be any valid atom)
  - `identifier` is your username
  - `api_key` can be obtained from "My Account" on IG's dealing platform
  - `demo` confirms that this is a demo account

  The idea behind structuring the config this way is that it allows you to
  easily use multiple accounts (for example, to minimize risk) just by
  referring to them by a freely picked name.

  To clarify: an account is a whole user profile with an assigned email. For
  example, two accounts would mean that you use two completely separate
  profiles with different login details, personal details etc. This can be
  beneficial when, for example, one account is classified as a "professional
  trader" (with high margins available but without many safety gates) and the
  other one is a retail customer (higher premiums but more safety features).
  It can also be used to mitigate increased margins when trading high volumes.

  To sum up:

  - account - single per person, you log in with it
  - subaccount - listed as "accounts" in My IG

  Ig spins up a further server per account and acts as a gateway to execute
  any function on the account. It allows you to use multiple accounts in
  parallel.
  """
  use GenServer

  defmodule State do
    defstruct users: []
  end

  ## Public interface

  def start(_type, _args) do
    start_link(name: :IG)
  end

  @spec start_link(any) :: GenServer.on_start()
  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, nil, opts)
  end

  @spec init(any) :: {:ok, %State{}}
  def init(_) do
    users =
      (Application.get_env(:ig, :users) || [])
      |> Enum.map(&init_account/1)
      |> Enum.into(%{})

    {:ok,
     %State{
       users: users
     }}
  end

  @spec get_users(pid()) :: %{atom => pid()}
  def get_users(pid \\ :IG) do
    GenServer.call(pid, :get_users)
  end

  def get_user(user, pid \\ :IG) do
    GenServer.call(pid, {:get_user, user})
  end

  ## Callbacks

  def handle_call(:get_users, _from, state) do
    {:reply, {:ok, state.users}, state}
  end

  def handle_call({:get_user, user}, _from, state) do
    {:reply, {:ok, state.users[user]}, state}
  end

  ## Private functions

  @type credential ::
          {:identifier, String.t()}
          | {:password, String.t()}
          | {:api_key, String.t()}
          | {:demo, boolean()}

  @spec init_account({atom(), [credential]}) :: {atom(), {:ok, pid()}}
  defp init_account({user, credentials}) do
    {:ok, pid} = Ig.User.start_link(credentials, name: :"user-#{user}")
    {user, pid}
  end
end
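# Illustrative usage sketch (not part of the library): booting the gateway and
# looking up the per-user servers created from config. Assumes a :main user is
# configured under the :ig application environment as shown in the moduledoc.
defmodule Ig.Example do
  @moduledoc false

  def demo do
    {:ok, pid} = Ig.start_link(name: :IG_EXAMPLE)
    {:ok, users} = Ig.get_users(pid)
    {:ok, _user_pid} = Ig.get_user(:main, pid)
    map_size(users)
  end
end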
lib/ig.ex
0.879121
0.678893
ig.ex
starcoder
defmodule SchedEx do
  @moduledoc """
  SchedEx schedules jobs (either an m,f,a or a function) to run in the future. These jobs are
  run in isolated processes, and are unsupervised.
  """

  @doc """
  Runs the given module, function and arguments at the given time
  """
  def run_at(m, f, a, %DateTime{} = time) when is_atom(m) and is_atom(f) and is_list(a) do
    run_at(fn -> apply(m, f, a) end, time)
  end

  @doc """
  Runs the given function at the given time
  """
  def run_at(func, %DateTime{} = time) when is_function(func) do
    delay = DateTime.diff(time, DateTime.utc_now(), :millisecond)
    run_in(func, delay)
  end

  @doc """
  Runs the given module, function and arguments in the given number of units (this corresponds
  to milliseconds unless a custom `time_scale` is specified). Any values in the arguments list
  which are equal to the magic symbol `:sched_ex_scheduled_time` are replaced with the scheduled
  execution time for each invocation

  Supports the following options:

  * `repeat`: Whether or not this job should be recurring
  * `start_time`: A `DateTime` to use as the basis to offset from
  * `time_scale`: A module that implements the `SchedEx.TimeScale` behaviour, by default is set
  to `SchedEx.IdentityTimeScale`. Can be used to speed up time (often used for speeding up test runs)
  * `name`: To attach a name to the process. Useful for adding a name to Registry to lookup later.
  ie. {:via, Registry, {YourRegistryName, "scheduled-task-1"}}
  """
  def run_in(m, f, a, delay, opts \\ []) when is_atom(m) and is_atom(f) and is_list(a) do
    run_in(mfa_to_fn(m, f, a), delay, opts)
  end

  @doc """
  Runs the given function in the given number of units (this corresponds to milliseconds unless
  a custom `time_scale` is specified). If func is of arity 1, the scheduled execution time will
  be passed for each invocation

  Takes the same options as `run_in/5`
  """
  def run_in(func, delay, opts \\ []) when is_function(func) and is_integer(delay) do
    SchedEx.Runner.run(func, delay, opts)
  end

  @doc """
  Runs the given module, function and arguments on every occurrence of the given crontab. Any
  values in the arguments list which are equal to the magic symbol `:sched_ex_scheduled_time`
  are replaced with the scheduled execution time for each invocation

  Supports the following options:

  * `timezone`: A string timezone identifier (`America/Chicago`) specifying the timezone within
  which the crontab should be interpreted. If not specified, defaults to `UTC`
  * `time_scale`: A module that implements the `SchedEx.TimeScale` behaviour, by default is set
  to `SchedEx.IdentityTimeScale`. Can be used to speed up time (often used for speeding up test runs)
  * `name`: To attach a name to the process. Useful for adding a name to Registry to lookup later.
  ie. {:via, Registry, {YourRegistryName, "scheduled-task-1"}}
  """
  def run_every(m, f, a, crontab, opts \\ []) when is_atom(m) and is_atom(f) and is_list(a) do
    run_every(mfa_to_fn(m, f, a), crontab, opts)
  end

  @doc """
  Runs the given function on every occurrence of the given crontab. If func is of arity 1, the
  scheduled execution time will be passed for each invocation

  Takes the same options as `run_every/5`
  """
  def run_every(func, crontab, opts \\ []) when is_function(func) do
    case as_crontab(crontab) do
      {:ok, expression} -> SchedEx.Runner.run(func, expression, Keyword.put_new(opts, :repeat, true))
      {:error, _} = error -> error
    end
  end

  @doc """
  Cancels the given scheduled job
  """
  def cancel(token) do
    SchedEx.Runner.cancel(token)
  end

  @doc """
  Returns stats on the given job.
Stats are returned for: * `scheduling_delay`: The delay between when the job was scheduled to execute, and the time it actually was executed. Based on the quantized scheduled start, and so does not include quantization error. Value specified in microseconds. * `quantization_error`: Erlang is only capable of scheduling future calls with millisecond precision, so there is some inevitable precision lost between when the job would be scheduled in a perfect world, and how well Erlang is able to schedule the job (ie: to the closest millisecond). This error value captures that difference. Value specified in microseconds. * `execution_time`: The amount of time the job spent executing. Value specified in microseconds. """ def stats(token) do SchedEx.Runner.stats(token) end defp mfa_to_fn(m, f, args) do fn time -> substituted_args = Enum.map(args, fn arg -> case arg do :sched_ex_scheduled_time -> time _ -> arg end end) apply(m, f, substituted_args) end end defp as_crontab(%Crontab.CronExpression{} = crontab), do: {:ok, crontab} defp as_crontab(crontab) do extended = length(String.split(crontab)) > 5 Crontab.CronExpression.Parser.parse(crontab, extended) end end
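# Illustrative usage sketch (not part of the library): one delayed job, one
# recurring cron job whose arity-1 function receives the scheduled time, and
# cancellation. Assumes SchedEx.Runner.run/3 returns an {:ok, token} tuple.
defmodule SchedEx.Example do
  @moduledoc false

  def demo do
    # Fires once, one second from now.
    {:ok, once} = SchedEx.run_in(fn -> IO.puts("tick") end, 1_000)

    # Fires every minute with the scheduled DateTime as its argument.
    {:ok, every} = SchedEx.run_every(fn time -> IO.inspect(time) end, "* * * * *")

    SchedEx.cancel(once)
    SchedEx.cancel(every)
  end
end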
lib/sched_ex.ex
0.870501
0.768516
sched_ex.ex
starcoder
defmodule SampleProjects.Language.SentenceComplete do @moduledoc false def run do {training_data, words} = gen_training_data blank_vector = NeuralNet.get_blank_vector(words) IO.puts "Generating neural network." net = GRU.new(%{input_ids: words, output_ids: words}) IO.puts "Beginning training." NeuralNet.train(net, training_data, 1.5, 2, fn info -> IO.puts "#{info.error}, iteration ##{info.iterations}" {input, _} = Enum.random(training_data) # {_, acc} = NeuralNet.eval(info.net, input) #Get its expected word given a whole sentence. {_, acc} = Enum.reduce 1..10, {hd(input), [%{}]}, fn _, {word, acc} -> #Generates with feedback {vec, acc} = NeuralNet.eval(info.net, [word], acc) {Map.put(blank_vector, NeuralNet.get_max_component(vec), 1), acc} end vectors = [hd(input) | Enum.map(Enum.slice(acc, 1..(length(acc) - 1)), fn time_frame -> time_frame.output.values end)] words = Enum.map(vectors, fn vec -> Atom.to_string(NeuralNet.get_max_component(vec)) end) IO.puts Enum.join(words, " ") info.error < 0.0001 end, 0.2) end def gen_training_data do {sentences, words} = SampleProjects.Language.Parse.parse("lib/sample_projects/language/common_sense_small.txt") IO.puts "Sample data contains #{length(sentences)} sentences, and #{MapSet.size(words)} words." sentences = Enum.map sentences, fn sentence -> Enum.map(sentence, fn word -> String.to_atom(word) end) end words = Enum.map Enum.to_list(words), &String.to_atom/1 blank_vector = NeuralNet.get_blank_vector(words) training_data = Enum.map sentences, fn sentence -> sentence = Enum.map(sentence, fn word -> Map.put(blank_vector, word, 1) end) last = length(sentence) - 1 {Enum.slice(sentence, 0..(last - 1)), Enum.slice(sentence, 1..last)} end {training_data, words} end end
lib/sample_projects/language/sentence_complete.ex
0.545286
0.504639
sentence_complete.ex
starcoder
defmodule Axon.Shared do
  @moduledoc false

  # Collection of private helper functions and
  # macros for enforcing shape/type constraints,
  # doing shape calculations, and even some
  # helper numerical definitions.

  import Nx.Defn

  @doc """
  Asserts `lhs` has same shape as `rhs`.
  """
  defn assert_shape!(caller, lhs_name, lhs, rhs_name, rhs) do
    transform(
      {lhs, rhs},
      fn {lhs, rhs} ->
        lhs = Nx.shape(lhs)
        rhs = Nx.shape(rhs)

        unless Elixir.Kernel.==(lhs, rhs) do
          raise ArgumentError,
                "#{caller}: expected input shapes #{lhs_name} and #{rhs_name}" <>
                  " to be equal, got #{inspect(lhs)} != #{inspect(rhs)}"
        end
      end
    )
  end

  @doc """
  Asserts all shapes are equal.
  """
  defn assert_shape!(caller, shape_names, shapes) do
    transform(shapes, fn [shape | shapes] ->
      equal? =
        Enum.all?(shapes, fn cur_shape ->
          Elixir.Kernel.==(Nx.shape(cur_shape), Nx.shape(shape))
        end)

      unless equal? do
        raise ArgumentError,
              "#{caller}: expected all input shapes #{inspect(shape_names)}" <>
                " to be equal, got #{inspect(shapes)}"
      end
    end)
  end

  @doc """
  Asserts `inp` has explicit rank `rank`.
  """
  defn assert_rank!(caller, inp_name, inp, rank) do
    transform(
      {inp, rank},
      fn {x, y} ->
        x = Nx.rank(x)

        unless Elixir.Kernel.==(x, y) do
          raise ArgumentError,
                "#{caller}: expected #{inp_name} to have rank equal to #{y}," <>
                  " got #{x} != #{y}"
        end
      end
    )
  end

  @doc """
  Asserts `lhs` has same rank as `rhs`.
  """
  defn assert_equal_rank!(caller, lhs_name, lhs, rhs_name, rhs) do
    transform(
      {lhs, rhs},
      fn {x, y} ->
        x = if is_integer(x), do: x, else: Nx.rank(x)
        y = if is_integer(y), do: y, else: Nx.rank(y)

        # Equality, not >=: this assertion checks that the ranks match.
        unless Elixir.Kernel.==(x, y) do
          raise ArgumentError,
                "#{caller}: expected #{lhs_name} and #{rhs_name} ranks to be equal," <>
                  " got #{x} != #{y}"
        end
      end
    )
  end

  @doc """
  Asserts all ranks are equal.
  """
  defn assert_equal_rank!(caller, rank_names, ranks) do
    transform(ranks, fn [rank | ranks] ->
      equal? =
        Enum.all?(ranks, fn cur_rank ->
          Elixir.Kernel.==(Nx.rank(cur_rank), Nx.rank(rank))
        end)

      unless equal? do
        raise ArgumentError,
              "#{caller}: expected all input ranks #{inspect(rank_names)}" <>
                " to be equal, got #{inspect(ranks)}"
      end
    end)
  end

  @doc """
  Asserts `lhs` has at least rank `rhs`.
  """
  defn assert_min_rank!(caller, name, lhs, rhs) do
    transform(
      {lhs, rhs},
      fn {x, y} ->
        x = if is_integer(x), do: x, else: Nx.rank(x)
        y = if is_integer(y), do: y, else: Nx.rank(y)

        unless Elixir.Kernel.>=(x, y) do
          raise ArgumentError,
                "#{caller}: expected #{name} shape to have at least rank #{y}, got rank #{x}"
        end
      end
    )
  end

  @doc """
  Transforms the given Elixir value into a scalar predicate.
  """
  defn to_predicate(term) do
    transform(term, fn term -> if term, do: 1, else: 0 end)
  end

  @doc """
  Creates a zeros-like structure which matches the structure
  of the input.
  """
  defn zeros_like(params) do
    transform(
      params,
      &deep_new(&1, fn x -> Axon.Initializers.zeros(shape: Nx.shape(x)) end)
    )
  end

  @doc """
  Creates a fulls-like tuple of inputs.
  """
  defn fulls_like(params, value) do
    transform(
      params,
      &deep_new(&1, fn x -> Axon.Initializers.full(value, shape: Nx.shape(x)) end)
    )
  end

  @doc """
  Deep merges two possibly nested maps, applying fun to leaf values.
""" def deep_merge(left, right, fun) do case Nx.Container.traverse(left, leaves(right), &recur_merge(&1, &2, fun)) do {merged, []} -> merged {_merged, _leftover} -> raise ArgumentError, "unable to merge arguments with incompatible" <> " structure" end end defp leaves(container) do container |> Nx.Container.reduce([], fn x, acc -> [x | acc] end) |> Enum.reverse() end defp recur_merge(left, [right | right_leaves], fun) do case {left, right} do {%Nx.Tensor{} = left, %Nx.Tensor{} = right} -> {fun.(left, right), right_leaves} {left, right} -> {deep_merge(left, right, fun), right_leaves} end end @doc """ Creates a new map-like structure from a possible nested map, applying `fun` to each leaf. """ def deep_new(map, fun) do {cont, :ok} = Nx.Container.traverse(map, :ok, &recur_traverse(&1, &2, fun)) cont end defp recur_traverse(item, :ok, fun) do case item do %Nx.Tensor{} = t -> {fun.(t), :ok} container -> {deep_new(container, fun), :ok} end end @doc """ Deep reduces a map with an accumulator. """ def deep_reduce(map, acc, fun) do Nx.Container.reduce(map, acc, &recur_deep_reduce(&1, &2, fun)) end defp recur_deep_reduce(value, acc, fun) do case value do %Axon{} = val -> fun.(val, acc) %Nx.Tensor{} = val -> fun.(val, acc) val -> deep_reduce(val, acc, fun) end end @doc """ Deep map-reduce a nested container with an accumulator. """ def deep_map_reduce(container, acc, fun) do Nx.Container.traverse(container, acc, &recur_deep_map_reduce(&1, &2, fun)) end defp recur_deep_map_reduce(leaf, acc, fun) do case leaf do %Axon{} = leaf -> fun.(leaf, acc) %Nx.Tensor{} = leaf -> fun.(leaf, acc) container -> deep_map_reduce(container, acc, fun) end end ## Numerical Helpers # TODO: These should be contained somewhere else, like another library defn logsumexp(x, opts \\ []) do opts = keyword!(opts, axes: [], keep_axes: false) x |> Nx.exp() |> Nx.sum(opts) |> Nx.log() end defn xlogy(x, y) do x_ok = Nx.not_equal(x, 0.0) safe_x = Nx.select(x_ok, x, Nx.tensor(1, type: Nx.type(x))) safe_y = Nx.select(x_ok, y, Nx.tensor(1, type: Nx.type(y))) Nx.select(x_ok, safe_x * Nx.log(safe_y), Nx.tensor(0, type: Nx.type(x))) end defn reciprocal(x), do: Nx.divide(1, x) defn normalize(input, mean, variance, gamma, bias, opts \\ []) do opts = keyword!(opts, epsilon: 1.0e-6) scale = variance |> Nx.add(opts[:epsilon]) |> Nx.rsqrt() |> Nx.multiply(gamma) input |> Nx.subtract(mean) |> Nx.multiply(scale) |> Nx.add(bias) end defn mean_and_variance(input, opts \\ []) do opts = keyword!(opts, [:axes]) mean = Nx.mean(input, axes: opts[:axes], keep_axes: true) mean_of_squares = Nx.mean(input * input, axes: opts[:axes], keep_axes: true) {mean, mean_of_squares - mean * mean} end end
lib/axon/shared.ex
0.821939
0.589716
shared.ex
starcoder
defmodule QRCode.GaloisField do @moduledoc """ Galios Field GF(256) functions. """ @type value() :: 1..255 @type alpha() :: 0..254 # {index, aplha} @gf_table [ {1, 0}, {2, 1}, {3, 25}, {4, 2}, {5, 50}, {6, 26}, {7, 198}, {8, 3}, {9, 223}, {10, 51}, {11, 238}, {12, 27}, {13, 104}, {14, 199}, {15, 75}, {16, 4}, {17, 100}, {18, 224}, {19, 14}, {20, 52}, {21, 141}, {22, 239}, {23, 129}, {24, 28}, {25, 193}, {26, 105}, {27, 248}, {28, 200}, {29, 8}, {30, 76}, {31, 113}, {32, 5}, {33, 138}, {34, 101}, {35, 47}, {36, 225}, {37, 36}, {38, 15}, {39, 33}, {40, 53}, {41, 147}, {42, 142}, {43, 218}, {44, 240}, {45, 18}, {46, 130}, {47, 69}, {48, 29}, {49, 181}, {50, 194}, {51, 125}, {52, 106}, {53, 39}, {54, 249}, {55, 185}, {56, 201}, {57, 154}, {58, 9}, {59, 120}, {60, 77}, {61, 228}, {62, 114}, {63, 166}, {64, 6}, {65, 191}, {66, 139}, {67, 98}, {68, 102}, {69, 221}, {70, 48}, {71, 253}, {72, 226}, {73, 152}, {74, 37}, {75, 179}, {76, 16}, {77, 145}, {78, 34}, {79, 136}, {80, 54}, {81, 208}, {82, 148}, {83, 206}, {84, 143}, {85, 150}, {86, 219}, {87, 189}, {88, 241}, {89, 210}, {90, 19}, {91, 92}, {92, 131}, {93, 56}, {94, 70}, {95, 64}, {96, 30}, {97, 66}, {98, 182}, {99, 163}, {100, 195}, {101, 72}, {102, 126}, {103, 110}, {104, 107}, {105, 58}, {106, 40}, {107, 84}, {108, 250}, {109, 133}, {110, 186}, {111, 61}, {112, 202}, {113, 94}, {114, 155}, {115, 159}, {116, 10}, {117, 21}, {118, 121}, {119, 43}, {120, 78}, {121, 212}, {122, 229}, {123, 172}, {124, 115}, {125, 243}, {126, 167}, {127, 87}, {128, 7}, {129, 112}, {130, 192}, {131, 247}, {132, 140}, {133, 128}, {134, 99}, {135, 13}, {136, 103}, {137, 74}, {138, 222}, {139, 237}, {140, 49}, {141, 197}, {142, 254}, {143, 24}, {144, 227}, {145, 165}, {146, 153}, {147, 119}, {148, 38}, {149, 184}, {150, 180}, {151, 124}, {152, 17}, {153, 68}, {154, 146}, {155, 217}, {156, 35}, {157, 32}, {158, 137}, {159, 46}, {160, 55}, {161, 63}, {162, 209}, {163, 91}, {164, 149}, {165, 188}, {166, 207}, {167, 205}, {168, 144}, {169, 135}, {170, 151}, {171, 178}, {172, 220}, {173, 252}, {174, 190}, {175, 97}, {176, 242}, {177, 86}, {178, 211}, {179, 171}, {180, 20}, {181, 42}, {182, 93}, {183, 158}, {184, 132}, {185, 60}, {186, 57}, {187, 83}, {188, 71}, {189, 109}, {190, 65}, {191, 162}, {192, 31}, {193, 45}, {194, 67}, {195, 216}, {196, 183}, {197, 123}, {198, 164}, {199, 118}, {200, 196}, {201, 23}, {202, 73}, {203, 236}, {204, 127}, {205, 12}, {206, 111}, {207, 246}, {208, 108}, {209, 161}, {210, 59}, {211, 82}, {212, 41}, {213, 157}, {214, 85}, {215, 170}, {216, 251}, {217, 96}, {218, 134}, {219, 177}, {220, 187}, {221, 204}, {222, 62}, {223, 90}, {224, 203}, {225, 89}, {226, 95}, {227, 176}, {228, 156}, {229, 169}, {230, 160}, {231, 81}, {232, 11}, {233, 245}, {234, 22}, {235, 235}, {236, 122}, {237, 117}, {238, 44}, {239, 215}, {240, 79}, {241, 174}, {242, 213}, {243, 233}, {244, 230}, {245, 231}, {246, 173}, {247, 232}, {248, 116}, {249, 214}, {250, 244}, {251, 234}, {252, 168}, {253, 80}, {254, 88}, {255, 175} ] @doc """ Given alpha exponent returns integer. Example: iex> QRCode.GaloisField.to_i(1) 2 """ @spec to_i(alpha()) :: value() def to_i(alpha) when alpha in 0..254 do @gf_table |> Enum.find(fn {_i, a} -> alpha == a end) |> first() end @doc """ Given integer returns alpha exponent. Example: iex> QRCode.GaloisField.to_a(2) 1 """ @spec to_a(value()) :: alpha() def to_a(integer) when integer in 1..255 do @gf_table |> Enum.find(fn {i, _a} -> integer == i end) |> second() end @doc """ Add two alpha values in GF(256). 
## Example iex> QRCode.GaloisField.add(10, 251) 6 iex> QRCode.GaloisField.add(10, 13) 23 """ @spec add(alpha(), alpha()) :: alpha() def add(a1, a2) do rem(a1 + a2, 255) end defp first({val, _}) do val end defp second({_, val}) do val end end
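# Illustrative usage sketch (not part of the library): GF(256) multiplication
# expressed with the helpers above. Multiplying two field values amounts to
# adding their alpha exponents and converting back.
defmodule QRCode.GaloisFieldExample do
  @moduledoc false
  alias QRCode.GaloisField

  # multiply(2, 4) == 8, since to_a(2) = 1, to_a(4) = 2 and 2^(1 + 2) = 8.
  def multiply(x, y) do
    GaloisField.to_i(GaloisField.add(GaloisField.to_a(x), GaloisField.to_a(y)))
  end
end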
lib/qr_code/galois_field.ex
0.678007
0.446434
galois_field.ex
starcoder
defmodule PriorityQueue do @moduledoc """ This module defines a priority queue datastructure, intended for use with graphs, as it prioritizes lower priority values over higher priority values (ideal for priorities based on edge weights, etc.). This implementation makes use of `:gb_trees` under the covers. It is also very fast, even for a very large number of distinct priorities. Other priority queue implementations I've looked at are either slow when working with large numbers of priorities, or restrict themselves to a specific number of allowed priorities, which is why I've ended up writing my own. """ defstruct priorities: nil @type t :: %__MODULE__{ priorities: :gb_trees.tree(integer, :queue.queue(term)) } @doc """ Create a new priority queue """ @spec new() :: t def new do %__MODULE__{priorities: :gb_trees.empty()} end @doc """ Push a new element into the queue with the given priority. Priorities must be integer or float values. ## Example iex> pq = PriorityQueue.new ...> pq = PriorityQueue.push(pq, :foo, 1) ...> {result, _} = PriorityQueue.pop(pq) ...> result {:value, :foo} iex> pq = PriorityQueue.new ...> pq = PriorityQueue.push(pq, :foo, 1) ...> {{:value, :foo}, pq} = PriorityQueue.pop(pq) ...> pq = PriorityQueue.push(pq, :bar, 1) ...> {result, _} = PriorityQueue.pop(pq) ...> result {:value, :bar} """ @spec push(t, term, integer | float) :: t def push(%__MODULE__{priorities: tree} = pq, term, priority) do if :gb_trees.size(tree) > 0 do case :gb_trees.lookup(priority, tree) do :none -> q = :queue.in(term, :queue.new()) %__MODULE__{pq | priorities: :gb_trees.insert(priority, q, tree)} {:value, q} -> q = :queue.in(term, q) %__MODULE__{pq | priorities: :gb_trees.update(priority, q, tree)} end else q = :queue.in(term, :queue.new()) %__MODULE__{pq | priorities: :gb_trees.insert(priority, q, tree)} end end @doc """ This function returns the value at the top of the queue. If the queue is empty, `:empty` is returned, otherwise `{:value, term}`. This function does not modify the queue. ## Example iex> pq = PriorityQueue.new |> PriorityQueue.push(:foo, 1) ...> {:value, :foo} = PriorityQueue.peek(pq) ...> {{:value, val}, _} = PriorityQueue.pop(pq) ...> val :foo """ @spec peek(t) :: :empty | {:value, term} def peek(%__MODULE__{} = pq) do case pop(pq) do {:empty, _} -> :empty {{:value, _} = val, _} -> val end end @doc """ Pops an element from the queue with the lowest integer value priority. Returns `{:empty, PriorityQueue.t}` if there are no elements left to dequeue. Returns `{{:value, term}, PriorityQueue.t}` if the dequeue is successful This is equivalent to the `extract-min` operation described in priority queue theory. 
## Example iex> pq = PriorityQueue.new ...> pq = Enum.reduce(Enum.shuffle(0..4), pq, fn i, pq -> PriorityQueue.push(pq, ?a+i, i) end) ...> {{:value, ?a}, pq} = PriorityQueue.pop(pq) ...> {{:value, ?b}, pq} = PriorityQueue.pop(pq) ...> {{:value, ?c}, pq} = PriorityQueue.pop(pq) ...> {{:value, ?d}, pq} = PriorityQueue.pop(pq) ...> {{:value, ?e}, pq} = PriorityQueue.pop(pq) ...> {result, _} = PriorityQueue.pop(pq) ...> result :empty """ @spec pop(t) :: {:empty, t} | {{:value, term}, t} def pop(%__MODULE__{priorities: tree} = pq) do if :gb_trees.size(tree) > 0 do {min_pri, q, tree2} = :gb_trees.take_smallest(tree) case :queue.out(q) do {:empty, _} -> pop(%__MODULE__{pq | priorities: tree2}) {{:value, _} = val, q2} -> {val, %__MODULE__{pq | priorities: :gb_trees.update(min_pri, q2, tree)}} end else {:empty, pq} end end defimpl Inspect do def inspect(%PriorityQueue{priorities: tree}, opts) do if :gb_trees.size(tree) > 0 do items = tree |> :gb_trees.to_list() |> Enum.flat_map(fn {_priority, q} -> :queue.to_list(q) end) count = Enum.count(items) doc = Inspect.Algebra.to_doc(items, opts) Inspect.Algebra.concat(["#PriorityQueue<size: #{count}, queue: ", doc, ">"]) else "#PriorityQueue<size: 0, queue: []>" end end end end
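# Illustrative usage sketch (not part of the library): lower priority values
# pop first, which is exactly what a shortest-path frontier wants.
defmodule PriorityQueueExample do
  @moduledoc false

  def demo do
    pq =
      PriorityQueue.new()
      |> PriorityQueue.push(:far, 10)
      |> PriorityQueue.push(:near, 1)
      |> PriorityQueue.push(:mid, 5)

    {{:value, :near}, pq} = PriorityQueue.pop(pq)
    {{:value, :mid}, pq} = PriorityQueue.pop(pq)
    {{:value, :far}, pq} = PriorityQueue.pop(pq)
    {:empty, _} = PriorityQueue.pop(pq)
    :ok
  end
end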
lib/priority_queue.ex
0.935832
0.544741
priority_queue.ex
starcoder
defmodule Fixtures.Time do
  @moduledoc ~S"""
  Time and date related generators.
  """
  use Fixtures.Helper

  @doc ~S"""
  Generate a random date of birth, up to 100 years old.

  ## Options
   - `:age` the following value formats are accepted:
     - `from..to` a date of birth for someone with an age between from and to. (inclusive)
     - `integer` a date of birth for someone with the given age.
   - `:from`
   - `:to`
  """
  @spec date_of_birth(Keyword.t()) :: Date.t()
  def date_of_birth(opts \\ []) do
    {from, to} =
      cond do
        range?(opts[:age]) ->
          t = Date.utc_today()
          f..a = opts[:age]
          {Date.add(%{t | year: t.year - (f + 1)}, 1), %{t | year: t.year - a}}

        age = opts[:age] ->
          t = Date.utc_today()
          {Date.add(%{t | year: t.year - (age + 1)}, 1), %{t | year: t.year - age}}

        opts[:from] && opts[:to] ->
          {opts[:from], opts[:to]}

        f = opts[:from] ->
          {f, Date.utc_today()}

        t = opts[:to] ->
          {%{t | year: t.year - 1}, t}

        :last_hundred_year ->
          t = Date.utc_today()
          {%{t | year: t.year - 100}, t}
      end

    diff = Date.diff(to, from)

    Date.add(from, Enum.random(0..diff))
  end

  @spec range?(any) :: boolean
  defp range?(_.._), do: true
  defp range?(_), do: false

  @doc ~S"""
  Generate a random timestamp.
  """
  @spec timestamp(Keyword.t()) :: UTCDateTime.t() | DateTime.t() | NaiveDateTime.t()
  def timestamp(opts \\ []) do
    format = opts[:format] || UTCDateTime
    precision = opts[:precision] || :second

    min =
      Keyword.get_lazy(opts, :after, fn ->
        "1970-01-01T00:00:00.000000Z" |> format.from_iso8601() |> elem(1)
      end)

    addition =
      cond do
        before = opts[:before] -> format.diff(before, min, precision)
        duration = opts[:duration] -> duration
        :now -> format.diff(format.utc_now, min, precision)
      end

    min_addition = opts[:min_duration] || 0

    min
    |> format.add(Enum.random(min_addition..addition), precision)
    |> format.truncate(precision)
  end
end
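# Illustrative usage sketch (not part of the library): generating birth dates
# and timestamps with the option combinations documented above. NaiveDateTime
# is used as the format since it ships with Elixir; the values are hypothetical.
defmodule Fixtures.TimeExample do
  @moduledoc false

  def demo do
    dob_exact = Fixtures.Time.date_of_birth(age: 30)
    dob_range = Fixtures.Time.date_of_birth(age: 18..65)

    # A random timestamp within the hour after the Unix epoch.
    ts = Fixtures.Time.timestamp(format: NaiveDateTime, duration: 3600)

    {dob_exact, dob_range, ts}
  end
end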
lib/fixtures/time.ex
0.841305
0.781372
time.ex
starcoder
defmodule FlowAssertions.Messages do @moduledoc false def not_ok, do: "Value is not `:ok` or an `:ok` tuple" def not_ok_tuple, do: "Value is not an `:ok` tuple" def not_error, do: "Value is not an `:error` or `:error` tuple" def not_error_tuple, do: "Value is not an `:error` tuple" def not_error_3tuple(error_subtype), do: "Value is not of the form `{:error, #{inspect error_subtype}, <content>}`" def bad_error_3tuple_subtype(actual, expected), do: "The error subtype is `#{inspect actual}`, not the expected `#{inspect expected}`" def failed_predicate(predicate), do: "Predicate #{inspect predicate} failed" def no_regex_match, do: "Regular expression didn't match" # Note that the lack of `inspect` is deliberate. def failed_checker(name), do: "Checker `#{name}` failed" def stock_equality, do: "Assertion with == failed" def no_match, do: "The value doesn't match the given pattern" def no_field_match(field), do: "The value for field `#{inspect field}` doesn't match the given pattern" def not_no_value(key, no_value), do: "Expected key `#{inspect key}` to be `#{inspect no_value}`." def not_value(key), do: "Expected key `#{inspect key}` to have a value." def expected_1_element, do: "Expected a single element" def expected_no_element, do: "Expected an empty Enum" def expected_1_element_field(key), do: "Expected field `#{inspect key}` to be a single element Enum" def required_key_missing(key, struct) do struct_name = struct.__struct__ "Test error: there is no key `#{inspect key}` in a `#{inspect struct_name}`" end def wrong_struct_name(actual_name, expected_name), do: "Expected a `#{inspect expected_name}` but got a `#{inspect actual_name}`" def map_not_struct(expected_name), do: "Expected a `#{inspect expected_name}` but got a plain Map" def very_wrong_struct(expected_name), do: "Expected a `#{inspect expected_name}`" def wrong_field_value(key), do: "Field `#{inspect key}` has the wrong value" def field_missing(field), do: "Field `#{inspect field}` is missing" def field_wrongly_present(field), do: "Field `#{inspect field}` should not be present." def not_enumerable, do: "Expected an `Enumerable`" def not_enumerable(which), do: "The #{which} value is not an Enumerable" def different_length_collections, do: "The two collections have different lengths" def different_elements_collections, do: "The two collections have different elements" end
lib/messages.ex
0.84916
0.661168
messages.ex
starcoder
defmodule ExPlasma do @moduledoc """ Documentation for ExPlasma. """ alias ExPlasma.Transaction # constants that identify payment types, make sure that # when we introduce a new payment type, you name it `paymentV2` # https://github.com/omisego/plasma-contracts/blob/6ab35256b805e25cfc30d85f95f0616415220b20/plasma_framework/docs/design/tx-types-dependencies.md @payment_v1 <<1>> @fee <<3>> @type payment :: <<_::8>> @doc """ Simple payment type V1 """ @spec payment_v1() :: payment() def payment_v1(), do: @payment_v1 @doc """ Transaction fee claim V1 """ @spec fee() :: payment() def fee(), do: @fee @spec transaction_types :: [<<_::8>>, ...] def transaction_types(), do: [payment_v1(), fee()] @doc """ Produces a RLP encoded transaction bytes for the given transaction data. ## Example iex> txn = ...> %ExPlasma.Transaction{ ...> inputs: [ ...> %ExPlasma.Output{ ...> output_data: nil, ...> output_id: %{blknum: 0, oindex: 0, position: 0, txindex: 0}, ...> output_type: nil ...> } ...> ], ...> metadata: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>, ...> outputs: [ ...> %ExPlasma.Output{ ...> output_data: %{ ...> amount: 1, ...> output_guard: <<29, 246, 47, 41, 27, 46, 150, 159, 176, 132, 157, 153, ...> 217, 206, 65, 226, 241, 55, 0, 110>>, ...> token: <<46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, ...> 65, 226, 241, 55, 0, 110>> ...> }, ...> output_id: nil, ...> output_type: 1 ...> } ...> ], ...> sigs: [], ...> tx_data: <<0>>, ...> tx_type: 1 ...> } iex> ExPlasma.Transaction.encode(txn) <<248, 104, 1, 225, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 238, 237, 1, 235, 148, 29, 246, 47, 41, 27, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110, 148, 46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110, 1, 128, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>> """ @spec encode(Transaction.t()) :: binary() def encode(%ExPlasma.Transaction{} = txn), do: Transaction.encode(txn) @doc """ Decode the given RLP list into a Transaction. ## Example iex> rlp = <<248, 74, 192, 1, 193, 128, 239, 174, 237, 1, 235, 148, 29, 246, 47, 41, 27, ...> 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110, 148, 46, ...> 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, ...> 0, 110, 1, 128, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...> 0>> iex> ExPlasma.decode(rlp) %ExPlasma.Transaction{ inputs: [ %ExPlasma.Output{ output_data: nil, output_id: %{blknum: 0, oindex: 0, position: 0, txindex: 0}, output_type: nil } ], metadata: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>, outputs: [ %ExPlasma.Output{ output_data: %{ amount: 1, output_guard: <<29, 246, 47, 41, 27, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110>>, token: <<46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110>> }, output_id: nil, output_type: 1 } ], sigs: [], tx_data: 0, tx_type: 1 } """ @spec decode(binary()) :: Transaction.t() def decode(tx_bytes), do: Transaction.decode(tx_bytes) @doc """ Keccak hash the Transaction. This is used in the contracts and events to to reference transactions. 
## Example iex> rlp = <<248, 74, 192, 1, 193, 128, 239, 174, 237, 1, 235, 148, 29, 246, 47, 41, 27, ...> 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110, 148, 46, ...> 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, ...> 0, 110, 1, 128, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...> 0>> iex> ExPlasma.hash(rlp) <<87, 132, 239, 36, 144, 239, 129, 88, 63, 88, 116, 147, 164, 200, 113, 191, 124, 14, 55, 131, 119, 96, 112, 13, 28, 178, 251, 49, 16, 127, 58, 96>> """ @spec hash(Transaction.t() | binary()) :: <<_::256>> def hash(txn), do: Transaction.hash(txn) end
lib/ex_plasma.ex
0.841679
0.422922
ex_plasma.ex
starcoder
defmodule Ewebmachine.Builder.Resources do
  @moduledoc ~S"""
  `use`-ing this module will `use Plug.Builder` (so a plug pipeline described with the
  `plug module_or_function_plug` macro), but gives you a `:resource_match` local function
  plug which matches routes declared with the `resource/2` macro and executes the plug
  defined by its body.

  See the `Ewebmachine.Builder.Handlers` documentation to see how to construct these
  modules (in the `after` block).

  Below is a full example:

  ```
  defmodule FullApi do
    use Ewebmachine.Builder.Resources
    if Mix.env == :dev, do: plug Ewebmachine.Plug.Debug
    # pre plug, for instance you can put plugs defining common handlers
    plug :resource_match
    plug Ewebmachine.Plug.Run
    # customize ewebmachine result, for instance make an error page handler plug
    plug Ewebmachine.Plug.Send
    # plugs after that will be executed only if no ewebmachine resource has matched

    resource "/hello/:name" do %{name: name} after
      plug SomeAdditionalPlug
      content_types_provided do: ['application/xml': :to_xml]
      defh to_xml, do: "<Person><name>#{state.name}</name>"
    end

    resource "/*path" do %{path: Enum.join(path,"/")} after
      resource_exists do:
        File.regular?(path(state.path))
      content_types_provided do: [{state.path|>Plug.MIME.path|>default_plain,:to_content}]
      defh to_content, do:
        File.stream!(path(state.path),[],300_000_000)
      defp path(relative), do: "#{:code.priv_dir :ewebmachine_example}/web/#{relative}"
      defp default_plain("application/octet-stream"), do: "text/plain"
      defp default_plain(type), do: type
    end
  end
  ```

  ## Common Plugs macro helper

  As the most common use case is to match resources, run the webmachine automaton,
  then set a 404 if no resource matches, then handle error codes, then send the
  response, the `resources_plugs/1` macro allows you to do that.

  For example, if you want to convert all HTTP errors into exceptions, and consider
  that all paths must be handled so that any non-matching path should return a 404:

      resources_plugs error_as_exception: true, nomatch_404: true

  is equivalent to

      plug :resource_match
      plug Ewebmachine.Plug.Run
      plug :wm_notset_404
      plug Ewebmachine.Plug.ErrorAsException
      plug Ewebmachine.Plug.Send

      defp wm_notset_404(%{state: :unset}=conn,_), do: resp(conn,404,"")
      defp wm_notset_404(conn,_), do: conn

  Another example: if the following plugs must handle non-matching paths, and errors
  should be converted into `GET /error/:status` requests handled by the following plugs:

      resources_plugs error_forwarding: "/error/:status"

  is equivalent to

      plug :resource_match
      plug Ewebmachine.Plug.Run
      plug Ewebmachine.Plug.ErrorAsForward, forward_pattern: "/error/:status"
      plug Ewebmachine.Plug.Send
  """

  defmacro __using__(opts) do
    quote location: :keep do
      @before_compile Ewebmachine.Builder.Resources
      use Plug.Router
      import Plug.Router, only: []
      import Ewebmachine.Builder.Resources
      if unquote(opts[:default_plugs]) do
        plug :resource_match
        plug Ewebmachine.Plug.Run
        plug Ewebmachine.Plug.Send
      end

      defp resource_match(conn, _opts) do
        conn
        |> match(nil)
        |> dispatch(nil)
      end
    end
  end

  defmacro __before_compile__(_env) do
    wm_routes = Module.get_attribute __CALLER__.module, :wm_routes
    route_matches = for {route,wm_module,init_block}<-Enum.reverse(wm_routes) do
      quote do
        Plug.Router.match unquote(route) do
          init = unquote(init_block)
          var!(conn) = put_private(var!(conn),:machine_init,init)
          unquote(wm_module).call(var!(conn),[])
        end
      end
    end
    final_match = if !match?({"/*"<>_,_,_},hd(wm_routes)),
      do: quote(do: Plug.Router.match _ do var!(conn) end)
    quote do
      unquote_splicing(route_matches)
      unquote(final_match)
    end
  end

  defp remove_first(":"<>e), do: e
  defp remove_first("*"<>e), do: e
  defp remove_first(e), do: e

  defp route_as_mod(route), do:
    (route |> String.split("/") |> Enum.map(& &1 |> remove_first |> String.capitalize) |> Enum.join)

  @doc ~S"""
  Create a webmachine handler plug and use it on `:resource_match` when the path matches

  - the route will be the matching spec (see Plug.Router.match, string spec only)
  - do_block will be called on match (so matching bindings will be available) and
    should return the initial state
  - after_block will be the webmachine handler plug module body (wrapped with
    `use Ewebmachine.Builder.Handlers` and `plug :add_handlers` to clean the declaration).

  ```
  resource "/my/route/:commaid" do
    id = String.split(commaid,",")
    %{foo: id}
  after
    plug SomeAdditionalPlug
    resource_exists do: state.id == ["hello"]
  end

  resource ShortenedRouteName, "/my/route/that/would/generate/a/long/module/name/:commaid" do
    id = String.split(commaid,",")
    %{foo: id}
  after
    plug SomeAdditionalPlug
    resource_exists do: state.id == ["hello"]
  end
  ```
  """
  defmacro resource({:__aliases__, _, route_aliases},route,do: init_block, after: body) do
    resource_quote(Module.concat([__CALLER__.module|route_aliases]),route,init_block,body,__CALLER__.module)
  end

  defmacro resource(route,do: init_block, after: body) do
    resource_quote(Module.concat(__CALLER__.module,"EWM"<>route_as_mod(route)),route,init_block,body,__CALLER__.module)
  end

  def resource_quote(wm_module,route,init_block,body,caller_module) do
    old_wm_routes = Module.get_attribute(caller_module, :wm_routes) || []
    Module.put_attribute caller_module, :wm_routes, [{route,wm_module,init_block}|old_wm_routes]
    quote do
      defmodule unquote(wm_module) do
        use Ewebmachine.Builder.Handlers
        unquote(body)
        plug :add_handlers
      end
    end
  end

  alias Ewebmachine.Plug.ErrorAsException
  alias Ewebmachine.Plug.ErrorAsForward

  defmacro resources_plugs(opts \\ []) do
    {errorplug,errorplug_params} = cond do
      opts[:error_as_exception]->{ErrorAsException,[]}
      (forward_pattern=opts[:error_forwarding])->{ErrorAsForward,[forward_pattern: forward_pattern]}
      true -> {false,[]}
    end

    quote do
      plug :resource_match
      plug Ewebmachine.Plug.Run
      if unquote(opts[:nomatch_404]), do: plug :wm_notset_404
      if unquote(errorplug), do: plug(unquote(errorplug),unquote(errorplug_params))
      plug Ewebmachine.Plug.Send

      defp wm_notset_404(%{state: :unset}=conn,_), do: resp(conn,404,"")
      defp wm_notset_404(conn,_), do: conn
    end
  end
end
lib/ewebmachine/builder.resources.ex
0.891946
0.663042
builder.resources.ex
starcoder
defmodule State.Trip.Added do @moduledoc """ State for added trips. They aren't matched to GTFS trip IDs, so we maintain them separately, based on the predictions we see. """ use State.Server, indices: [:id, :route_id], recordable: Model.Trip, hibernate: false alias Model.{Prediction, Trip} @impl GenServer def init(state) do subscribe({:new_state, State.Prediction}) super(state) end @impl Events.Server def handle_event(_, _, _, state) do handle_new_state(&build_state/0) {:noreply, state} end @spec build_state :: Enumerable.t() defp build_state do [%{trip_match?: false}] |> State.Prediction.select() |> predictions_to_trips() end def predictions_to_trips(predictions) do predictions |> Stream.reject(&(is_nil(&1.trip_id) or is_nil(&1.stop_id))) |> Enum.reduce(%{}, &last_stop_prediction/2) |> Stream.flat_map(&prediction_to_trip/1) end @spec last_stop_prediction(Prediction.t(), acc) :: acc when acc: %{optional(Trip.id()) => Prediction.t()} defp last_stop_prediction(prediction, acc) do # remember the last prediction for the given trip Map.update(acc, prediction.trip_id, prediction, fn old -> if old.stop_sequence > prediction.stop_sequence do old else prediction end end) end @spec prediction_to_trip({Trip.id(), Prediction.t()}) :: [Trip.t()] defp prediction_to_trip({trip_id, prediction}) do with %{route_pattern_id: route_pattern_id} when is_binary(route_pattern_id) <- prediction, %{representative_trip_id: rep_trip_id} <- State.RoutePattern.by_id(route_pattern_id), [trip | _] <- State.Trip.by_id(rep_trip_id) do [ %{ trip | id: trip_id, block_id: nil, service_id: nil, wheelchair_accessible: 1, bikes_allowed: 0 } ] else _ -> prediction_to_trip_via_shape(prediction) end end defp prediction_to_trip_via_shape(prediction) do stop = case State.Stop.by_id(prediction.stop_id) do %{parent_station: nil} = stop -> stop %{parent_station: id} -> State.Stop.by_id(id) _other -> nil end last_stop_id = [prediction.route_id] |> State.Shape.select_routes(prediction.direction_id) |> Stream.filter(&(&1.route_id == prediction.route_id)) |> Enum.find_value(&last_stop_id_on_shape(&1, prediction, stop)) stop = if is_nil(last_stop_id) or last_stop_id == stop.id do stop else State.Stop.by_id(last_stop_id) end if stop == nil do [] else route = State.Route.by_id(prediction.route_id) [ %Trip{ id: prediction.trip_id, route_id: prediction.route_id, route_pattern_id: prediction.route_pattern_id, direction_id: prediction.direction_id, route_type: if(route, do: route.type), wheelchair_accessible: 1, headsign: stop.name, name: "", bikes_allowed: 0 } ] end end defp last_stop_id_on_shape(_, _, nil), do: nil defp last_stop_id_on_shape(%{priority: p} = shape, prediction, stop) when p >= 0 do shape_stops = State.StopsOnRoute.by_route_id( prediction.route_id, direction_id: prediction.direction_id, shape_ids: [shape.id] ) if Enum.any?(shape_stops, &(&1 in [stop.id, stop.parent_station])) do List.last(shape_stops) end end defp last_stop_id_on_shape(_, _, _) do nil end end
apps/state/lib/state/trip/added.ex
0.756582
0.487246
added.ex
starcoder
defmodule ISO8583 do @moduledoc ~S""" ISO 8583 messaging library for Elixir. This library has utilities validate, encode and decode message between systems using ISO 8583 regadless of the language the other system is written in. ```elixir message = %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244","70": "001"} {:ok, encoded} = ISO8583.encode(message) # {:ok, <<0, 49, 48, 56, 48, 48, 130, 56, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 48, 49, 56, 49, 54, ...>>} {:ok, decoded} = ISO8583.decode(encoded) # {:ok, %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244","70": "001"}} ``` ## Installation ```elixir def deps do [ {:iso_8583, "~> 0.1.2"} ] end ``` ## Customization and configuration All exposed API functions take options with the following configurable options. ### TCP Length Indicator This is used to specify whether or not to include the 2 byte hexadecimal encoded byte length of the whole message whe encoding or to consider it when decoding. This value is set to true by default. Example: ```elixir ISO8583.encode(some_message, tcp_len_header: false) ``` ### Bitmap encoding Primary and SecondaryBitmap encoding bitmap for fields 0-127 is configurable like below. Examples: ```elixir ISO8583.encode(some_message, bitmap_encoding: :ascii) # will result in 32 byte length bitmap ``` ```elixir ISO8583.encode(some_message) # will default to :hex result in 16 byte length bitmap encoded hexadecimal ``` ### Custom formats Custom formats for data type, data length and length type for all fields including special bitmaps like for 127.1 and 127.25.1 are configurable through custom formats. The default formats will be replaced by the custom one. To see the default formats [check here](https://github.com/zemuldo/iso_8583_elixir/blob/master/lib/iso_8583/formats/formats.ex#L104) Example: Here we override field 2 to have maximum of 30 characters. ```elixir custome_format = %{ "2": %{ content_type: "n", label: "Primary account number (PAN)", len_type: "llvar", max_len: 30, min_len: 1 } } message = some_message |> Map.put(:"2", "444466668888888888888888") ISO8583.encode(message, formats: custome_format) ``` ### Custom Static Metadata There is an option to configure static metadata to an iso message. Static metadata are info in like text format encoded at special locations in the message usually at the beginning of the message and agreed upon by the sender and receiver. This library considers the static metadata just after the MTI. In the example below BITCOIN-INTERCHANGE is encoded while encoding and extracted when decoding the message. ```elixir {:ok, encoded} = message |> ISO8583.encode(static_meta: "BITCOIN-INTERCHANGE") {:ok, decoded} = encoded |> ISO8583.decode(static_meta: "BITCOIN-INTERCHANGE") ``` """ import ISO8583.Encode alias ISO8583.DataTypes import ISO8583.Decode alias ISO8583.Formats alias ISO8583.Message.ResponseStatus alias ISO8583.Utils @doc """ Function to encode json or Elixir map into ISO 8583 encoded binary. Use this to encode all fields that are supported. See the formats module for details. 
## Examples iex> message = %{ iex> "0": "0800", iex> "7": "0818160244", iex> "11": "646465", iex> "12": "160244", iex> "13": "0818", iex> "70": "001" iex> } %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244", "70": "001" } iex>ISO8583.encode(message) {:ok, <<0, 49, 48, 56, 48, 48, 130, 56, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 48, 56, 49, 56, 49, 54, 48, 50, 52, 52, 54, 52, 54, 52, 54, 53, 49, 54, 48, 50, 52, 52, 48, 56, 49, 56, 48, 48, 49>>} """ @spec encode(message :: map(), opts :: Keyword.t()) :: {:ok, binary()} | {:error, String.t()} def encode(message, opts \\ []) def encode(message, opts) do opts = opts |> default_opts() message |> Utils.atomify_map() |> encode_0_127(opts) end @doc """ Function to encode field 127 extensions. ## Examples iex>message = %{ iex>"127.1": "0000008000000000", iex>"127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" iex>} %{ "127.1": "0000008000000000", "127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" } iex>ISO8583.encode_127(message) {:ok, %{ "127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", "127.1": "0000008000000000", "127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" }} """ @spec encode_127(message :: map(), opts :: Keyword.t()) :: {:ok, binary()} | {:error, String.t()} def encode_127(message, opts \\ []) do opts = opts |> default_opts() message |> encoding_extensions(:"127", opts) end @doc """ Function to encode field 127.25 extensions. 
## Examples iex>message = %{ iex>"127.25.1": "7E1E5F7C00000000", iex>"127.25.12": "61F379D43D5AEEBC", iex>"127.25.13": "80", iex>"127.25.14": "00000000000000001E0302031F00", iex>"127.25.15": "020300", iex>"127.25.18": "06010A03A09000", iex>"127.25.2": "000000005000", iex>"127.25.20": "008C", iex>"127.25.21": "E0D0C8", iex>"127.25.22": "404", iex>"127.25.23": "21", iex>"127.25.24": "0280048800", iex>"127.25.26": "404", iex>"127.25.27": "170911", iex>"127.25.28": "00000147", iex>"127.25.29": "60", iex>"127.25.3": "000000000000", iex>"127.25.30": "BAC24959", iex>"127.25.4": "A0000000031010", iex>"127.25.5": "5C00", iex>"127.25.6": "0128", iex>"127.25.7": "FF00" iex>} %{ "127.25.1": "7E1E5F7C00000000", "127.25.2": "000000005000", "127.25.3": "000000000000", "127.25.4": "A0000000031010", "127.25.5": "5C00", "127.25.6": "0128", "127.25.7": "FF00", "127.25.12": "61F379D43D5AEEBC", "127.25.13": "80", "127.25.14": "00000000000000001E0302031F00", "127.25.15": "020300", "127.25.18": "06010A03A09000", "127.25.20": "008C", "127.25.21": "E0D0C8", "127.25.22": "404", "127.25.23": "21", "127.25.24": "0280048800", "127.25.26": "404", "127.25.27": "170911", "127.25.28": "00000147", "127.25.29": "60", "127.25.30": "BAC24959" } iex>ISO8583.encode_127_25(message) {:ok, %{ "127.25": "01927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", "127.25.1": "7E1E5F7C00000000", "127.25.12": "61F379D43D5AEEBC", "127.25.13": "80", "127.25.14": "00000000000000001E0302031F00", "127.25.15": "020300", "127.25.18": "06010A03A09000", "127.25.2": "000000005000", "127.25.20": "008C", "127.25.21": "E0D0C8", "127.25.22": "404", "127.25.23": "21", "127.25.24": "0280048800", "127.25.26": "404", "127.25.27": "170911", "127.25.28": "00000147", "127.25.29": "60", "127.25.3": "000000000000", "127.25.30": "BAC24959", "127.25.4": "A0000000031010", "127.25.5": "5C00", "127.25.6": "0128", "127.25.7": "FF00" }} """ @spec encode_127_25(message :: map(), opts :: Keyword.t()) :: {:ok, binary()} | {:error, String.t()} def encode_127_25(message, opts \\ []) do opts = opts |> default_opts() message |> encoding_extensions(:"127.25", opts) end @doc """ Function to decode an ISO8583 binary using custimizable rules as describe in customization section. See the formats module for details. ## Examples iex> message = <<0, 49, 48, 56, 48, 48, 130, 56, 0, 0, 0, 0, iex> 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 48, 56, 49, 56, iex> 49, 54, 48, 50, 52, 52, 54, 52, 54, 52, 54, 53, iex> 49, 54, 48, 50, 52, 52, 48, 56, 49, 56, 48, 48, iex> 49>> iex>ISO8583.decode(message) {:ok, %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244", "70": "001" }} """ @spec decode(message :: binary(), opts :: Keyword.t()) :: {:ok, map()} | {:error, String.t()} def decode(message, opts \\ []) do opts = opts |> default_opts() message |> decode_0_127(opts) end @doc """ Function to expand field 127 to its sub fields. 
## Examples iex>message = %{ iex>"127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" iex>} %{ "127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" } iex>ISO8583.decode_127(message) {:ok, %{ "127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", "127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" }} """ @spec decode_127(message :: binary(), opts :: Keyword.t()) :: {:ok, map()} | {:error, String.t()} def decode_127(message, opts \\ []) def decode_127(message, opts) when is_binary(message) do opts = opts |> default_opts() message |> expand_field("127.", opts) end def decode_127(message, opts) do opts = opts |> default_opts() message |> expand_field("127.", opts) end @doc """ Function to expand field 127.25 to its sub fields ## Examples iex>message = %{ iex>"127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", iex>"127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" iex>} %{ "127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", "127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959" } iex>ISO8583.decode_127_25(message) {:ok, %{ "127": "000000800000000001927E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", "127.25": "7E1E5F7C0000000000000000500000000000000014A00000000310105C000128FF0061F379D43D5AEEBC8002800000000000000001E0302031F000203001406010A03A09000008CE0D0C840421028004880040417091180000014760BAC24959", "127.25.12": "61F379D43D5AEEBC", "127.25.13": "80", "127.25.14": "00000000000000001E0302031F00", "127.25.15": "020300", "127.25.18": "06010A03A09000", "127.25.2": "000000005000", "127.25.20": "008C", "127.25.21": "E0D0C8", "127.25.22": "404", "127.25.23": "21", "127.25.24": "0280048800", "127.25.26": "404", "127.25.27": "170911", "127.25.28": "00000147", "127.25.29": "60", "127.25.3": "000000000000", "127.25.30": "BAC24959", "127.25.4": "A0000000031010", "127.25.5": "5C00", "127.25.6": "0128", "127.25.7": "FF00" }} """ @spec decode_127_25(message :: binary(), opts :: Keyword.t()) :: {:ok, map()} | {:error, String.t()} def decode_127_25(message, opts \\ []) do opts = opts |> default_opts() message |> expand_field("127.25.", opts) end @doc """ Function check if json message is 
valid. ## Examples iex> message = %{ iex> "0": "0800", iex> "7": "0818160244", iex> "11": "646465", iex> "12": "160244", iex> "13": "0818", iex> "70": "001" iex> } %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244", "70": "001" } iex>ISO8583.valid?(message) true iex> message = <<0, 49, 48, 56, 48, 48, 130, 56, 0, 0, 0, 0, iex> 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 48, 56, 49, 56, iex> 49, 54, 48, 50, 52, 52, 54, 52, 54, 52, 54, 53, iex> 49, 54, 48, 50, 52, 52, 48, 56, 49, 56, 48, 48, iex> 49>> iex>ISO8583.valid?(message) true """ @spec valid?(message :: binary() | map(), opts :: Keyword.t()) :: true | false def valid?(message, opts \\ []) def valid?(message, opts) when is_map(message) do opts = opts |> default_opts() with atomified <- Utils.atomify_map(message), {:ok, _} <- DataTypes.valid?(atomified, opts) do true else _ -> false end end def valid?(message, opts) when is_binary(message) do opts = opts |> default_opts() case decode(message, opts) do {:ok, _} -> true {:error, _} -> false end end @doc """ Function check if json message is valid. ## Examples iex> message = %{ iex> "0": "0800", iex> "7": "0818160244", iex> "11": "646465", iex> "12": "160244", iex> "13": "0818", iex> "70": "001" iex> } %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244", "70": "001" } iex>ISO8583.valid(message) {:ok, message} iex> message = <<0, 49, 48, 56, 48, 48, 130, 56, 0, 0, 0, 0, iex> 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 48, 56, 49, 56, iex> 49, 54, 48, 50, 52, 52, 54, 52, 54, 52, 54, 53, iex> 49, 54, 48, 50, 52, 52, 48, 56, 49, 56, 48, 48, iex> 49>> iex>ISO8583.valid(message) {:ok, %{ "0": "0800", "11": "646465", "12": "160244", "13": "0818", "7": "0818160244", "70": "001" }} """ @spec valid(message :: map() | binary(), opts :: Keyword.t()) :: {:ok, map()} | {:error, String.t()} def valid(message, opts \\ []) def valid(message, opts) when is_map(message) do opts = opts |> default_opts() message |> Utils.atomify_map() |> DataTypes.valid?(opts) end def valid(message, opts) when is_binary(message) do opts = opts |> default_opts() message |> decode(opts) end defp default_opts([]) do [bitmap_encoding: :hex, tcp_len_header: true, formats: Formats.formats_definitions()] end defp default_opts(opts) do default_opts([]) |> Keyword.merge(opts) |> configure_formats() end defp configure_formats(opts) do case opts[:formats] |> is_map() do false -> opts |> Keyword.put(:formats, Formats.formats_definitions()) true -> formats_with_customs = Formats.formats_definitions() |> Map.merge(opts[:formats] |> Utils.atomify_map()) opts |> Keyword.merge(formats: formats_with_customs) end end @doc """ Fucntion to get the message status. ## Examples iex> ISO8583.status(%{"0": "0110", "39": "00"}) {:ok, "Approved or completed successfully"} iex> ISO8583.status(%{"0": "0110", "39": "01"}) {:error, "Refer to card issuer"} iex> ISO8583.status(%{"0": "0110", "39": "000"}) {:error, "Unknown statuscode"} """ @spec status(message: map()) :: {:ok, String.t()} | {:error, String.t()} def status(message) when is_map(message) do message |> ResponseStatus.ok?() end def status(_), do: {:error, "Message has to be a map with field 39"} end
lib/iso_8583.ex
0.912826
0.736187
iso_8583.ex
starcoder
defmodule Games.Pong do require OK use GenServer, restart: :transient defmodule State do use TypedStruct typedstruct enforce: true do field :id, String.t() field :running, boolean(), default: false field :winner, :red | :blue | none(), default: nil field :fader, Fader.t(), default: Fader.new(8) field :left_player, pid() | none(), default: nil field :right_player, pid() | none(), default: nil field :left_paddle_pos, non_neg_integer(), default: 0 field :right_paddle_pos, non_neg_integer(), default: 0 field :left_keypress_state, %{up: boolean(), down: boolean()}, default: %{up: false, down: false} field :right_keypress_state, %{up: boolean(), down: boolean()}, default: %{up: false, down: false} field :ball_pos, {float(), float()}, default: Screen.centre_pos() field :ball_vel, {float(), float()}, default: {0.6, 0} end end @paddle_size 20 @paddle_move_amount 3 @x_vel_mult_on_hit 1.01 @max_ball_x_vel 10 @max_ball_y_vel 4 @initial_x_vel 0.6 @initial_y_vel_range -5..5 @tick_ms Integer.floor_div(1000, 30) def start_link(options) do state = %State{ id: Keyword.fetch!(options, :game_id), ball_vel: {@initial_x_vel, 0.1 * Enum.random(@initial_y_vel_range)} } GenServer.start_link(__MODULE__, state, options) end @impl true def init(state) do {:ok, state} end @impl true def handle_cast({:handle_input, _player, _input}, %State{running: false} = state) do {:noreply, state} end @impl true def handle_cast({:handle_input, player, {pressed_state, key}}, state) do state = OK.try do player_state_key <- cond do player == state.left_player -> {:ok, :left_keypress_state} player == state.right_player -> {:ok, :right_keypress_state} true -> {:error, :not_a_player} end key_name <- case key do "ArrowUp" -> {:ok, :up} "ArrowDown" -> {:ok, :down} _ -> {:error, :unknown_key} end after put_in(state, [Access.key!(player_state_key), key_name], pressed_state) rescue _ -> state end {:noreply, state} end @impl true def handle_cast(:terminate, state) do state = %State{state | running: false} {:noreply, state} end @impl true def handle_call({:add_player, player}, _from, state) do {:ok, state} = case {state.left_player, state.right_player} do {nil, _} -> {:ok, %State{state | left_player: player}} {_, nil} -> {:ok, %State{state | right_player: player}} _ -> {:error, :players_already_full} end {:reply, :ok, state} end @impl true def handle_call( {:remove_player, player}, _from, %State{left_player: lp, right_player: rp} = state ) when player in [lp, rp] do state = cond do state.left_player == player -> %State{state | left_player: nil} state.right_player == player -> %State{state | right_player: nil} true -> state end if state.running or Enum.all?([state.left_player, state.right_player], &is_nil/1) do Coordinator.terminate_game(state.id) end {:reply, :ok, state} end @impl true def handle_call({:remove_player, _player}, _from, state) do {:reply, :ok, state} end @impl true def handle_call(:get_status, _from, state) do player_count = Enum.count([state.left_player, state.right_player], &(not is_nil(&1))) {:reply, %GameStatus{ id: state.id, name: "Pong", players: player_count, max_players: 2, ready: player_count == 2 }, state} end @impl true def handle_call(:start_if_ready, _from, state) do cond do state.running -> {:reply, :running, state} Enum.any?([state.left_player, state.right_player], &is_nil/1) -> {:reply, :not_ready, state} true -> {:ok, _timer} = :timer.send_interval(@tick_ms, :tick) {:reply, :started, %State{state | running: true}} end end @impl true def handle_info(:tick, state) do if state.running do {:noreply, tick(state)} else 
fade_tick(state) end end @impl true def terminate(_reason, state) do Coordinator.notify_game_terminated(state.id) end defp tick(state) do {dx, dy} = state.ball_vel state = state |> update_in([Access.key!(:ball_pos)], fn {x, y} -> {x + dx, y + dy} end) |> update_in([Access.key!(:fader)], &Fader.step/1) |> handle_bounces() |> handle_paddle_move() render(state) if state.winner do Phoenix.PubSub.broadcast!( InfolabLightGames.PubSub, "coordinator:status", {:game_win, state.id, state.winner} ) end state = if state.running do state else put_in(state.fader.direction, :dec) end state end defp fade_tick(state) do if Fader.done(state.fader) do {:stop, :normal, state} else state = %State{state | fader: Fader.step(state.fader)} render(state) {:noreply, state} end end defp within_paddle(paddle_pos, ball_pos) do half_paddle_size = @paddle_size / 2 ball_pos < paddle_pos + half_paddle_size and ball_pos > paddle_pos - half_paddle_size end defp handle_bounces(%State{ball_pos: {x, y}, ball_vel: {dx, dy}} = state) do {screen_x, screen_y} = Screen.dims() state = if (y < 0 and dy < 0) or (y > screen_y and dy > 0) do update_in(state.ball_vel, fn {dx, dy} -> {dx, -dy} end) else state end state = case {x < 4 and dx < 0, within_paddle(state.left_paddle_pos, y), x > screen_x - 3 and dx > 0, within_paddle(state.right_paddle_pos, y)} do {true, true, _, _} -> left_movement = paddle_vel(state.left_keypress_state, state.left_paddle_pos) update_in(state.ball_vel, fn {dx, dy} -> {clamp(-dx * @x_vel_mult_on_hit, -@max_ball_x_vel, @max_ball_x_vel), clamp(dy + left_movement * 0.5, -@max_ball_y_vel, @max_ball_y_vel)} end) {true, false, _, _} -> %State{state | running: false, winner: :red} {_, _, true, true} -> right_movement = paddle_vel(state.right_keypress_state, state.right_paddle_pos) update_in(state.ball_vel, fn {dx, dy} -> {clamp(-dx * @x_vel_mult_on_hit, -@max_ball_x_vel, @max_ball_x_vel), clamp(dy + right_movement * 0.5, -@max_ball_y_vel, @max_ball_y_vel)} end) {_, _, true, false} -> %State{state | running: false, winner: :blue} _ -> state end state end defp paddle_vel(keypress_state, paddle_pos) do {_screen_x, screen_y} = Screen.dims() case {keypress_state, paddle_pos < 0, paddle_pos > screen_y} do {%{up: true, down: false}, false, _} -> -@paddle_move_amount {%{up: false, down: true}, _, false} -> @paddle_move_amount _ -> 0 end end defp handle_paddle_move(state) do left_movement = paddle_vel(state.left_keypress_state, state.left_paddle_pos) right_movement = paddle_vel(state.right_keypress_state, state.right_paddle_pos) state |> update_in([Access.key!(:left_paddle_pos)], fn p -> p + left_movement end) |> update_in([Access.key!(:right_paddle_pos)], fn p -> p + right_movement end) end defp clamp(val, mn, mx) do val |> min(mx) |> max(mn) |> floor() end defp clamp_xy({x, y}) do {screen_x, screen_y} = Screen.dims() x = clamp(x, 0, screen_x) y = clamp(y, 0, screen_y) {x, y} end defp draw_ball(screen, %State{ball_pos: {x, y}} = state) do pix = Fader.apply(Pixel.white(), state.fader) NativeMatrix.draw_rect_at(screen, clamp_xy({x - 1, y - 1}), clamp_xy({x + 1, y + 1}), pix) end defp draw_paddles(screen, %State{left_paddle_pos: lp, right_paddle_pos: rp} = state) do {screen_x, _screen_y} = Screen.dims() half_paddle_size = @paddle_size / 2 screen |> NativeMatrix.draw_rect_at( clamp_xy({0, lp - half_paddle_size}), clamp_xy({2, lp + half_paddle_size}), Fader.apply(Pixel.blue(), state.fader) ) |> NativeMatrix.draw_rect_at( clamp_xy({screen_x - 2, rp - half_paddle_size}), clamp_xy({screen_x, rp + half_paddle_size}), 
Fader.apply(Pixel.red(), state.fader) ) end defp render(state) do frame = Screen.blank() |> draw_paddles(state) |> draw_ball(state) Screen.update_frame(frame) end end
web/lib/infolab_light_games/games/pong.ex
0.702734
0.415966
pong.ex
starcoder
defmodule Network.Sender do @doc """ Quality-Of-Service aware network sender. The idea is to wrap around a real socket and send on multiple "partitions". Small partitions with fewer data in the queue are always preferred over the large partitions """ use GenServer alias Network.Sender defstruct [:partitions, :waiting] def new(socket) do {:ok, pid} = GenServer.start_link(__MODULE__, [socket], hibernate_after: 5_000) pid end def stop(q) do GenServer.stop(q, :normal) end def push_async(q, partition, data) do GenServer.call(q, {:push_async, partition, data}) end def push(q, partition, data) do GenServer.call(q, {:push, partition, data}, :infinity) end def pop(q) do GenServer.call(q, :pop) end def await(q) do GenServer.call(q, :await, :infinity) end @impl true def handle_call( {:push_async, partition, data}, _from, state = %Sender{partitions: partitions, waiting: nil} ) do ps = Map.update(partitions, partition, {[data], [nil]}, fn {q, rest} -> {q ++ [data], rest ++ [nil]} end) {:reply, :ok, %Sender{state | partitions: ps}} end @impl true def handle_call( {:push_async, _partition, data}, _from, state = %Sender{waiting: from} ) do GenServer.reply(from, data) {:reply, :ok, %Sender{state | waiting: nil}} end @impl true def handle_call( {:push, partition, data}, from, state = %Sender{partitions: partitions, waiting: nil} ) do ps = Map.update(partitions, partition, {[data], [from]}, fn {q, rest} -> {q ++ [data], rest ++ [from]} end) {:noreply, %Sender{state | partitions: ps}} end @impl true def handle_call( {:push, _partition, data}, _from, state = %Sender{waiting: from} ) do GenServer.reply(from, data) {:reply, :ok, %Sender{state | waiting: nil}} end @impl true def handle_call(:pop, _from, state = %Sender{partitions: partitions}) do min = Enum.min( partitions, fn {_ka, {data_a, _from_a}}, {_kb, {data_b, _from_b}} -> :erts_debug.flat_size(data_a) < :erts_debug.flat_size(data_b) end, fn -> nil end ) case min do nil -> {:reply, nil, state} {partition, {[data | q], [from | rest]}} -> ps = if q == [] do Map.delete(partitions, partition) else Map.put(partitions, partition, {q, rest}) end state = %Sender{state | partitions: ps} if from != nil, do: GenServer.reply(from, :ok) {:reply, data, state} end end @impl true def handle_call(:await, from, state) do do_await(from, state) end # Coalescing data frames into 64kb at least when available defp do_await(data \\ "", from, state = %Sender{partitions: partitions, waiting: nil}) do if map_size(partitions) > 0 and byte_size(data) < 64_000 do {:reply, new_data, state} = handle_call(:pop, from, state) do_await(data <> new_data, from, state) else if byte_size(data) > 0 do {:reply, data, state} else {:noreply, %Sender{state | waiting: from}} end end end @impl true def init([socket]) do q = self() spawn_link(fn -> relayer_loop(q, socket) end) {:ok, %Sender{partitions: %{}, waiting: nil}} end defp relayer_loop(q, socket) do case :ssl.send(socket, await(q)) do :ok -> relayer_loop(q, socket) other -> Process.exit(self(), other) end end end
lib/network/sender.ex
0.640074
0.512571
sender.ex
starcoder
defmodule MdnsLite.Options do @moduledoc """ MdnsLite options MdnsLite is usually configured in a project's application environment (`config.exs`) as follows: ```elixir config :mdns_lite, hosts: [:hostname, "nerves"], ttl: 120, instance_name: "mDNS Lite Device", services: [ %{ id: :web_server, protocol: "http", transport: "tcp", port: 80, txt_payload: ["key=value"] }, %{ id: :ssh_daemon, instance_name: "More particular mDNS Lite Device" protocol: "ssh", transport: "tcp", port: 22 } ] ``` The configurable keys are: * `:hosts` - A list of hostnames to respond to. Normally this would be set to `:hostname` and `mdns_lite` will advertise the actual hostname with `.local` appended. * `:ttl` - The default mDNS record time-to-live. The default of 120 seconds is probably fine for most use. See [RFC 6762 - Multicast DNS](https://tools.ietf.org/html/rfc6762) for considerations. * `instance_name` - A user friendly name that will be used as the name for this device's advertised service(s). Per RFC6763 Appendix C, this should describe the user-facing purpose or description of the device, and should not be considered a unique identifier. For example, 'Nerves Device' and 'MatCo Laser Printer Model CRM-114' are good choices here. If instance_name is not defined it defaults to the first entry in the `hosts` list * `:excluded_ifnames` - A list of network interfaces names to ignore. By default, `mdns_lite` will ignore loopback and cellular network interfaces. * `:ipv4_only` - Set to `true` to only respond on IPv4 interfaces. Since IPv6 isn't fully supported yet, this is the default. Note that it's still possible to get AAAA records when using IPv4. * `:if_monitor` - Set to `MdnsLite.VintageNetMonitor` when using Nerves or `MdnsLite.InetMonitor` elsewhere. The default is `MdnsLite.VintageNetMonitor`. * `:dns_bridge_enabled` - Set to `true` to start a DNS server running that will bridge DNS to mDNS. * `:dns_bridge_ip` - The IP address for the DNS server. Defaults to 127.0.0.53. * `:dns_bridge_port` - The UDP port for the DNS server. Defaults to 53. * `:dns_bridge_recursive` - If a regular DNS request comes on the DNS bridge, forward it to a DNS server rather than returning an error. This is the default since there's an issue on Linux and Nerves that prevents Erlang's DNS resolver from checking the next one. * `:services` - A list of services to advertise. See `MdnsLite.service` for details. Some options are modifiable at runtime. Functions for modifying these are in the `MdnsLite` module. 
""" require Logger @default_host_name_list [:hostname] @default_ttl 120 @default_dns_ip {127, 0, 0, 53} @default_dns_port 53 @default_monitor MdnsLite.VintageNetMonitor @default_excluded_ifnames ["lo0", "lo", "ppp0", "wwan0"] @default_ipv4_only true defstruct services: MapSet.new(), dot_local_names: [], hosts: [], ttl: @default_ttl, instance_name: :unspecified, dns_bridge_enabled: false, dns_bridge_ip: @default_dns_ip, dns_bridge_port: @default_dns_port, dns_bridge_recursive: true, if_monitor: @default_monitor, excluded_ifnames: @default_excluded_ifnames, ipv4_only: @default_ipv4_only @typedoc false @type t :: %__MODULE__{ services: MapSet.t(map()), dot_local_names: [String.t()], hosts: [String.t()], ttl: pos_integer(), instance_name: MdnsLite.instance_name(), dns_bridge_enabled: boolean(), dns_bridge_ip: :inet.ip_address(), dns_bridge_port: 1..65535, dns_bridge_recursive: boolean(), if_monitor: module(), excluded_ifnames: [String.t()], ipv4_only: boolean() } @doc false @spec new(Enumerable.t()) :: t() def new(enumerable \\ %{}) do opts = Map.new(enumerable) hosts = get_host_option(opts) ttl = Map.get(opts, :ttl, @default_ttl) instance_name = Map.get(opts, :instance_name, :unspecified) config_services = Map.get(opts, :services, []) |> filter_invalid_services() dns_bridge_enabled = Map.get(opts, :dns_bridge_enabled, false) dns_bridge_ip = Map.get(opts, :dns_bridge_ip, @default_dns_ip) dns_bridge_port = Map.get(opts, :dns_bridge_port, @default_dns_port) dns_bridge_recursive = Map.get(opts, :dns_bridge_recursive, true) if_monitor = Map.get(opts, :if_monitor, @default_monitor) ipv4_only = Map.get(opts, :ipv4_only, @default_ipv4_only) excluded_ifnames = Map.get(opts, :excluded_ifnames, @default_excluded_ifnames) %__MODULE__{ ttl: ttl, instance_name: instance_name, dns_bridge_enabled: dns_bridge_enabled, dns_bridge_ip: dns_bridge_ip, dns_bridge_port: dns_bridge_port, dns_bridge_recursive: dns_bridge_recursive, if_monitor: if_monitor, excluded_ifnames: excluded_ifnames, ipv4_only: ipv4_only } |> add_hosts(hosts) |> add_services(config_services) end # This used to be called :host, but now it's :hosts. It's a list, but be # nice and wrap rather than crash. defp get_host_option(%{host: host}) do Logger.warn("mdns_lite: the :host app environment option is deprecated. Change to :hosts") List.wrap(host) end defp get_host_option(%{hosts: hosts}), do: List.wrap(hosts) defp get_host_option(_), do: @default_host_name_list @doc false @spec set_instance_name(t(), MdnsLite.instance_name()) :: t() def set_instance_name(options, instance_name) do %{options | instance_name: instance_name} end @doc false @spec add_service(t(), MdnsLite.service()) :: t() def add_service(options, service) do {:ok, normalized_service} = normalize_service(service) %{options | services: MapSet.put(options.services, normalized_service)} end @doc false @spec add_services(t(), [MdnsLite.service()]) :: t() def add_services(%__MODULE__{} = options, services) do Enum.reduce(services, options, fn service, options -> add_service(options, service) end) end @doc false @spec filter_invalid_services([MdnsLite.service()]) :: [MdnsLite.service()] def filter_invalid_services(services) do Enum.flat_map(services, fn service -> case normalize_service(service) do {:ok, normalized_service} -> [normalized_service] {:error, reason} -> Logger.warn("mdns_lite: ignoring service (#{inspect(service)}): #{reason}") [] end end) end @doc """ Normalize a service description All service descriptions are normalized before use. 
Call this function if you're unsure how the service description will be transformed for use. """ @spec normalize_service(MdnsLite.service()) :: {:ok, MdnsLite.service()} | {:error, String.t()} def normalize_service(service) do with {:ok, id} <- normalize_id(service), {:ok, instance_name} <- normalize_instance_name(service), {:ok, port} <- normalize_port(service), {:ok, type} <- normalize_type(service) do {:ok, %{ id: id, instance_name: instance_name, port: port, type: type, txt_payload: Map.get(service, :txt_payload, []), priority: Map.get(service, :priority, 0), weight: Map.get(service, :weight, 0) }} end end defp normalize_id(%{id: id}), do: {:ok, id} defp normalize_id(%{name: name}) do Logger.warn("mdns_lite: names are deprecated now. Specify an :id that's an atom") {:ok, name} end defp normalize_id(_), do: {:ok, :unspecified} defp normalize_instance_name(%{instance_name: instance_name}), do: {:ok, instance_name} defp normalize_instance_name(_), do: {:ok, :unspecified} defp normalize_type(%{type: type}) when is_binary(type) and byte_size(type) > 0 do {:ok, type} end defp normalize_type(%{protocol: protocol, transport: transport} = service) when is_binary(protocol) and is_binary(transport) do {:ok, "_#{service.protocol}._#{service.transport}"} end defp normalize_type(_other) do {:error, "Specify either 1. :protocol and :transport or 2. :type"} end defp normalize_port(%{port: port}) when port >= 0 and port <= 65535, do: {:ok, port} defp normalize_port(_), do: {:error, "Specify a port"} @doc false @spec get_services(t()) :: [MdnsLite.service()] def get_services(%__MODULE__{} = options) do MapSet.to_list(options.services) end @doc false @spec remove_service_by_id(t(), MdnsLite.service_id()) :: t() def remove_service_by_id(%__MODULE__{} = options, service_id) do services_set = options.services |> Enum.reject(&(&1.id == service_id)) |> MapSet.new() %{options | services: services_set} end @doc false @spec set_hosts(t(), [String.t() | :hostname]) :: t() def set_hosts(%__MODULE__{} = options, hosts) do %{options | dot_local_names: [], hosts: []} |> add_hosts(hosts) end @doc false @spec add_host(t(), String.t() | :hostname) :: t() def add_host(%__MODULE__{} = options, host) do resolved_host = resolve_mdns_name(host) dot_local_name = "#{resolved_host}.local" %{ options | dot_local_names: options.dot_local_names ++ [dot_local_name], hosts: options.hosts ++ [resolved_host] } end @doc false @spec add_hosts(t(), [String.t() | :hostname]) :: t() def add_hosts(%__MODULE__{} = options, hosts) do Enum.reduce(hosts, options, &add_host(&2, &1)) end defp resolve_mdns_name(:hostname) do {:ok, hostname} = :inet.gethostname() to_string(hostname) end defp resolve_mdns_name(mdns_name) when is_binary(mdns_name), do: mdns_name defp resolve_mdns_name(_other) do raise RuntimeError, "Host must be :hostname or a string" end end
lib/mdns_lite/options.ex
0.872361
0.690478
options.ex
starcoder
defmodule Coinbase.Pro.REST do @moduledoc """ This package implements a low-level REST API of the [Coinbase Pro](https://docs.pro.coinbase.com/). Low-level means it is just a wrapper over HTTP library which handles authentication, request signing and has a few nice helper functions but you still have to construct URLs and interpret responses on your own. If you want to use a high-level API, see [elixir-coinbase/coinbasepro](https://github.com/elixir-coinbase/coinbasepro). ## Installation If [available in Hex](https://hex.pm/docs/publish), the package can be installed by adding `coinbasepro_rest` to your list of dependencies in `mix.exs`: ```elixir def deps do [ {:coinbasepro_rest, "~> 1.0"} ] end ``` ## Additional dependencies As it uses [Tesla](https://github.com/teamon/tesla) underneath, you have to follow its installation instructions. Specifically, you have to install JSON library and you probably should install a HTTP client library as default HTTP client based on `httpc` does not validate SSL certificates. For example, add Jason and Hackney to the dependencies in `mix.exs`: ```elixir defp deps do [ {:hackney, "~> 1.16.0"}, {:jason, ">= 1.0.0"} ] end ``` Configure default adapter in `config/config.exs` (optional). ```elixir config :tesla, adapter: Tesla.Adapter.Hackney ``` See [Tesla](https://github.com/teamon/tesla)'s README for list of supported HTTP and JSON libraries. ## Configuration ### Base URL By default, the API sends requests to the production API. If you want to use Sandbox, you can add the following to the `config/config.exs`: ```elixir config :coinbasepro_rest, :base_url, "https://api-public.sandbox.pro.coinbase.com" ``` ### User Agent It is a good idea to override the default value of the User-Agent header added to the requests to something that clearly identifies your application name and version. If you want to do this, you can add the following to the `config/config.exs`: ```elixir config :coinbasepro_rest, :user_agent, "MyApp/1.0.0" ``` ## Usage ```elixir alias Coinbase.Pro.REST.{Context,Request} # Obtain these values from Coinbase context = %Context{key: "...", secret: "...", passphrase: "..."} {:ok, response} = Request.get(context, "/orders?limit=10") ``` In order to issue POST request: ```elixir alias Coinbase.Pro.REST.{Context,Request} # Obtain these values from Coinbase context = %Context{key: "...", secret: "...", passphrase: "..."} {:ok, response} = Request.post(context, "/deposits/payment-method", %{ "amount": 10.00, "currency": "USD", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" }) ``` ## Documentation The docs can be found at [https://hexdocs.pm/coinbasepro_rest](https://hexdocs.pm/coinbasepro_rest). ## License MIT ## Authors <NAME> """ end
lib/coinbasepro_rest.ex
0.828419
0.828731
coinbasepro_rest.ex
starcoder
defmodule AdventOfCode.Day8 do @spec add_vertex(:digraph.graph(), any) :: any def add_vertex(graph, vertex) do case :digraph.vertex(graph, vertex) do false -> :digraph.add_vertex(graph, vertex) {vertex, _} -> vertex end end @spec execute_op(:acc | :jmp | :nop, integer, integer, integer) :: {integer, integer} def execute_op(:acc, arg, instruction_ptr, acc) do acc = acc + arg instruction_ptr = instruction_ptr + 1 {instruction_ptr, acc} end def execute_op(:nop, _, instruction_ptr, acc) do {instruction_ptr + 1, acc} end def execute_op(:jmp, quantity, instruction_ptr, acc) do instruction_ptr = instruction_ptr + quantity {instruction_ptr, acc} end @spec execute(integer, integer, map, :digraph.graph()) :: {:single | :double, integer, integer, :digraph.graph()} def execute(instruction_ptr, acc, program, graph) when not is_map_key(program, instruction_ptr) do {:single, instruction_ptr, acc, graph} end def execute(instruction_ptr, acc, program, graph) do %{^instruction_ptr => {count, {opcode, arg}}} = program case count == 1 do true -> {:double, instruction_ptr, acc, graph} false -> count = count + 1 this_instruction = add_vertex(graph, instruction_ptr) program = Map.put(program, instruction_ptr, {count, {opcode, arg}}) {instruction_ptr, acc} = execute_op(opcode, arg, instruction_ptr, acc) next_instruction = add_vertex(graph, instruction_ptr) :digraph.add_edge(graph, this_instruction, next_instruction) execute(instruction_ptr, acc, program, graph) end end def fix_cycle(graph, program, [ptr | rest]) do case Map.get(program, ptr) do {_, {:nop, arg}} -> new_program = Map.put(program, ptr, {0, {:jmp, arg}}) case execute(0, 0, new_program, :digraph.new()) do {:single, _, acc, _} -> acc {:double, _, _, _} -> fix_cycle(graph, program, rest) end {_, {:jmp, arg}} -> new_program = Map.put(program, ptr, {0, {:nop, arg}}) case execute(0, 0, new_program, :digraph.new()) do {:single, _, acc, _} -> acc {:double, _, _, _} -> fix_cycle(graph, program, rest) end _ -> fix_cycle(graph, program, rest) end end def day8() do {_, program} = "day8_input" |> AdventOfCode.read_file() |> Enum.reduce({0, %{}}, fn x, {ptr, acc} -> [instr, count] = String.split(x) instr = String.to_atom(instr) {count, _} = Integer.parse(count) acc = Map.put(acc, ptr, {0, {instr, count}}) {ptr + 1, acc} end) {:double, ptr, part1, graph} = execute(0, 0, program, :digraph.new()) cycle = :digraph.get_cycle(graph, ptr) part2 = fix_cycle(graph, program, cycle) {part1, part2, ptr, graph} end end
lib/day8.ex
0.716516
0.468487
day8.ex
starcoder
defprotocol Orderable do @moduledoc """ Orderable is a simple Elixir package that allows you to make your custom data types orderable, so you can: - Compare them - Sort them ## How to write an implementation for my datatype? There is only a single function to implement: `Orderable.ordered/1`. This function should return a datatype that can be used with Erlang's built-in ordering to decide which is 'first'. In general, use: - Integers if you have a predefined, strict order. - Floats if you have some calculations that make it difficult or impractical to work with integers. - Strings/symbols will be ordered alphabetically (unicode-codepoint ordering). Tuples can be used to return an ordering whose second element only is considered if the first element is the same (and the third is only used if the second elements are the same, etc.) So if you have a struct `%Address{city: String.t, street: String.t, house_number: integer}` that you'd like to sort, you could implement it as follows: defimpl Orderable, for: Address do def ordered(address) do Orderable.ordered({address.city, address.street, address.house_number}) end end This will sort by city, and if they match by street, and if they match by house number. Note also that we call `Orderable.ordered` recursively on the tuple we are building. While this is only required if you know you might have custom values inside your data structur (for which Orderable might also be implemented), it is considered good style, so you do not forget it later on when your data model changes. For lists and tuples, Orderable.ordered will be called for each of the elements automatically. ## Full-scale Example An example of a custom data structure that you'd want to order: defmodule Rating do defstruct [:subject, :rating, :user] @ratings ~w{bad mediocre neutral good superb}a def new(subject, rating, user), do: %__MODULE__{subject: subject, rating: rating, user: user} @ratings_indexes @ratings |> Enum.with_index |> Enum.into(%{}) @doc false def ratings_indexes do @ratings_indexes end defimpl Orderable do def ordered(rating) do Orderable.ordered({rating.subject, Rating.ratings_indexes[rating.rating], rating.user}) end end end Now, you can use it as follows: iex> rating1 = Rating.new("Elixir", :superb, "Qqwy") iex> rating2 = Rating.new("Erlang", :good, "Qqwy") iex> rating3 = Rating.new("Peanut Butter", :bad, "Qqwy") iex> rating4 = Rating.new("Doing the Dishes", :neutral, "Qqwy") iex> rating5 = Rating.new("Elixir", :superb, "Nobbz") iex> rating6 = Rating.new("Elixir", :neutral, "Timmy the Cat") iex> unsorted = [rating1, rating2, rating3, rating4, rating5, rating6] iex> unsorted |> Enum.sort_by(&Orderable.ordered/1) [ %Rating{rating: :neutral, subject: "Doing the Dishes", user: "Qqwy"}, %Rating{rating: :neutral, subject: "Elixir", user: "Timmy the Cat"}, %Rating{rating: :superb, subject: "Elixir", user: "Nobbz"}, %Rating{rating: :superb, subject: "Elixir", user: "Qqwy"}, %Rating{rating: :good, subject: "Erlang", user: "Qqwy"}, %Rating{rating: :bad, subject: "Peanut Butter", user: "Qqwy"} ] ## default protocol implementations Default implementations exist for all datatypes that are common to be compared. 
For the following primitive values, calling `Orderable.ordered` is a no-op: - Integers - Floats - Binaries - Atoms For the following ordered collection types, Orderable.ordered will automatically be called for each element: - Lists - Tuples There are no default implementations for Maps, MapSets and other set-like things, because these datatypes are only partially ordered (when using the common subset-based way of ordering things.) There are also no default implementations for Functions, Pids, Ports and References, because those are not things we usually order in a _semantical_ way (They are part of the built-in Erlang ordering only so they can immediately be used as unique keys inside dictionary-like datatypes). """ @doc """ For every implementing type, it is supposed to return a data structure that can be used by Erlang's built-in ordering mechanism. """ @spec ordered(Orderable.t) :: any() def ordered(thing) end defimpl Orderable, for: [Atom, BitString, Integer, Float] do def ordered(already_ordered_primitive), do: already_ordered_primitive end defimpl Orderable, for: List do def ordered(things) do things |> Enum.map(&Orderable.ordered/1) end end defimpl Orderable, for: Tuple do def ordered(things_tuple) do things_tuple |> Tuple.to_list |> Enum.map(&Orderable.ordered/1) |> List.to_tuple end end
lib/orderable.ex
0.873417
0.71857
orderable.ex
starcoder
defmodule Estated.Error do @moduledoc """ Errors happen either because a problem occured with the key or the request. The level at which the result provides an error differs based on where the error occurs within the request. Error codes will be provided in an error response. """ @moduledoc since: "0.1.0" defstruct [ :code, :status_code, :title, :description, :metadata ] @typedoc "Error response." @typedoc since: "0.1.0" @type t :: %__MODULE__{ code: code() | nil, status_code: status_code() | nil, title: title() | nil, description: description() | nil, metadata: metadata() | nil } @typedoc " Code describing the type of error that occurred. Eg. [**APE01**](https://estated.com/developers/docs/v4/property/overview#errors) " @typedoc since: "0.1.0" @type code :: String.t() @typedoc """ The HTTP status code. Eg. **500** """ @typedoc since: "0.1.0" @type status_code :: pos_integer() @typedoc """ Title of the error. Eg. **Internal Server Error** """ @typedoc since: "0.1.0" @type title :: String.t() @typedoc """ Description of the error. Eg. **An exception occurred when attempting to process your request** """ @typedoc since: "0.1.0" @type description :: String.t() @typedoc "Metadata related to the error." @typedoc since: "0.1.0" @type metadata :: map() @doc false @doc since: "0.1.0" @spec cast(map()) :: t() def cast(%{} = error) do Enum.reduce(error, %__MODULE__{}, &cast_field/2) end @spec cast(nil) :: nil def cast(nil) do nil end defp cast_field({"code", code}, acc) do %__MODULE__{acc | code: code} end defp cast_field({"status_code", status_code}, acc) do %__MODULE__{acc | status_code: status_code} end defp cast_field({"title", title}, acc) do %__MODULE__{acc | title: title} end defp cast_field({"description", description}, acc) do %__MODULE__{acc | description: description} end defp cast_field({"metadata", metadata}, acc) do %__MODULE__{acc | metadata: metadata} end defp cast_field(_map_entry, acc) do acc end end
lib/estated/error.ex
0.85067
0.525308
error.ex
starcoder
defmodule Nug do @moduledoc """ Provides a macro for using Nug in tests """ defmacro __using__([]) do quote do import Nug end end defmacro __using__(opts) do quote do @upstream_url Keyword.fetch!(unquote(opts), :upstream_url) @client_builder Keyword.fetch!(unquote(opts), :client_builder) import Nug end end @doc ~S""" `with_proxy` is convenience macro that will handle setup and teardown of your proxy server. # Usage The `with_proxy` macro starts the proxy server and provides you with a variable in the block called `address` this is the server address that will be listening for requests made in the block ``` defmodule TestCase do use ExUnit.Case, async: true use Nug test "get response from API" do with_proxy("https://www.example.com", "test/fixtures/example.fixture") do # address is a variable that is created by the macro client = TestClient.new(address) {:ok, response} = Tesla.get(client, "/", query: [q: "hello"]) assert response.status == 200 end end end ``` """ defmacro with_proxy(upstream_url, recording_file, test_body) do quote do {:ok, pid} = Nug.HandlerSupervisor.start_child(%Nug.Handler{ upstream_url: unquote(upstream_url), recording_file: unquote(recording_file) }) var!(address) = Nug.RequestHandler.listen_address(pid) try do unquote(test_body) after Nug.RequestHandler.close(pid) end end end defmacro with_proxy(builder, upstream_url, recording_file, test_body) do test_file_name = "test/fixtures/#{recording_file}" quote do {:ok, pid} = Nug.HandlerSupervisor.start_child(%Nug.Handler{ upstream_url: unquote(upstream_url), recording_file: unquote(test_file_name) }) var!(client) = unquote(builder).(Nug.RequestHandler.listen_address(pid)) # Avoid unused variable warnings as the client is just a convenience _ = var!(client) try do unquote(test_body) after Nug.RequestHandler.close(pid) end end end defmacro with_proxy(recording_file, do: test_body) do test_file_name = "test/fixtures/#{recording_file}" upstream_url = Module.get_attribute(__CALLER__.module, :upstream_url) client_builder = Module.get_attribute(__CALLER__.module, :client_builder) quote do {:ok, pid} = Nug.HandlerSupervisor.start_child(%Nug.Handler{ upstream_url: unquote(upstream_url), recording_file: unquote(test_file_name) }) var!(client) = unquote(client_builder).(Nug.RequestHandler.listen_address(pid)) # Avoid unused variable warnings as the client is just a convenience _ = var!(client) try do unquote(test_body) after Nug.RequestHandler.close(pid) end end end end
lib/nug.ex
0.727104
0.624895
nug.ex
starcoder
defmodule Penelope.NLP.IntentClassifier do @moduledoc """ The intent classifier transforms a natural language utterance into a named intent and a set of named parameters. It uses an ML classifier to infer the intent name and an entity recognizer to extract named entities as parameters. These components are both represented as ML pipelines. The intent classifier also maintains a tokenizer pipeline for converting utterances into a list of tokens. This pipeline is executed first, and its results are run through the classifier/recognizer pipelines. Classification results are returned as a tuple of <intents, parameters>, where intents is the map of names to classification probabilities, and parameters is a name->value map. Intent names, parameter names and parameter values are all strings. Probabilities are all floats that should sum to 1. Example: pipeline = %{ tokenizer: [{:ptb_tokenizer, []}], classifier: [{:count_vectorizer, []}, {:linear_classifier, [c: 2.0, probability?: true]}], recognizer: [{:crf_tagger, []}], } x = [ "you have four pears", "three hundred apples would be a lot" ] y = [ {"intent_1", ["o", "o", "b_count", "b_fruit"]}, {"intent_2", ["b_count", "i_count", "b_fruit", "o", "o", "o", "o"]} ] classifier = Penelope.NLP.IntentClassifier.fit(%{}, x, y, pipeline) {intents, params} = Penelope.NLP.IntentClassifier.predict_intent( classifier, %{}, "I have three bananas" ) """ alias Penelope.ML.Pipeline @type model :: %{ tokenizer: [{atom, any}], detokenizer: [{atom, any}], classifier: [{atom, any}], recognizer: [{atom, any}] } @doc """ fits the tokenizer, classifier, and recognizer models """ @spec fit( context :: map, x :: [utterance :: String.t()], y :: [{intent :: String.t(), tags :: [String.t()]}], pipelines :: [ tokenizer: [{String.t() | atom, any}], classifier: [{String.t() | atom, any}], recognizer: [{String.t() | atom, any}] ] ) :: model def fit(context, x, y, pipelines) do pipelines = Map.new(pipelines) tokenizer = Pipeline.fit(context, x, y, pipelines.tokenizer) x_token = Pipeline.transform(tokenizer, context, x) {y_intent, y_entity} = Enum.unzip(y) classifier = Pipeline.fit(context, x_token, y_intent, pipelines.classifier) recognizer = Pipeline.fit(context, x_token, y_entity, pipelines.recognizer) %{ tokenizer: tokenizer, detokenizer: Enum.reverse(tokenizer), classifier: classifier, recognizer: recognizer } end @doc """ predicts an intent and its parameters from an utterance string """ @spec predict_intent( model :: model, context :: map, x :: String.t() ) :: {intents :: %{(intent :: String.t()) => probability :: float}, params :: %{(name :: String.t()) => value :: String.t()}} def predict_intent(model, context, x) do # tokenize the utterance [tokens] = Pipeline.transform(model.tokenizer, context, [x]) # predict the intent probabilities [intents] = Pipeline.predict_probability(model.classifier, context, [tokens]) # predict the tag sequence {intent, _probability} = Enum.max_by(intents, fn {_, v} -> v end) context = Map.put(context, :intent, intent) [{tags, _probability}] = Pipeline.predict_sequence(model.recognizer, context, [tokens]) # detokenize the parameters params = parse(tokens, tags) params = Map.new(params, fn {k, v} -> [v] = Pipeline.transform(model.detokenizer, context, [v]) {k, v} end) {intents, params} end # parse IOB tags # ignore tokens tagged as "o" (other) # strip the leading b_/i_ from tag names # merge consecutive tagged tokens into lists for detokenization # combine tokens with the same name under that key in the map defp parse([], []) do %{} end defp 
parse([_token | tokens], ["o" | tags]) do parse(tokens, tags) end defp parse([token | tokens], [tag | tags]) do <<_bi, __>> <> name = tag params = parse(tokens, tags) Map.update(params, name, [token], &[token | &1]) end @doc """ imports parameters from a serialized model """ @spec compile(params :: map) :: model def compile(params) do tokenizer = Pipeline.compile(params["tokenizer"]) %{ tokenizer: tokenizer, detokenizer: Enum.reverse(tokenizer), classifier: Pipeline.compile(params["classifier"]), recognizer: Pipeline.compile(params["recognizer"]) } end @doc """ exports a runtime model to a serializable data structure """ @spec export(model :: model) :: map def export(model) do %{ "tokenizer" => Pipeline.export(model.tokenizer), "classifier" => Pipeline.export(model.classifier), "recognizer" => Pipeline.export(model.recognizer) } end end
lib/penelope/nlp/intent_classifier.ex
0.85814
0.730482
intent_classifier.ex
starcoder
defmodule PromEx.Plugin do @moduledoc """ This module defines the behaviour that PromEx plugins need to implement in order to be properly loaded by PromEx on application start. As a convenience, this module can also be used as a macro to automatically import all of the necessary utility functions for writing plugins and also providing default implementations of behaviour functions that you may not be implementing. """ alias PromEx.MetricTypes.{ Event, Manual, Polling } @doc """ The `c:event_metrics/1` callback returns the configured event based metrics that the plugin exposes. If the plugin does not expose any event style metrics, there is a default implementation of this function that returns an empty list. In other words, if your plugin does not expose any event style metrics, there is no action needed on your part. This function is expected to either return a single `PromEx.Plugins.Event` struct or a list of `PromEx.Plugins.Event` structs. """ @callback event_metrics(keyword()) :: [Event.t()] | Event.t() @doc """ The `c:polling_metrics/1` callback returns the configured polling based metrics that the plugin exposes. If the plugin does not expose any polling style metrics, there is a default implementation of this function that returns an empty list. In other words, if your plugin does not expose any polling style metrics, there is no action needed on your part. This function is expected to either return a single `PromEx.Plugins.Polling` struct or a list of `PromEx.Plugins.Polling` structs. """ @callback polling_metrics(keyword()) :: [Polling.t()] | Polling.t() @doc """ The `c:manual_metrics/1` callback returns the configured manual based metrics that the plugin exposes. If the plugin does not expose any manual style metrics, there is a default implementation of this function that returns an empty list. In other words, if your plugin does not expose any manual style metrics, there is no action needed on your part. This function is expected to either return a single `PromEx.Plugins.Manual` struct or a list of `PromEx.Plugins.Manual` structs. """ @callback manual_metrics(keyword()) :: [Manual.t()] | Manual.t() defmacro __using__(_) do quote do @behaviour PromEx.Plugin import Telemetry.Metrics, only: [counter: 2, distribution: 2, last_value: 2, sum: 2] import PromEx.BucketGenerator alias PromEx.MetricTypes.{Event, Manual, Polling} @doc false @impl true def event_metrics(_), do: [] @doc false @impl true def polling_metrics(_), do: [] @doc false @impl true def manual_metrics(_), do: [] defoverridable PromEx.Plugin end end @doc false def no_dep_raise(module, dep) do raise("You are attempting to use the #{inspect(module)} plugin but #{dep} is currently not a project dependency") end end
lib/prom_ex/plugin.ex
0.868868
0.49646
plugin.ex
starcoder
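To make the behaviour above concrete, here is a hedged sketch of a custom plugin. The module `MyApp.PromEx.WorkerPlugin`, the telemetry event, and the metric name are hypothetical, and `Event.build/2` is assumed to be the constructor for the `PromEx.MetricTypes.Event` struct the callback returns.

defmodule MyApp.PromEx.WorkerPlugin do
  # Hypothetical plugin: only event_metrics/1 is overridden; the
  # `use PromEx.Plugin` macro injects empty defaults for
  # polling_metrics/1 and manual_metrics/1.
  use PromEx.Plugin

  @impl true
  def event_metrics(_opts) do
    # counter/2 is imported from Telemetry.Metrics by the __using__ macro;
    # Event.build/2 is assumed to wrap a group name and a metrics list.
    Event.build(:my_app_worker_event_metrics, [
      counter("my_app.worker.jobs.processed.count",
        event_name: [:my_app, :worker, :job, :stop],
        description: "Total number of worker jobs processed."
      )
    ])
  end
end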
defmodule SSHKit.SCP.Download do
  @moduledoc false

  require Bitwise

  alias SSHKit.SCP.Command
  alias SSHKit.SSH

  @doc """
  Downloads a file or directory from a remote host.

  ## Options

  * `:verbose` - let the remote scp process be verbose, default `false`
  * `:recursive` - set to `true` for copying directories, default `false`
  * `:preserve` - preserve timestamps, default `false`
  * `:timeout` - timeout in milliseconds, default `:infinity`

  ## Example

  ```
  :ok = SSHKit.SCP.Download.transfer(conn, "/home/code/sshkit", "downloads", recursive: true)
  ```
  """
  def transfer(connection, remote, local, options \\ []) do
    start(connection, remote, Path.expand(local), options)
  end

  defp start(connection, remote, local, options) do
    timeout = Keyword.get(options, :timeout, :infinity)
    command = Command.build(:download, remote, options)
    handler = connection_handler(options)

    ini = {:next, local, [], %{}, <<>>}

    SSH.run(connection, command, timeout: timeout, acc: {:cont, <<0>>, ini}, fun: handler)
  end

  # Builds the channel message handler that drives the SCP receive state machine.
  defp connection_handler(options) do
    fn message, state ->
      case message do
        {:data, _, 0, data} -> process_data(state, data, options)
        {:exit_status, _, status} -> exited(options, state, status)
        {:eof, _} -> eof(options, state)
        {:closed, _} -> closed(options, state)
      end
    end
  end

  defp process_data(state, data, options) do
    case state do
      {:next, path, stack, attrs, buffer} ->
        next(options, path, stack, attrs, buffer <> data)

      {:read, path, stack, attrs, buffer} ->
        read(options, path, stack, attrs, buffer <> data)
    end
  end

  defp next(options, path, stack, attrs, buffer) do
    # Directives are newline-terminated; keep buffering until a full line arrives.
    if String.last(buffer) == "\n" do
      # T = timestamps, C = regular file, D = enter directory, E = leave directory.
      case dirparse(buffer) do
        {"T", mtime, _, atime, _} -> time(options, path, stack, attrs, mtime, atime)
        {"C", mode, len, name} -> regular(options, path, stack, attrs, mode, len, name)
        {"D", mode, _, name} -> directory(options, path, stack, attrs, mode, name)
        {"E"} -> up(options, path, stack)
        _ -> {:halt, {:error, "Invalid SCP directive received: #{buffer}"}}
      end
    else
      {:cont, {:next, path, stack, attrs, buffer}}
    end
  end

  defp time(_, path, stack, attrs, mtime, atime) do
    attrs = Map.merge(attrs, %{atime: atime, mtime: mtime})
    {:cont, <<0>>, {:next, path, stack, attrs, <<>>}}
  end

  defp directory(options, path, stack, attrs, mode, name) do
    target = if File.dir?(path), do: Path.join(path, name), else: path

    preserve? = Keyword.get(options, :preserve, false)
    exists? = File.exists?(target)

    stat = if exists?, do: File.stat!(target), else: nil

    if exists? do
      :ok = File.chmod!(target, Bitwise.bor(stat.mode, 0o700))
    else
      :ok = File.mkdir!(target)
    end

    mode = if exists? && !preserve?, do: stat.mode, else: mode
    attrs = Map.put(attrs, :mode, mode)

    {:cont, <<0>>, {:next, target, [attrs | stack], %{}, <<>>}}
  end

  defp regular(options, path, stack, attrs, mode, length, name) do
    target = if File.dir?(path), do: Path.join(path, name), else: path

    preserve? = Keyword.get(options, :preserve, false)
    exists? = File.exists?(target)

    stat = if exists?, do: File.stat!(target), else: nil

    if exists? do
      :ok = File.chmod!(target, Bitwise.bor(stat.mode, 0o200))
    end

    device = File.open!(target, [:write, :binary])

    mode = if exists? && !preserve?, do: stat.mode, else: mode

    attrs =
      attrs
      |> Map.put(:mode, mode)
      |> Map.put(:device, device)
      |> Map.put(:length, length)
      |> Map.put(:written, 0)

    {:cont, <<0>>, {:read, target, stack, attrs, <<>>}}
  end

  defp read(options, path, stack, attrs, buffer) do
    %{device: device, length: length, written: written} = attrs

    {buffer, written} =
      if written < length do
        count = min(byte_size(buffer), length - written)
        <<chunk::binary-size(count), rest::binary>> = buffer
        :ok = IO.binwrite(device, chunk)
        {rest, written + count}
      else
        {buffer, written}
      end

    # After all content bytes, the sender appends a single 0 byte to signal success.
    if written == length && buffer == <<0>> do
      :ok = File.close(device)
      :ok = File.chmod!(path, attrs[:mode])

      if Keyword.get(options, :preserve, false) do
        :ok = touch!(path, attrs[:atime], attrs[:mtime])
      end

      {:cont, <<0>>, {:next, Path.dirname(path), stack, %{}, <<>>}}
    else
      {:cont, {:read, path, stack, Map.put(attrs, :written, written), <<>>}}
    end
  end

  defp up(options, path, [attrs | rest]) do
    :ok = File.chmod!(path, attrs[:mode])

    if Keyword.get(options, :preserve, false) do
      :ok = touch!(path, attrs[:atime], attrs[:mtime])
    end

    {:cont, <<0>>, {:next, Path.dirname(path), rest, %{}, <<>>}}
  end

  defp exited(_, {_, _, [], _, _}, status) do
    {:cont, {:done, status}}
  end

  defp exited(_, {_, _, _, _, _}, status) do
    {:halt, {:error, "SCP exited before completing the transfer (#{status})"}}
  end

  defp eof(_, state) do
    {:cont, state}
  end

  defp closed(_, {:done, 0}) do
    {:cont, :ok}
  end

  defp closed(_, {:done, status}) do
    {:cont, {:error, "SCP exited with non-zero exit code #{status}"}}
  end

  defp closed(_, _) do
    {:cont, {:error, "SCP channel closed before completing the transfer"}}
  end

  @epoch :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})

  # SCP reports POSIX timestamps; convert them to calendar datetimes for File.write_stat/3.
  defp touch!(path, atime, mtime) do
    atime = :calendar.gregorian_seconds_to_datetime(@epoch + atime)
    mtime = :calendar.gregorian_seconds_to_datetime(@epoch + mtime)

    {:ok, file_info} = File.stat(path)
    :ok = File.write_stat(path, %{file_info | mtime: mtime, atime: atime}, [:posix])
  end

  @tfmt ~S"(T)(0|[1-9]\d*) (0|[1-9]\d{0,5}) (0|[1-9]\d*) (0|[1-9]\d{0,5})"
  @ffmt ~S"(C|D)([0-7]{4}) (0|[1-9]\d*) ([^/]+)"
  @efmt ~S"(E)"

  @dfmt ~r/\A(?|#{@efmt}|#{@tfmt}|#{@ffmt})\n\z/

  defp dirparse(value) do
    case Regex.run(@dfmt, value, capture: :all_but_first) do
      ["T", mtime, mtus, atime, atus] -> {"T", dec(mtime), dec(mtus), dec(atime), dec(atus)}
      [chr, _, _, name] when chr in ["C", "D"] and name in ["/", "..", "."] -> nil
      ["C", mode, len, name] -> {"C", oct(mode), dec(len), name}
      ["D", mode, len, name] -> {"D", oct(mode), dec(len), name}
      ["E"] -> {"E"}
      nil -> nil
    end
  end

  defp int(value, base), do: String.to_integer(value, base)
  defp dec(value), do: int(value, 10)
  defp oct(value), do: int(value, 8)
end
lib/sshkit/scp/download.ex
0.813868
0.722331
download.ex
starcoder
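For context, a hedged usage sketch of the download module above. The host, credentials, and paths are placeholders, and `SSHKit.SSH.connect/2` / `SSHKit.SSH.close/1` are assumed from the same library (only `SSH.run/3` is referenced by the source shown here).

# Sketch: download a remote directory tree, preserving timestamps.
{:ok, conn} = SSHKit.SSH.connect("example.com", user: "deploy")

:ok =
  SSHKit.SCP.Download.transfer(conn, "/var/www/releases", "backups",
    recursive: true,
    preserve: true
  )

:ok = SSHKit.SSH.close(conn)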
defmodule Architect.KnownHosts.Scanned do
  @moduledoc """
  Provides functions to get known host data from a host.

  Does this by running the v-ssh-keyscan executable and decoding the output to json.

  Tries to scan for *:architect, :keyscan_timeout* milliseconds, then will fail with :keyscan_timeout.

  Possible errors:

    :invalid_args - A non-binary was passed
    :keyscan_timeout - No response after 5 seconds from keyscan executable
    :keyscan_failed - The keyscan executable returned a non 0 exit code
    :json_decode_failed - Could not decode the executable output to JSON
    :unexpected_decode_values - Could decode the executable output to JSON, but could not find required keys

  Examples

      ...> Architect.KnownHosts.Scanned.generate("github.com")
      {:ok,
       %Architect.KnownHosts.Scanned{
         entry: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHk...",
         md5: "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48",
         sha256: "SHA256:nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8"
       }}
  """

  require Logger

  @enforce_keys [:md5, :sha256, :entry]
  defstruct [:md5, :sha256, :entry]

  @keyscan_bin "v-ssh-keyscan"

  @doc """
  Scan a host and get either a :ok or :error tuple
  """
  def generate(host) when is_binary(host) do
    %{timeout: timeout} = Application.get_env(:architect, :keyscan) |> Enum.into(%{})

    try do
      Task.async(fn ->
        {:ok, cwd} = File.cwd()
        System.cmd("#{cwd}/#{@keyscan_bin}", [host], stderr_to_stdout: true)
      end)
      |> Task.await(timeout)
      |> handle_scan()
    catch
      :exit, _ ->
        log("v-ssh-keyscan timeout", :warn)
        {:error, :keyscan_timeout}
    end
  end

  def generate(_), do: {:error, :invalid_args}

  @doc false
  defp handle_scan({output, exit_code}) when exit_code == 0 do
    output
    |> Poison.decode()
    |> handle_decode()
  end

  defp handle_scan({output, exit_code}) do
    log("v-ssh-keyscan exit code #{inspect(exit_code)}, error: #{inspect(output)}", :error)
    {:error, :keyscan_failed}
  end

  @doc false
  defp handle_decode(
         {:ok,
          %{
            "entry" => entry,
            "sha256Fingerprint" => sha256,
            "md5Fingerprint" => md5
          }}
       ) do
    {:ok, %__MODULE__{md5: md5, sha256: sha256, entry: entry}}
  end

  defp handle_decode({:ok, values}) do
    log("v-ssh-keyscan unexpected decode values #{inspect(values)}", :error)
    {:error, :unexpected_decode_values}
  end

  defp handle_decode({:error, values}) do
    log("v-ssh-keyscan failed to decode to JSON #{inspect(values)}", :error)
    {:error, :json_decode_failed}
  end

  @doc false
  defp log(output, level) do
    %{log_errors: log_errors} = Application.get_env(:architect, :keyscan) |> Enum.into(%{})
    log(output, level, log_errors)
  end

  defp log(output, :debug, true), do: Logger.debug(output)
  defp log(output, :warn, true), do: Logger.warn(output)
  defp log(output, :error, true), do: Logger.error(output)
  defp log(_, _, _), do: nil
end
architect/lib/architect/known_hosts/scanned.ex
0.736211
0.523177
scanned.ex
starcoder
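Finally, a hedged sketch of consuming `generate/1` above, pattern matching on the documented return tuples; the logging choices are illustrative only.

# Sketch: handle each class of result generate/1 can produce.
case Architect.KnownHosts.Scanned.generate("github.com") do
  {:ok, %Architect.KnownHosts.Scanned{entry: entry, md5: md5, sha256: sha256}} ->
    IO.puts("known_hosts entry: #{entry} (md5 #{md5}, sha256 #{sha256})")

  {:error, reason} when reason in [:keyscan_timeout, :keyscan_failed] ->
    IO.puts("keyscan could not scan the host: #{inspect(reason)}")

  {:error, reason} ->
    IO.puts("keyscan output could not be decoded: #{inspect(reason)}")
end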