code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
defmodule Gradient.ElixirFmt do
  @moduledoc """
  Module that handles formatting and printing error messages produced by Gradient in Elixir.
  """
  @behaviour Gradient.Fmt

  alias :gradualizer_fmt, as: FmtLib
  alias Gradient.ElixirType

  @doc """
  Prints every `{file, error}` pair, tagging each error with its filename
  before delegating to `print_error/2`.
  """
  def print_errors(errors, opts) do
    for {file, e} <- errors do
      opts = Keyword.put(opts, :filename, file)
      print_error(e, opts)
    end
  end

  @doc """
  Prints a single error to stdout, prefixed with the filename (when known).

  The `:fmt_location` option (`:brief` or `:verbose`, default `:verbose`)
  controls the separator after the filename.
  """
  def print_error(error, opts) do
    file = Keyword.get(opts, :filename)
    fmt_loc = Keyword.get(opts, :fmt_location, :verbose)

    case file do
      nil -> :ok
      _ when fmt_loc == :brief -> :io.format("~s:", [file])
      _ -> :io.format("~s: ", [file])
    end

    :io.put_chars(format_error(error, opts))
  end

  @doc """
  Formats an error to iodata, pretty-printing types via `Gradient.ElixirType`.
  """
  def format_error(error, opts) do
    opts = Keyword.put(opts, :fmt_type_fun, &ElixirType.pretty_print/1)
    format_type_error(error, opts)
  end

  @impl Gradient.Fmt
  def format_type_error({:type_error, expression, actual_type, expected_type}, opts)
      when is_tuple(expression) do
    format_expr_type_error(expression, actual_type, expected_type, opts)
  end

  def format_type_error({:call_undef, anno, module, func, arity}, opts) do
    :io_lib.format(
      "~sCall to undefined function ~p:~p/~p~s~n",
      [
        format_location(anno, :brief, opts),
        module,
        func,
        arity,
        format_location(anno, :verbose, opts)
      ]
    )
  end

  def format_type_error({:undef, :record, anno, {module, recName}}, opts) do
    :io_lib.format(
      "~sUndefined record ~p:~p~s~n",
      [
        format_location(anno, :brief, opts),
        module,
        recName,
        format_location(anno, :verbose, opts)
      ]
    )
  end

  def format_type_error({:undef, :record, anno, recName}, opts) do
    :io_lib.format(
      "~sUndefined record ~p~s~n",
      [format_location(anno, :brief, opts), recName, format_location(anno, :verbose, opts)]
    )
  end

  def format_type_error({:undef, :record_field, fieldName}, opts) do
    :io_lib.format(
      "~sUndefined record field ~s~s~n",
      [
        format_location(fieldName, :brief, opts),
        pp_expr(fieldName, opts),
        format_location(fieldName, :verbose, opts)
      ]
    )
  end

  # Local (non-remote) user type: identified by name and arity only.
  def format_type_error({:undef, :user_type, anno, {name, arity}}, opts) do
    :io_lib.format(
      "~sUndefined type ~p/~p~s~n",
      [format_location(anno, :brief, opts), name, arity, format_location(anno, :verbose, opts)]
    )
  end

  def format_type_error({:undef, type, anno, {module, name, arity}}, opts)
      when type in [:user_type, :remote_type] do
    type =
      case type do
        :user_type -> "type"
        :remote_type -> "remote type"
      end

    module = "#{inspect(module)}"

    :io_lib.format(
      "~sUndefined ~s ~s:~p/~p~s~n",
      [
        format_location(anno, :brief, opts),
        type,
        module,
        name,
        arity,
        format_location(anno, :verbose, opts)
      ]
    )
  end

  # Fallback: delegate to Gradualizer's own formatter (returns a charlist).
  def format_type_error(error, opts) do
    :gradualizer_fmt.format_type_error(error, opts) ++ '\n'
  end

  @doc """
  Formats a `:type_error` for an expression, preferring a "fancy" rendering
  that highlights the expression in its source context and falling back to an
  inline pretty-printed expression when the source cannot be read.
  """
  def format_expr_type_error(expression, actual_type, expected_type, opts) do
    {inline_expr, fancy_expr} =
      case try_highlight_in_context(expression, opts) do
        {:error, _e} -> {" " <> pp_expr(expression, opts), ""}
        {:ok, fancy} -> {"", fancy}
      end

    :io_lib.format(
      "~sThe ~s~ts~s is expected to have type ~ts but it has type ~ts~n~ts~n~n",
      [
        format_location(expression, :brief, opts),
        describe_expr(expression),
        inline_expr,
        format_location(expression, :verbose, opts),
        pp_type(expected_type, opts),
        pp_type(actual_type, opts),
        fancy_expr
      ]
    )
  end

  @doc """
  Renders the location of `expression` only when the configured
  `:fmt_location` style matches `fmt_type`; otherwise produces an empty
  string for that slot.
  """
  def format_location(expression, fmt_type, opts \\ []) do
    case Keyword.get(opts, :fmt_location, :verbose) do
      ^fmt_type ->
        FmtLib.format_location(expression, fmt_type)

      # Any non-matching configuration yields nothing for this slot. The
      # previous `:verbose ->` clause raised CaseClauseError whenever
      # `:fmt_location` was set to `:brief` and this was called with
      # `fmt_type == :verbose` (which format_type_error/2 always does).
      _ ->
        ""
    end
  end

  def pp_expr(expression, _opts) do
    IO.ANSI.blue() <> "#{inspect(expression)}" <> IO.ANSI.reset()
  end

  def pp_type(type, _opts) do
    pp = ElixirType.pretty_print(type)
    IO.ANSI.cyan() <> pp <> IO.ANSI.reset()
  end

  @doc """
  Tries to render `expression` highlighted within its surrounding source
  lines. Returns `{:ok, iodata}` or an `{:error, reason}` tuple when the
  location is missing or the source file cannot be found/read.
  """
  def try_highlight_in_context(expression, opts) do
    forms = Keyword.get(opts, :forms)

    with :ok <- has_location?(expression),
         {:ok, path} <- get_ex_file_path(forms),
         {:ok, code} <- File.read(path) do
      code_lines = String.split(code, ~r/\R/)
      {:ok, highlight_in_context(expression, code_lines)}
    end
  end

  # NOTE: despite the `?` suffix this returns :ok | {:error, binary()} rather
  # than a boolean; kept for backward compatibility with existing callers.
  def has_location?(expression) do
    if elem(expression, 1) == 0 do
      {:error, "The location is missing in the expression"}
    else
      :ok
    end
  end

  @spec highlight_in_context(tuple(), [String.t()]) :: String.t()
  def highlight_in_context(expression, context) do
    line = elem(expression, 1)

    context
    |> Enum.with_index(1)
    |> filter_context(line, 2)
    |> underscore_line(line)
    |> Enum.join("\n")
  end

  @doc """
  Keeps only the `{line_text, line_number}` pairs within `ctx_size` lines of
  the anno `loc`.
  """
  def filter_context(lines, loc, ctx_size \\ 1) do
    line = :erl_anno.line(loc)
    range = (line - ctx_size)..(line + ctx_size)

    Enum.filter(lines, fn {_, number} -> number in range end)
  end

  @doc """
  Prefixes each line with its number, underlining and colouring the offending
  line.
  """
  def underscore_line(lines, line) do
    Enum.map(lines, fn {str, n} ->
      if(n == line) do
        IO.ANSI.underline() <> IO.ANSI.red() <> to_string(n) <> " " <> str <> IO.ANSI.reset()
      else
        to_string(n) <> " " <> str
      end
    end)
  end

  # Extracts the source path from the leading :file attribute of the forms.
  def get_ex_file_path([{:attribute, 1, :file, {path, 1}} | _]), do: {:ok, path}
  def get_ex_file_path(_), do: {:error, :not_found}

  # defp warning_error_not_handled(error) do
  #   msg = "\nElixir formatter not exist for #{inspect(error, pretty: true)} using default \n"
  #   String.to_charlist(IO.ANSI.light_yellow() <> msg <> IO.ANSI.reset())
  # end

  @spec describe_expr(:gradualizer_type.abstract_expr()) :: binary()
  def describe_expr({:atom, _, _}), do: "atom"
  def describe_expr({:bc, _, _, _}), do: "binary comprehension"
  def describe_expr({:bin, _, _}), do: "bit expression"
  def describe_expr({:block, _, _}), do: "block"
  def describe_expr({:char, _, _}), do: "character"
  def describe_expr({:call, _, _, _}), do: "function call"
  def describe_expr({:catch, _, _}), do: "catch expression"
  def describe_expr({:case, _, _, _}), do: "case expression"
  def describe_expr({:cons, _, _, _}), do: "list"
  def describe_expr({:float, _, _}), do: "float"
  def describe_expr({:fun, _, _}), do: "fun expression"
  def describe_expr({:integer, _, _}), do: "integer"
  def describe_expr({:if, _, _}), do: "if expression"
  def describe_expr({:lc, _, _, _}), do: "list comprehension"
  def describe_expr({:map, _, _}), do: "map"
  def describe_expr({:map, _, _, _}), do: "map update"
  def describe_expr({:match, _, _, _}), do: "match"
  def describe_expr({:named_fun, _, _, _}), do: "named fun expression"
  def describe_expr({nil, _}), do: "empty list"
  # Operators in the Erlang abstract format are atoms; the previous patterns
  # matched charlists ('not' / '-') and therefore could never match.
  def describe_expr({:op, _, :not, _}), do: "negation"
  def describe_expr({:op, _, :-, _}), do: "negation"
  def describe_expr({:op, _, op, _, _}), do: to_string(:io_lib.format("~w expression", [op]))
  def describe_expr({:record, _, _, _}), do: "record"
  def describe_expr({:receive, _, _, _, _}), do: "receive expression"
  def describe_expr({:record, _, _, _, _}), do: "record update"
  def describe_expr({:record_field, _, _, _, _}), do: "record field"
  def describe_expr({:record_index, _, _, _}), do: "record index"
  def describe_expr({:string, _, _}), do: "string"
  def describe_expr({:tuple, _, _}), do: "tuple"
  def describe_expr({:try, _, _, _, _, _}), do: "try expression"
  def describe_expr({:var, _, _}), do: "variable"
  def describe_expr(_), do: "expression"
end
|
lib/gradient/elixir_fmt.ex
| 0.536313 | 0.546073 |
elixir_fmt.ex
|
starcoder
|
defmodule Saxy.Xmerl do
  @moduledoc """
  Provides functions to parse a XML document to
  [xmerl format](https://github.com/erlang/otp/blob/master/lib/xmerl/include/xmerl.hrl)
  data structure.

  See "Types" section for more information.
  """

  import Saxy.Xmerl.Records

  @type position() :: integer()

  @type name() :: atom()

  @type expanded_name() :: charlist()

  @type content() :: [text() | element()]

  @type parent() :: {name(), position()}

  @type namespace_info() :: {charlist(), charlist()}

  @type value() :: [iolist() | atom() | integer()]

  @type language() :: charlist()

  @type namespace() ::
          record(:xmlNamespace,
            default: [],
            nodes: []
          )

  @type text() ::
          record(:xmlText,
            value: value(),
            pos: position(),
            parents: [parent()],
            language: language()
          )

  @type attribute() ::
          record(:xmlAttribute,
            name: name(),
            expanded_name: expanded_name(),
            nsinfo: namespace_info(),
            namespace: namespace(),
            pos: position(),
            value: value(),
            normalized: boolean()
          )

  @type element() ::
          record(:xmlElement,
            name: name(),
            expanded_name: expanded_name(),
            nsinfo: namespace_info(),
            namespace: namespace(),
            attributes: [attribute()],
            pos: position(),
            content: [content()],
            parents: [parent()]
          )

  @doc """
  Parses XML document into Erlang [xmerl](http://erlang.org/doc/man/xmerl.html) format.

  Xmerl format requires tag and attribute names to be atoms. By default Saxy uses
  `String.to_existing_atom/1` to avoid creating atoms at runtime. You could override
  this behaviour by specifying `:atom_fun` option to `String.to_atom/1`.

  Warning: However, `String.to_atom/1` function creates atoms dynamically and atoms are not
  garbage-collected. Therefore, you should not use this if the input XML cannot be trusted,
  such as input received from a socket or during a web request.

  ## Examples

      iex> string = File.read!("./test/support/fixture/foo.xml")
      iex> Saxy.Xmerl.parse_string(string)
      {:ok,
       {:xmlElement,
        :foo,
        :foo,
        [],
        {:xmlNamespace, [], []},
        [],
        1,
        [{:xmlAttribute, :bar, :bar, [], [], [], 1, [], 'value', :undefined}],
        [],
        [],
        [],
        :undeclared}}

  ## Options

  * `:atom_fun` - The function to convert string to atom. Defaults to `String.to_existing_atom/1`.
  * `:expand_entity` - specifies how external entity references should be handled. Three supported strategies respectively are:
    * `:keep` - keep the original binary, for example `Orange &reg;` will be expanded to `"Orange &reg;"`, this is the default strategy.
    * `:skip` - skip the original binary, for example `Orange &reg;` will be expanded to `"Orange "`.
    * `{mod, fun, args}` - take the applied result of the specified MFA.
  """
  # Spec now covers the optional second argument; the previous spec declared
  # parse_string/1 which does not match this definition (Dialyzer error).
  @spec parse_string(data :: binary(), options :: keyword()) ::
          {:ok, element()} | {:error, Saxy.ParseError.t()}
  def parse_string(data, options \\ []) do
    # :atom_fun is consumed here; the remaining options are forwarded to Saxy.
    {atom_fun, options} = Keyword.pop(options, :atom_fun, &String.to_existing_atom/1)
    state = %Saxy.Xmerl.State{atom_fun: atom_fun}

    case Saxy.parse_string(data, __MODULE__.Handler, state, options) do
      {:ok, %{stack: [document]}} ->
        {:ok, document}

      {:error, _reason} = error ->
        error
    end
  end
end
|
lib/saxy/xmerl.ex
| 0.91519 | 0.445047 |
xmerl.ex
|
starcoder
|
defmodule Deckhub.Ecto.Markdown do
  @moduledoc """
  An `Ecto.Type` that converts between a plain string column in the database
  and a `Deckhub.Markdown` struct in memory.

  Use this as the type of the database field in the schema:

  ```
  defmodule Deckhub.Hearthstone.Term do
    use Ecto.Schema

    alias Deckhub.Ecto.Markdown

    schema "terms" do
      field :key, :string
      field :value, Markdown

      timestamps()
    end
  end
  ```

  This type requires special handling in forms because Phoenix's form builder functions call
  `Phoenix.HTML.html_escape/1` on all field values, which returns the `html` field on this type. But
  what we want when we show an `t:Deckhub.Ecto.Markdown.t/0` value in a form is the `text` field.

  See: [Beyond Functions in Elixir: Refactoring for Maintainability][beyond-functions]

  [beyond-functions]: https://blog.usejournal.com/beyond-functions-in-elixir-refactoring-for-maintainability-5c73daba77f3
  """

  use Ecto.Type

  @doc """
  Returns the underlying schema type for the custom type.

  See: `c:Ecto.Type.type/0`
  """
  @impl Ecto.Type
  def type, do: :string

  @doc """
  Casts the given input to the custom type.

  Plain strings become the `text` of a new `Deckhub.Markdown` struct; existing
  structs pass through untouched; anything else is rejected.

  See: `c:Ecto.Type.cast/1`
  """
  @impl Ecto.Type
  def cast(%Deckhub.Markdown{} = markdown), do: {:ok, markdown}

  def cast(text) when is_binary(text), do: {:ok, %Deckhub.Markdown{text: text}}

  def cast(_other), do: :error

  @doc """
  Loads the stored string into a `Deckhub.Markdown` struct, rendering the HTML
  eagerly from the raw markdown text.

  See: `c:Ecto.Type.load/1`
  """
  @impl Ecto.Type
  def load(text) when is_binary(text) do
    # credo:disable-for-next-line
    {:ok, %Deckhub.Markdown{text: text, html: Deckhub.Markdown.to_html(text)}}
  end

  def load(_other), do: :error

  @doc """
  Dumps the given term into an Ecto native type — the raw markdown text.

  See: `c:Ecto.Type.dump/1`
  """
  @impl Ecto.Type
  def dump(%Deckhub.Markdown{text: text}) when is_binary(text), do: {:ok, text}

  def dump(text) when is_binary(text), do: {:ok, text}

  def dump(_other), do: :error
end
|
lib/deckhub/ecto/markdown.ex
| 0.85373 | 0.832032 |
markdown.ex
|
starcoder
|
defmodule Kino.Control do
  @moduledoc """
  Various widgets for user interactions.

  Each widget is a UI control element that the user interacts
  with, consequently producing an event stream.

  Those widgets are often useful paired with `Kino.Frame` for
  presenting content that changes upon user interactions.

  ## Examples

  First, create a control and make sure it is rendered,
  either by placing it at the end of a code cell or by
  explicitly rendering it with `Kino.render/1`.

      button = Kino.Control.button("Hello")

  Next, to receive events from the control, a process needs to
  subscribe to it and pick a name to distinguish the events.

      Kino.Control.subscribe(button, :hello)

  As the user interacts with the button, the subscribed process
  receives corresponding events.

      IEx.Helpers.flush()
      #=> {:hello, %{origin: #PID<10895.9854.0>}}
      #=> {:hello, %{origin: #PID<10895.9854.0>}}
  """

  defstruct [:attrs]

  @opaque t :: %__MODULE__{attrs: Kino.Output.control_attrs()}

  @opaque interval :: {:interval, milliseconds :: non_neg_integer()}

  # Builds a control struct: assigns a unique ref, points the event
  # destination at the cross-node subscription manager, ties the object's
  # lifetime to the calling process, and arranges for the event topic to be
  # cleared when the object is released.
  defp new(attrs) do
    ref = Kino.Output.random_ref()
    subscription_manager = Kino.SubscriptionManager.cross_node_name()

    attrs = Map.merge(attrs, %{ref: ref, destination: subscription_manager})

    Kino.Bridge.reference_object(ref, self())
    Kino.Bridge.monitor_object(ref, subscription_manager, {:clear_topic, ref})

    %__MODULE__{attrs: attrs}
  end

  @doc """
  Creates a new button.
  """
  @spec button(String.t()) :: t()
  def button(label) when is_binary(label) do
    new(%{type: :button, label: label})
  end

  @doc """
  Creates a new keyboard control.

  This widget is represented as button that toggles interception
  mode, in which the given keyboard events are captured.

  ## Event info

  In addition to standard properties, all events include additional
  properties.

  ### Key events

    * `:type` - either `:keyup` or `:keydown`

    * `:key` - the value matching the browser [KeyboardEvent.key](https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key)

  ### Status event

    * `:type` - either `:status`

    * `:enabled` - whether the keyboard is activated

  ## Examples

  Create the widget:

      keyboard = Kino.Control.keyboard([:keyup, :keydown, :status])

  Subscribe to events:

      Kino.Control.subscribe(keyboard, :keyboard)

  As the user types events are streamed:

      IEx.Helpers.flush()
      #=> {:keyboard, %{enabled: true, origin: #PID<10895.9854.0>, type: :status}
      #=> {:keyboard, %{key: "o", origin: #PID<10895.9854.0>, type: :keydown}}
      #=> {:keyboard, %{key: "k", origin: #PID<10895.9854.0>, type: :keydown}}
      #=> {:keyboard, %{key: "o", origin: #PID<10895.9854.0>, type: :keyup}}
      #=> {:keyboard, %{key: "k", origin: #PID<10895.9854.0>, type: :keyup}}
  """
  @spec keyboard(list(:keyup | :keydown | :status)) :: t()
  def keyboard(events) when is_list(events) do
    # Validate eagerly so bad arguments fail at creation time, not render time.
    if events == [] do
      raise ArgumentError, "expected at least one event, got: []"
    end

    for event <- events do
      unless event in [:keyup, :keydown, :status] do
        raise ArgumentError,
              "expected event to be either :keyup, :keydown or :status, got: #{inspect(event)}"
      end
    end

    new(%{type: :keyboard, events: events})
  end

  @doc """
  Creates a new form.

  A form is composed of regular inputs from the `Kino.Input` module,
  however in a form input values are not synchronized between users.
  Consequently, the form is another control for producing user-specific
  events.

  Either `:submit` or `:report_changes` must be specified.

  ## Options

    * `:submit` - specifies the label to use for the submit button
      and enables submit events

    * `:report_changes` - whether to send new form value whenever any
      of the input changes. Defaults to `false`

    * `:reset_on_submit` - a list of fields to revert to their default
      values once the form is submitted. Use `true` to indicate all
      fields. Defaults to `[]`

  ## Event info

  In addition to standard properties, all events include additional
  properties.

    * `:type` - either `:submit` or `:change`

    * `:data` - a map with field values, matching the field list

  ## Examples

  Create a form out of inputs:

      form =
        Kino.Control.form(
          [
            name: Kino.Input.text("Name"),
            message: Kino.Input.textarea("Message")
          ],
          submit: "Send"
        )

  Subscribe to events:

      Kino.Control.subscribe(form, :chat_form)

  As users submit the form the payload is sent:

      IEx.Helpers.flush()
      #=> {:chat_form,
      #=>   %{
      #=>     data: %{message: "Hola", name: "Amy"},
      #=>     origin: #PID<10905.5195.0>,
      #=>     type: :submit
      #=>   }}
      #=> {:chat_form,
      #=>   %{
      #=>     data: %{message: "Hey!", name: "Jake"},
      #=>     origin: #PID<10905.5186.0>,
      #=>     type: :submit
      #=>   }}
  """
  @spec form(list({atom(), Kino.Input.t()}), keyword()) :: t()
  def form(fields, opts \\ []) when is_list(fields) do
    if fields == [] do
      raise ArgumentError, "expected at least one field, got: []"
    end

    for {field, input} <- fields do
      unless is_atom(field) do
        raise ArgumentError,
              "expected each field key to be an atom, got: #{inspect(field)}"
      end

      unless is_struct(input, Kino.Input) do
        raise ArgumentError,
              "expected each field to be a Kino.Input widget, got: #{inspect(input)} for #{inspect(field)}"
      end
    end

    unless opts[:submit] || opts[:report_changes] do
      raise ArgumentError, "expected either :submit or :report_changes option to be enabled"
    end

    fields =
      Enum.map(fields, fn {field, input} ->
        # Make sure we use this input only in the form and nowhere else
        input = Kino.Input.duplicate(input)
        {field, input.attrs}
      end)

    submit = Keyword.get(opts, :submit, nil)

    # When enabled, change reporting applies to every field in the form.
    report_changes =
      if Keyword.get(opts, :report_changes, false) do
        Map.new(fields, fn {field, _} -> {field, true} end)
      else
        %{}
      end

    # `true` expands to all field keys; `false` to none; otherwise an
    # explicit list of field keys is used as given.
    reset_on_submit =
      case Keyword.get(opts, :reset_on_submit, []) do
        true -> Keyword.keys(fields)
        false -> []
        fields -> fields
      end

    new(%{
      type: :form,
      fields: fields,
      submit: submit,
      report_changes: report_changes,
      reset_on_submit: reset_on_submit
    })
  end

  @doc """
  Subscribes the calling process to control or input events.

  The events are sent as `{tag, info}`, where info is a map with
  event details. In particular, it always includes `:origin`, which
  is an opaque identifier of the client that triggered the event.
  """
  @spec subscribe(t() | Kino.Input.t(), term()) :: :ok
  def subscribe(source, tag)
      when is_struct(source, Kino.Control) or is_struct(source, Kino.Input) do
    Kino.SubscriptionManager.subscribe(source.attrs.ref, self(), tag)
  end

  @doc """
  Unsubscribes the calling process from control or input events.
  """
  @spec unsubscribe(t() | Kino.Input.t()) :: :ok
  def unsubscribe(source)
      when is_struct(source, Kino.Control) or is_struct(source, Kino.Input) do
    Kino.SubscriptionManager.unsubscribe(source.attrs.ref, self())
  end

  @doc """
  Returns a new interval event source.

  This can be used as event source for `stream/1` and `tagged_stream/1`.
  The events are emitted periodically with an increasing value, starting
  from 0 and have the form:

      %{type: :interval, iteration: non_neg_integer()}
  """
  @spec interval(non_neg_integer()) :: interval()
  def interval(milliseconds) when is_number(milliseconds) and milliseconds > 0 do
    {:interval, milliseconds}
  end

  @doc """
  Returns a `Stream` of control events.

  This is an alternative API to `subscribe/2`, such that event
  messages are consumed via stream instead of process messages.

  It accepts a single source or a list of sources, where each
  source is either of:

    * `Kino.Control` - emitting value on relevant interaction

    * `Kino.Input` - emitting value on value change

  ## Example

      button = Kino.Control.button("Hello")

      for event <- Kino.Control.stream(button) do
        IO.inspect(event)
      end
      #=> %{origin: #PID<10895.9854.0>, type: :click}
      #=> %{origin: #PID<10895.9854.0>, type: :click}

  Or with multiple sources:

      button = Kino.Control.button("Hello")
      input = Kino.Input.checkbox("Check")
      interval = Kino.Control.interval(1000)

      for event <- Kino.Control.stream([button, input, interval]) do
        IO.inspect(event)
      end
      #=> %{type: :interval, iteration: 0}
      #=> %{origin: #PID<10895.9854.0>, type: :click}
      #=> %{origin: #PID<10895.9854.0>, type: :change, value: true}
  """
  @spec stream(source | list(source)) :: Enumerable.t()
        when source: t() | Kino.Input.t() | interval()
  def stream(sources) when is_list(sources) do
    for source <- sources, do: assert_stream_source!(source)

    # Untagged streams use `nil` as the tag and unwrap it in the mapper.
    tagged_topics = for %{attrs: %{ref: ref}} <- sources, do: {nil, ref}
    tagged_intervals = for {:interval, ms} <- sources, do: {nil, ms}

    build_stream(tagged_topics, tagged_intervals, fn nil, event -> event end)
  end

  def stream(source) do
    stream([source])
  end

  @doc """
  Same as `stream/1`, but attaches custom tag to every stream item.

  ## Example

      button = Kino.Control.button("Hello")
      input = Kino.Input.checkbox("Check")

      for event <- Kino.Control.tagged_stream([hello: button, check: input]) do
        IO.inspect(event)
      end
      #=> {:hello, %{origin: #PID<10895.9854.0>, type: :click}}
      #=> {:check, %{origin: #PID<10895.9854.0>, type: :change, value: true}}
  """
  def tagged_stream(entries) when is_list(entries) do
    for entry <- entries do
      case entry do
        {tag, source} when is_atom(tag) ->
          assert_stream_source!(source)

        _other ->
          raise ArgumentError, "expected a keyword list, got: #{inspect(entries)}"
      end
    end

    tagged_topics = for {tag, %{attrs: %{ref: ref}}} <- entries, do: {tag, ref}
    tagged_intervals = for {tag, {:interval, ms}} <- entries, do: {tag, ms}

    build_stream(tagged_topics, tagged_intervals, fn tag, event -> {tag, event} end)
  end

  defp assert_stream_source!(%Kino.Control{}), do: :ok
  defp assert_stream_source!(%Kino.Input{}), do: :ok
  defp assert_stream_source!({:interval, ms}) when is_number(ms) and ms > 0, do: :ok

  defp assert_stream_source!(item) do
    raise ArgumentError,
          "expected source to be either %Kino.Control{}, %Kino.Input{} or {:interval, ms}, got: #{inspect(item)}"
  end

  # Builds a lazy stream over subscription-manager topics and timer-driven
  # intervals. A fresh `ref` scopes all messages to this particular stream so
  # concurrent streams in the same process do not interfere.
  defp build_stream(tagged_topics, tagged_intervals, mapper) do
    Stream.resource(
      # Start: subscribe to every topic (asking to be notified when a topic is
      # cleared) and schedule the first tick of every interval.
      fn ->
        ref = make_ref()

        for {tag, topic} <- tagged_topics do
          Kino.SubscriptionManager.subscribe(topic, self(), {ref, tag}, notify_clear: true)
        end

        for {tag, ms} <- tagged_intervals do
          Process.send_after(self(), {{ref, tag}, :__interval__, ms, 0}, ms)
        end

        topics = Enum.map(tagged_topics, &elem(&1, 1))

        {ref, topics}
      end,
      # Next: block until a message scoped to `ref` arrives. Topic events are
      # emitted through `mapper`; a cleared topic is dropped from the set and
      # the stream halts once no topics remain; interval ticks re-arm the
      # timer and emit an iteration event.
      fn {ref, topics} ->
        receive do
          {{^ref, tag}, event} ->
            {[mapper.(tag, event)], {ref, topics}}

          {{^ref, _tag}, :topic_cleared, topic} ->
            case topics -- [topic] do
              [] -> {:halt, {ref, []}}
              topics -> {[], {ref, topics}}
            end

          {{^ref, tag}, :__interval__, ms, i} ->
            Process.send_after(self(), {{ref, tag}, :__interval__, ms, i + 1}, ms)
            event = %{type: :interval, iteration: i}
            {[mapper.(tag, event)], {ref, topics}}
        end
      end,
      # Cleanup: unsubscribe from any topics still active.
      fn {_ref, topics} ->
        for topic <- topics do
          Kino.SubscriptionManager.unsubscribe(topic, self())
        end
      end
    )
  end
end
|
lib/kino/control.ex
| 0.893443 | 0.477311 |
control.ex
|
starcoder
|
defmodule Painting do
  @moduledoc """
  Module to create and manipulate Paintings. A painting has a status that can be:

  - :not_ready -> Painting cannot be started, it needs configuration
  - :ready -> Painting can be started, all configuration is set
  - :in_progress -> Painting is in progress.
  - :complete -> Painting is complete.

  To change between :not_ready and :ready you need to set content, style and settings.

  ## Examples

      iex> p = Painting.new("my_painting")
      iex> p.status
      :not_ready
      iex> p = Painting.add_content(p, "content.png")
      iex> p.status
      :not_ready
      iex> p = Painting.add_style(p, "style.png")
      iex> p.status
      :not_ready
      iex> p = Painting.add_settings(p, %Settings{})
      iex> p.status
      :ready
  """
  alias Painting.Settings
  alias Painting.Iteration

  defstruct name: nil, content: nil, style: nil, settings: nil, status: :not_ready, iterations: []

  @type t :: %__MODULE__{}

  @doc """
  Creates a painting with a given name

  ## Examples

      iex> p = Painting.new("my_painting")
      iex> p.status
      :not_ready
  """
  @spec new(name :: String.t) :: t
  def new(name), do: %__MODULE__{name: name}

  @doc """
  Adds content to a painting

  ## Examples

      iex> p = Painting.new("my_painting")
      iex> p.content
      nil
      iex> p = Painting.add_content(p, "content.png")
      iex> p.content
      "content.png"
  """
  @spec add_content(painting :: t, content :: String.t) :: t
  def add_content(%__MODULE__{} = p, content) do
    %{p | content: content}
    |> update_status()
  end

  @doc """
  Adds style to a painting

  ## Examples

      iex> p = Painting.new("my_painting")
      iex> p.style
      nil
      iex> p = Painting.add_style(p, "style.png")
      iex> p.style
      "style.png"
  """
  @spec add_style(painting :: t, style :: String.t) :: t
  def add_style(%__MODULE__{} = p, style) do
    %{p | style: style}
    |> update_status()
  end

  @doc """
  Adds settings to a painting

  ## Examples

      iex> p = Painting.new("my_painting")
      iex> p.settings
      nil
      iex> p = Painting.add_settings(p, %Settings{})
      iex> p.settings
      %Settings{}
  """
  @spec add_settings(painting :: t, settings :: Settings.t) :: t
  def add_settings(%__MODULE__{} = p, %Settings{} = settings) do
    %{p | settings: settings}
    |> update_status()
  end

  @doc """
  Starts a painting, moving its status from :ready to :in_progress

  ## Examples

      iex> ready_painting().status
      :ready
      iex> Painting.start(ready_painting()).status
      :in_progress
  """
  @spec start(painting :: t) :: t
  def start(%__MODULE__{status: :ready} = p) do
    %{p | status: :in_progress}
  end

  @doc """
  Completes a painting, moving its status from :in_progress to :complete

  ## Examples

      iex> in_progress_painting().status
      :in_progress
      iex> Painting.complete(in_progress_painting()).status
      :complete
  """
  @spec complete(painting :: t) :: t
  def complete(%__MODULE__{status: :in_progress} = p) do
    %{p | status: :complete}
  end

  @doc """
  Adds an iteration to a painting

  ## Examples

      iex> p = Painting.new("my_painting")
      iex> p.settings
      nil
      iex> p = Painting.add_settings(p, %Settings{})
      iex> p.settings
      %Settings{}
  """
  @spec add_iteration(painting :: t, iter :: Iteration.t) :: t
  # A :ready painting is implicitly started by its first iteration.
  def add_iteration(%__MODULE__{status: :ready} = p, %Iteration{} = iter) do
    add_iteration(%{p | status: :in_progress}, iter)
  end

  def add_iteration(%__MODULE__{status: :in_progress} = p, %Iteration{} = iter) do
    %{p | iterations: p.iterations ++ [iter]}
    |> update_status()
  end

  # Prefixes `path` onto the content, style and every iteration file name,
  # e.g. to turn relative names into absolute ones.
  def prepend_path(%__MODULE__{content: content, style: style, iterations: iterations} = p, path) do
    %{p |
      content: path <> content,
      style: path <> style,
      iterations: iterations |> Enum.map(fn i -> %{i | file_name: path <> i.file_name} end)
    }
  end

  # A painting becomes :ready only once content, style and settings are all set.
  defp update_status(%__MODULE__{content: nil, status: :not_ready} = p), do: p
  defp update_status(%__MODULE__{style: nil, status: :not_ready} = p), do: p
  defp update_status(%__MODULE__{settings: nil, status: :not_ready} = p), do: p
  defp update_status(%__MODULE__{status: :not_ready} = p), do: %{p | status: :ready}
  defp update_status(%__MODULE__{status: :ready} = p), do: p
  # NOTE(review): stays :in_progress while length(iterations) <= n_iterations,
  # i.e. completion only triggers once the count *exceeds* the configured
  # total — confirm this boundary is intended (iteration 0 may be a baseline).
  defp update_status(%__MODULE__{status: :in_progress, settings: %Settings{iterations: n_iterations}, iterations: iterations} = p) when length(iterations) <= n_iterations, do: p
  defp update_status(%__MODULE__{status: :in_progress} = p), do: %{p | status: :complete}
end
|
apps/painting/lib/painting.ex
| 0.805709 | 0.411229 |
painting.ex
|
starcoder
|
defmodule Openflow.Action.SetField do
  @moduledoc """
  Set a header field using OXM TLV format.
  """

  defstruct(field: nil)

  alias __MODULE__

  @type t :: %SetField{field: Keyword.t()}

  # Fixed size of the ofp_action_set_field header: type(2) + len(2) + 4 OXM bytes.
  @set_field_size 8

  # OFPAT_SET_FIELD action type code.
  def ofpat, do: 25

  @doc """
  Create a new set_field action struct

  note: The following oxm(nxm)_header values are potentially acceptable as `field`:

  - :tun_id
  - :tun_ipv4_src
  - :tun_ipv4_dst
  - :tun_ipv6_src
  - :tun_ipv6_dst
  - :tun_flags
  - :tun_gbp_id
  - :tun_gbp_flags
  - :tun_metadata{0..63}
  - :in_port
  - :pkt_mark
  - :ct_mark
  - :ct_label
  - :reg{0..15}
  - :xreg{0..8}
  - :xxreg{0..4}
  - :eth_src
  - :eth_dst
  - :vlan_tci
  - :mpls_ttl
  - :ip_src
  - :ip_dst
  - :ipv6_src
  - :ipv6_dst
  - :ipv6_label
  - :ip_tos
  - :ip_ecn
  - :ip_ttl
  - :arp_op
  - :arp_spa
  - :arp_tpa
  - :arp_sha
  - :arp_tha
  - :tcp_src
  - :tcp_dst
  - :udp_src
  - :udp_dst
  - :icmp_type
  - :icmp_code
  - :icmpv6_type
  - :icmpv6_code
  - :nd_target
  - :nd_sll
  - :nd_tll
  - :metadata

  ```elixir
  iex> %SetField{field: [reg1: 10]} = SetField.new(reg1: 10)
  ```
  """
  @spec new(Keyword.t()) :: t()
  def new([{_field, _value}] = oxm_field) do
    %SetField{field: oxm_field}
  end

  @doc """
  Serializes the action to its wire format: the OFPAT_SET_FIELD header
  followed by the single OXM TLV, zero-padded to a multiple of 8 bytes.
  """
  def to_binary(%SetField{field: field}) do
    match_bin =
      field
      |> Openflow.Match.new()
      |> Openflow.Match.to_binary()

    # Strip the ofp_match header (type 1 = OFPMT_OXM, then length), keeping
    # the already-padded OXM payload.
    # NOTE(review): the leading segments of these binary patterns were
    # corrupted in the source; 1::16 / 25::16 are reconstructed from the
    # OpenFlow 1.3 spec (OFPMT_OXM = 1, OFPAT_SET_FIELD = 25) — confirm
    # against the original repository.
    <<1::16, _length::16, padded_field::bytes>> = match_bin
    partial_len = @set_field_size - 4 + byte_size(padded_field)
    padding = Openflow.Utils.padding(partial_len, 8)
    length = partial_len + padding
    <<25::16, length::16, padded_field::bytes, 0::size(padding)-unit(8)>>
  end

  @doc """
  Parses the wire format of an OFPAT_SET_FIELD action back into a struct.
  """
  def read(<<25::16, _length::16, match_field_bin::bytes>>) do
    # Peek at the OXM TLV header to learn the payload length (flen).
    <<_class::16, _field::7, _hm::1, flen::8, _rest::bytes>> = match_field_bin
    match_len = 4 + 4 + flen
    # Rebuild an ofp_match binary (type 1 = OFPMT_OXM) so the generic match
    # reader can decode the single OXM TLV carried by this action.
    match_bin = <<1::16, match_len::16, match_field_bin::bytes>>
    {[field | _], _rest} = Openflow.Match.read(match_bin)
    %SetField{field: [field]}
  end
end
|
lib/openflow/actions/set_field.ex
| 0.736969 | 0.641956 |
set_field.ex
|
starcoder
|
defmodule Formex.Field do
alias __MODULE__
@doc """
Defines the Formex.Field struct.
* `:name` - a field name, for example: `:title`
* `:struct_name` - a name of a key in your struct. By default the same as `:name`
* `:custom_value` - custom function that extracts a value that will be used in view
* `:type` - a type of a field that in most cases will be the name of a function from
[`Phoenix.HTML.Form`](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html)
* `:value` - the value from struct/params
* `:required` - is field required? Used only in template, not validated
* `:validation` - validation rules to be passed to a validator
* `:label` - the text label
* `:data` - additional data used by particular field type (eg. `:select` stores here data
for `<option>`'s)
* `:opts` - options
* `:phoenix_opts` - options that will be passed to
[`Phoenix.HTML.Form`](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html)
"""
defstruct name: nil,
struct_name: nil,
custom_value: nil,
type: nil,
required: true,
validation: [],
label: "",
data: [],
opts: [],
phoenix_opts: []
@type t :: %Field{}
@doc """
Creates a new field.
`type` is the name of function from
[`Phoenix.HTML.Form`](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html).
## Options
* `:label`
* `:required` - defaults to true. Used only by the template helper to generate an additional
`.required` CSS class.
* `:struct_name` - a name of a key in your struct. Defaults to the `name` variable
* `:custom_value` - use this, if you need to change value that will be used in view.
For example, field of `Money.Ecto.Type` type casted to string returns a formatted number,
when we may need a raw number. In this case we should use:
```
form
|> add(:money, :text_input, custom_value: fn value ->
if value do
value.amount
end
end)
```
* `:phoenix_opts` - options that will be passed to
[`Phoenix.HTML.Form`](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html), for example:
```
form
|> add(:content, :textarea, phoenix_opts: [
rows: 4
])
```
## Options for `<select>`
* `:choices` - list of `<option>`s. Named "choices", not "options", because we don't want to
confuse it with the rest of options
```
form
|> add(:field, :select, choices: ["Option 1": 1, "Options 2": 2])
```
```
form
|> add(:country, :select, choices: [
"Europe": ["UK": 1, "Sweden": 2, "Poland": 3],
"Asia": [...]
])
```
* `:without_choices` - set this option to true if you want to render select without
any `<option>`s and provide them in another way (for example, using
[Ajax-Bootstrap-Select](https://github.com/truckingsim/Ajax-Bootstrap-Select)).
It disables choices rendering in `Formex.Ecto.CustomField.SelectAssoc`.
* `:choice_label_provider` - used along with `:select_without_choices`.
When form is sent but it's displayed again (because of some errors), we have to render
<select>` with a single `<option>`, previously chosen by user.
This option expects a function that receives id and returns some label.
```
form
|> add(:customer, :select, without_choices: true, choice_label_provider: fn id ->
Repo.get(Customer, id).name
end)
```
`Formex.Ecto.CustomField.SelectAssoc` will set this option for you
"""
def create_field(type, name, opts \\ []) do
  # Select-like fields get a :without_choices default and carry their
  # choices inside the field's data; every other type has empty data.
  {opts, data} = prepare_type_specific(type, opts)

  %Field{
    name: name,
    struct_name: Keyword.get(opts, :struct_name, name),
    custom_value: Keyword.get(opts, :custom_value),
    type: type,
    label: get_label(name, opts),
    required: Keyword.get(opts, :required, true),
    validation: Keyword.get(opts, :validation, []),
    data: data,
    opts: prepare_opts(opts),
    phoenix_opts: prepare_phoenix_opts(opts)
  }
end

# Defaults :without_choices to false and copies :choices into the data
# keyword list for select fields; passes opts through untouched otherwise.
defp prepare_type_specific(type, opts) when type in [:select, :multiple_select] do
  opts = Keyword.put_new(opts, :without_choices, false)
  {opts, [choices: Keyword.get(opts, :choices, [])]}
end

defp prepare_type_specific(_type, opts), do: {opts, []}
@doc false
# Returns the explicit :label option when given (and truthy); otherwise
# falls back to the stringified field name.
def get_label(name, opts) do
  opts[:label] || Atom.to_string(name)
end
@doc false
# Reads the field's value from the form's backing struct, or nil when the
# form has no struct (if-without-else yields nil).
def get_value(form, name) do
  if struct = form.struct do
    Map.get(struct, name)
  end
end
@doc false
# Strips the keys that are consumed elsewhere (:phoenix_opts is rendered
# separately, :custom_value is stored on the struct) before the remaining
# opts are attached to the field.
def prepare_opts(opts) do
  Keyword.drop(opts, [:phoenix_opts, :custom_value])
end
@doc false
# Extracts :phoenix_opts (defaulting to []) and guarantees a :class entry
# is present, so templates can always append to it.
def prepare_phoenix_opts(opts) do
  popts = Keyword.get(opts, :phoenix_opts) || []
  if popts[:class], do: popts, else: Keyword.put(popts, :class, "")
end
end
|
lib/formex/field.ex
| 0.894132 | 0.895477 |
field.ex
|
starcoder
|
defmodule Ecto.Adapters.Postgres.SQL do
  @moduledoc false
  # This module handles the generation of SQL code from queries and for create,
  # update and delete. All queries has to be normalized and validated for
  # correctness before given to this module.
  #
  # NOTE(review): this file uses pre-1.0 Elixir constructs (`Query[] = ...`
  # record access syntax, `tuple_to_list/1`, `integer_to_binary/1`, `Dict`,
  # `lc ... inbits` bit comprehensions). It will not compile on modern
  # Elixir; keep that in mind before touching it.
  alias Ecto.Query.Query
  alias Ecto.Query.QueryExpr
  alias Ecto.Query.JoinExpr
  alias Ecto.Query.Util
  alias Ecto.Query.Normalizer

  # Operator/function translation tables, consumed at compile time by the
  # Enum.map blocks below which define one translate_name/2 clause per entry.
  unary_ops = [ -: "-", +: "+" ]

  binary_ops =
    [ ==: "=", !=: "!=", <=: "<=", >=: ">=", <: "<", >: ">",
      and: "AND", or: "OR",
      +: "+", -: "-", *: "*",
      <>: "||", ++: "||",
      pow: "^", div: "/", rem: "%",
      date_add: "+", date_sub: "-" ]

  functions =
    [ { { :downcase, 1 }, "lower" }, { { :upcase, 1 }, "upper" } ]

  @binary_ops Dict.keys(binary_ops)

  Enum.map(unary_ops, fn { op, str } ->
    defp translate_name(unquote(op), 1), do: { :unary_op, unquote(str) }
  end)

  Enum.map(binary_ops, fn { op, str } ->
    defp translate_name(unquote(op), 2), do: { :binary_op, unquote(str) }
  end)

  Enum.map(functions, fn { { fun, arity }, str } ->
    defp translate_name(unquote(fun), unquote(arity)), do: { :fun, unquote(str) }
  end)

  # Fallback: any other call is emitted as a plain SQL function of the same name.
  defp translate_name(fun, _arity), do: { :fun, atom_to_binary(fun) }

  # Generate SQL for a select statement
  def select(Query[] = query) do
    # Generate SQL for every query expression type and combine to one string
    sources = create_names(query)
    { from, used_names } = from(query.from, sources)
    select = select(query.select, sources)
    join = join(query, sources, used_names)
    where = where(query.wheres, sources)
    group_by = group_by(query.group_bys, sources)
    having = having(query.havings, sources)
    order_by = order_by(query.order_bys, sources)
    limit = limit(query.limit)
    offset = offset(query.offset)

    # nil clauses (e.g. no LIMIT) are dropped before joining with newlines.
    [select, from, join, where, group_by, having, order_by, limit, offset]
    |> Enum.filter(&(&1 != nil))
    |> List.flatten
    |> Enum.join("\n")
  end

  # Generate SQL for an insert statement
  def insert(entity) do
    module = elem(entity, 0)
    table = entity.model.__model__(:source)
    primary_key = module.__entity__(:primary_key)
    pk_value = entity.primary_key
    # Include the primary key column only when a value was supplied.
    zipped = module.__entity__(:entity_kw, entity, primary_key: !!pk_value)
    [ fields, values ] = List.unzip(zipped)
    # RETURNING is appended so a DB-generated primary key can be read back.
    "INSERT INTO #{table} (" <> Enum.join(fields, ", ") <> ")\n" <>
    "VALUES (" <> Enum.map_join(values, ", ", &literal(&1)) <> ")" <>
    if primary_key && !pk_value, do: "\nRETURNING #{primary_key}", else: ""
  end

  # Generate SQL for an update statement
  def update(entity) do
    module = elem(entity, 0)
    table = entity.model.__model__(:source)
    pk_field = module.__entity__(:primary_key)
    pk_value = entity.primary_key
    zipped = module.__entity__(:entity_kw, entity, primary_key: false)
    zipped_sql = Enum.map_join(zipped, ", ", fn({k, v}) ->
      "#{k} = #{literal(v)}"
    end)
    "UPDATE #{table} SET " <> zipped_sql <> "\n" <>
    "WHERE #{pk_field} = #{literal(pk_value)}"
  end

  # Generate SQL for an update all statement
  def update_all(Query[] = query, values) do
    names = create_names(query)
    from = elem(names, 0)
    { table, name } = Util.source(from)
    zipped_sql = Enum.map_join(values, ", ", fn({field, expr}) ->
      "#{field} = #{expr(expr, names)}"
    end)
    where = if query.wheres == [], do: "", else: "\n" <> where(query.wheres, names)
    "UPDATE #{table} AS #{name}\n" <>
    "SET " <> zipped_sql <>
    where
  end

  # Generate SQL for a delete statement
  def delete(entity) do
    module = elem(entity, 0)
    table = entity.model.__model__(:source)
    pk_field = module.__entity__(:primary_key)
    pk_value = entity.primary_key
    "DELETE FROM #{table} WHERE #{pk_field} = #{literal(pk_value)}"
  end

  # Generate SQL for an delete all statement
  def delete_all(Query[] = query) do
    names = create_names(query)
    from = elem(names, 0)
    { table, name } = Util.source(from)
    where = if query.wheres == [], do: "", else: "\n" <> where(query.wheres, names)
    "DELETE FROM #{table} AS #{name}" <> where
  end

  defp select(expr, sources) do
    QueryExpr[expr: expr] = Normalizer.normalize_select(expr)
    "SELECT " <> select_clause(expr, sources)
  end

  defp from(from, sources) do
    from_model = Util.model(from)
    source = tuple_to_list(sources) |> Enum.find(&(from_model == Util.model(&1)))
    { table, name } = Util.source(source)
    # The chosen alias is returned so join/3 will not reuse it.
    { "FROM #{table} AS #{name}", [name] }
  end

  defp join(Query[] = query, sources, used_names) do
    # We need to make sure that we get a unique name for each entity since
    # the same entity can be referenced multiple times in joins
    sources_list = tuple_to_list(sources)
    Enum.map_reduce(query.joins, used_names, fn(join, names) ->
      join = JoinExpr[] = Normalizer.normalize_join(join, query)

      source = Enum.find(sources_list, fn({ { source, name }, _, model }) ->
        ((source == join.source) or (model == join.source)) and not name in names
      end)

      { table, name } = Util.source(source)
      on_sql = expr(join.on.expr, sources)
      qual = join_qual(join.qual)
      { "#{qual} JOIN #{table} AS #{name} ON " <> on_sql, [name|names] }
    end) |> elem(0)
  end

  defp join_qual(:inner), do: "INNER"
  defp join_qual(:left), do: "LEFT OUTER"
  defp join_qual(:right), do: "RIGHT OUTER"
  defp join_qual(:full), do: "FULL OUTER"

  defp where(wheres, sources) do
    boolean("WHERE", wheres, sources)
  end

  defp group_by([], _sources), do: nil

  defp group_by(group_bys, sources) do
    exprs = Enum.map_join(group_bys, ", ", fn(expr) ->
      Enum.map_join(expr.expr, ", ", fn({ var, field }) ->
        { _, name } = Util.find_source(sources, var) |> Util.source
        "#{name}.#{field}"
      end)
    end)
    "GROUP BY " <> exprs
  end

  defp having(havings, sources) do
    boolean("HAVING", havings, sources)
  end

  defp order_by([], _sources), do: nil

  defp order_by(order_bys, sources) do
    exprs = Enum.map_join(order_bys, ", ", fn(expr) ->
      Enum.map_join(expr.expr, ", ", &order_by_expr(&1, sources))
    end)
    "ORDER BY " <> exprs
  end

  defp order_by_expr({ dir, var, field }, sources) do
    { _, name } = Util.find_source(sources, var) |> Util.source
    str = "#{name}.#{field}"
    # nil direction leaves the database default (ASC) implicit.
    case dir do
      nil -> str
      :asc -> str <> " ASC"
      :desc -> str <> " DESC"
    end
  end

  defp limit(nil), do: nil
  defp limit(num), do: "LIMIT " <> integer_to_binary(num)

  defp offset(nil), do: nil
  defp offset(num), do: "OFFSET " <> integer_to_binary(num)

  # Joins a list of query expressions into "NAME (e1) AND (e2) ..." or nil
  # when the list is empty (used for WHERE and HAVING).
  defp boolean(_name, [], _sources), do: nil

  defp boolean(name, query_exprs, sources) do
    exprs = Enum.map_join(query_exprs, " AND ", fn(QueryExpr[expr: expr]) ->
      "(" <> expr(expr, sources) <> ")"
    end)
    name <> " " <> exprs
  end

  # Field access on a source variable: rendered as alias.field.
  defp expr({ :., _, [{ :&, _, [_] } = var, field] }, sources) when is_atom(field) do
    { _, name } = Util.find_source(sources, var) |> Util.source
    "#{name}.#{field}"
  end

  defp expr({ :!, _, [expr] }, sources) do
    "NOT (" <> expr(expr, sources) <> ")"
  end

  # A bare source variable expands to all of the entity's columns.
  defp expr({ :&, _, [_] } = var, sources) do
    source = Util.find_source(sources, var)
    entity = Util.entity(source)
    fields = entity.__entity__(:field_names)
    { _, name } = Util.source(source)
    Enum.map_join(fields, ", ", &"#{name}.#{&1}")
  end

  # Comparisons against nil must use IS NULL / IS NOT NULL in SQL.
  defp expr({ :==, _, [nil, right] }, sources) do
    "#{op_to_binary(right, sources)} IS NULL"
  end

  defp expr({ :==, _, [left, nil] }, sources) do
    "#{op_to_binary(left, sources)} IS NULL"
  end

  defp expr({ :!=, _, [nil, right] }, sources) do
    "#{op_to_binary(right, sources)} IS NOT NULL"
  end

  defp expr({ :!=, _, [left, nil] }, sources) do
    "#{op_to_binary(left, sources)} IS NOT NULL"
  end

  # `x in first..last` becomes BETWEEN; other enumerables use = ANY(...).
  defp expr({ :in, _, [left, Range[first: first, last: last]] }, sources) do
    sqls = [ expr(left, sources), "BETWEEN", expr(first, sources), "AND",
             expr(last, sources) ]
    Enum.join(sqls, " ")
  end

  defp expr({ :in, _, [left, { :.., _, [first, last] }] }, sources) do
    sqls = [ expr(left, sources), "BETWEEN", expr(first, sources), "AND",
             expr(last, sources) ]
    Enum.join(sqls, " ")
  end

  defp expr({ :in, _, [left, right] }, sources) do
    expr(left, sources) <> " = ANY (" <> expr(right, sources) <> ")"
  end

  # Literal ranges are materialized into ARRAY[...] lists.
  defp expr(Range[] = range, sources) do
    expr(Enum.to_list(range), sources)
  end

  defp expr({ :.., _, [first, last] }, sources) do
    expr(Enum.to_list(first..last), sources)
  end

  # Division is cast through ::float to avoid Postgres integer division.
  defp expr({ :/, _, [left, right] }, sources) do
    op_to_binary(left, sources) <> " / " <> op_to_binary(right, sources) <> "::float"
  end

  defp expr({ arg, _, [] }, sources) when is_tuple(arg) do
    expr(arg, sources)
  end

  # Generic call: dispatch through the translate_name/2 table built above.
  defp expr({ fun, _, args }, sources) when is_atom(fun) and is_list(args) do
    case translate_name(fun, length(args)) do
      { :unary_op, op } ->
        arg = expr(Enum.first(args), sources)
        op <> arg
      { :binary_op, op } ->
        [left, right] = args
        op_to_binary(left, sources) <> " #{op} " <> op_to_binary(right, sources)
      { :fun, "localtimestamp" } ->
        "localtimestamp"
      { :fun, fun } ->
        "#{fun}(" <> Enum.map_join(args, ", ", &expr(&1, sources)) <> ")"
    end
  end

  defp expr(list, sources) when is_list(list) do
    "ARRAY[" <> Enum.map_join(list, ", ", &expr(&1, sources)) <> "]"
  end

  defp expr(literal, _sources), do: literal(literal)

  defp literal(nil), do: "NULL"
  defp literal(true), do: "TRUE"
  defp literal(false), do: "FALSE"

  defp literal(Ecto.DateTime[] = dt) do
    "timestamp '#{dt.year}-#{dt.month}-#{dt.day} #{dt.hour}:#{dt.min}:#{dt.sec}'"
  end

  defp literal(Ecto.Interval[] = i) do
    "interval 'P#{i.year}-#{i.month}-#{i.day}T#{i.hour}:#{i.min}:#{i.sec}'"
  end

  # Binaries are hex-encoded nibble by nibble into a bytea literal.
  defp literal(Ecto.Binary[value: binary]) do
    hex = lc << h :: [unsigned, 4], l :: [unsigned, 4] >> inbits binary do
      fixed_integer_to_binary(h, 16) <> fixed_integer_to_binary(l, 16)
    end
    "'\\x#{hex}'::bytea"
  end

  defp literal(literal) when is_binary(literal) do
    "'#{escape_string(literal)}'::text"
  end

  defp literal(literal) when is_number(literal) do
    to_string(literal)
  end

  # Wraps binary-operator sub-expressions in parentheses to preserve the
  # original precedence; everything else is rendered bare.
  defp op_to_binary({ op, _, [_, _] } = expr, sources) when op in @binary_ops do
    "(" <> expr(expr, sources) <> ")"
  end

  defp op_to_binary(expr, sources) do
    expr(expr, sources)
  end

  # TODO: Records (Kernel.access)
  defp select_clause({ :{}, _, elems }, sources) do
    Enum.map_join(elems, ", ", &select_clause(&1, sources))
  end

  defp select_clause(list, sources) when is_list(list) do
    Enum.map_join(list, ", ", &select_clause(&1, sources))
  end

  defp select_clause(expr, sources) do
    expr(expr, sources)
  end

  # Standard SQL string escaping: single quotes are doubled.
  defp escape_string(value) when is_binary(value) do
    :binary.replace(value, "'", "''", [:global])
  end

  # Assigns a unique alias to every source (first letter of the table name
  # plus a counter), e.g. "posts" -> "p0".
  defp create_names(query) do
    sources = query.sources |> tuple_to_list
    Enum.reduce(sources, [], fn({ table, entity, model }, names) ->
      name = unique_name(names, String.first(table), 0)
      [{ { table, name }, entity, model }|names]
    end) |> Enum.reverse |> list_to_tuple
  end

  # Brute force find unique name
  defp unique_name(names, name, counter) do
    counted_name = name <> integer_to_binary(counter)
    if Enum.any?(names, fn { { _, n }, _, _ } -> n == counted_name end) do
      unique_name(names, name, counter+1)
    else
      counted_name
    end
  end

  # This is fixed in R16B02, we can remove this fix when we stop supporting R16B01
  defp fixed_integer_to_binary(0, _), do: "0"
  defp fixed_integer_to_binary(value, base), do: integer_to_binary(value, base)
end
|
lib/ecto/adapters/postgres/sql.ex
| 0.618089 | 0.433262 |
sql.ex
|
starcoder
|
defmodule OverDB.Protocol.V4.Frames.Responses.Result.Prepared do
  @moduledoc """
  Prepared

  The result to a PREPARE message. The body of a Prepared result is:
    <id><metadata><result_metadata>
  where:
  - <id> is [short bytes] representing the prepared query ID.
  - <metadata> is composed of:
      <flags><columns_count><pk_count>[<pk_index_1>...<pk_index_n>][<global_table_spec>?<col_spec_1>...<col_spec_n>]
    where:
    - <flags> is an [int]. The bits of <flags> provides information on the
      formatting of the remaining information. A flag is set if the bit
      corresponding to its `mask` is set. Supported masks and their flags
      are:
        0x0001    Global_tables_spec: if set, only one table spec (keyspace
                  and table name) is provided as <global_table_spec>. If not
                  set, <global_table_spec> is not present.
    - <columns_count> is an [int] representing the number of bind markers
      in the prepared statement. It defines the number of <col_spec_i>
      elements.
    - <pk_count> is an [int] representing the number of <pk_index_i>
      elements to follow. If this value is zero, at least one of the
      partition key columns in the table that the statement acts on
      did not have a corresponding bind marker (or the bind marker
      was wrapped in a function call).
    - <pk_index_i> is a short that represents the index of the bind marker
      that corresponds to the partition key column in position i.
      For example, a <pk_index> sequence of [2, 0, 1] indicates that the
      table has three partition key columns; the full partition key
      can be constructed by creating a composite of the values for
      the bind markers at index 2, at index 0, and at index 1.
      This allows implementations with token-aware routing to correctly
      construct the partition key without needing to inspect table
      metadata.
    - <global_table_spec> is present if the Global_tables_spec is set in
      <flags>. If present, it is composed of two [string]s. The first
      [string] is the name of the keyspace that the statement acts on.
      The second [string] is the name of the table that the columns
      represented by the bind markers belong to.
    - <col_spec_i> specifies the bind markers in the prepared statement.
      There are <column_count> such column specifications, each with the
      following format:
        (<ksname><tablename>)?<name><type>
      The initial <ksname> and <tablename> are two [string] that are only
      present if the Global_tables_spec flag is not set. The <name> field
      is a [string] that holds the name of the bind marker (if named),
      or the name of the column, field, or expression that the bind marker
      corresponds to (if the bind marker is "anonymous"). The <type>
      field is an [option] that represents the expected type of values for
      the bind marker. See the Rows documentation (section 4.2.5.2) for
      full details on the <type> field.
  - <result_metadata> is defined exactly the same as <metadata> in the Rows
    documentation (section 4.2.5.2). This describes the metadata for the
    result set that will be returned when this prepared statement is executed.
    Note that <result_metadata> may be empty (have the No_metadata flag and
    0 columns, See section 4.2.5.2) and will be for any query that is not a
    Select. In fact, there is never a guarantee that this will be non-empty, so
    implementations should protect themselves accordingly. This result metadata
    is an optimization that allows implementations to later execute the
    prepared statement without requesting the metadata (see the Skip_metadata
    flag in EXECUTE). Clients can safely discard this metadata if they do not
    want to take advantage of that optimization.

  Note that the prepared query ID returned is global to the node on which the query
  has been prepared. It can be used on any connection to that node
  until the node is restarted (after which the query must be reprepared).
  """

  defstruct [:id, :metadata, :metadata_length, :result_metadata, :result_metadata_length, :pk_indices, :values]

  @type t :: %__MODULE__{id: binary, metadata: list, metadata_length: integer, result_metadata: list, result_metadata_length: integer, pk_indices: list, values: list | map}

  @doc """
  Builds a `Prepared` struct from the decoded parts of a PREPARE response.
  `values` defaults to an empty list when the statement has no bound values.
  """
  @spec create(binary, list, integer, list | nil, integer, list, list | map) :: t
  def create(id, metadata, metadata_length, result_metadata, result_metadata_length, pk_indices, values \\ []) do
    %__MODULE__{
      id: id,
      metadata: metadata,
      metadata_length: metadata_length,
      result_metadata: result_metadata,
      result_metadata_length: result_metadata_length,
      pk_indices: pk_indices,
      values: values
    }
  end
end
|
lib/protocol/v4/frames/responses/result/prepared.ex
| 0.848628 | 0.687433 |
prepared.ex
|
starcoder
|
defmodule Frank.Git.Grep.Match do
  @moduledoc """
  A single matching line produced by `git grep`: the 1-based line number
  and the (possibly truncated) text of that line.
  """

  @type t :: %__MODULE__{
          line_number: integer(),
          raw_text: String.t()
        }

  # Both fields default to nil, same as the keyword form.
  defstruct [:line_number, :raw_text]
end
defmodule Frank.Git.Grep do
  import Frank.Helper
  require Logger

  # One grep result per matched file: the reference searched, the resolved
  # object (file) when it could be loaded, and the matching lines.
  @type t :: %__MODULE__{
          reference: Frank.Git.Reference.t(),
          object: Frank.Git.Object.t(),
          # FIX: was Frank.Git.Repo.Match.t(), a module that does not exist;
          # the struct built below is Frank.Git.Grep.Match.
          matches: [Frank.Git.Grep.Match.t()]
        }
  defstruct reference: nil,
            object: nil,
            matches: nil

  @doc """
  $ git grep -iIn compatible
  guides/https.md:78:To simplify configuration of TLS defaults Plug provides two preconfigured options: `cipher_suite: :strong` and `cipher_suite: :compatible`.
  guides/https.md:82:The `:compatible` profile additionally enables AES-CBC ciphers, as well as TLS versions 1.1 and 1.0. Use this configuration to allow connections from older clients, such as older PC or mobile operating systems. Note that RSA key exchange is not enabled by this configuration, due to known weaknesses, so to support clients that do not support ECDHE or DHE it is necessary specify the ciphers explicitly (see [below](#manual-configuration)).
  lib/plug/ssl.ex:75:  @compatible_tls_ciphers [
  lib/plug/ssl.ex:114:    options: `cipher_suite: :strong` and `cipher_suite: :compatible`. The Ciphers
  lib/plug/ssl.ex:127:    be fully compatible with older browsers and operating systems.
  lib/plug/ssl.ex:129:    The **Compatible** cipher suite supports tlsv1, tlsv1.1 and tlsv1.2. Ciphers were
  lib/plug/ssl.ex:231:      :compatible -> set_compatible_tls_defaults(options)
  lib/plug/ssl.ex:250:  defp set_compatible_tls_defaults(options) do
  lib/plug/ssl.ex:253:    |> Keyword.put_new(:ciphers, @compatible_tls_ciphers)
  test/plug/ssl_test.exs:37:    test "sets cipher suite to compatible" do
  test/plug/ssl_test.exs:38:      assert {:ok, opts} = configure(key: "abcdef", cert: "ghijkl", cipher_suite: :compatible)
  test/plug/ssl_test.exs:64:    test "sets cipher suite with overrides compatible" do
  test/plug/ssl_test.exs:69:        cipher_suite: :compatible,
  """
  def search(
        %Frank.Git.Reference{path: reference_path, objects: objects, name: reference_name} =
          reference,
        search_term,
        pattern \\ nil
      ) do
    # NOTE(review): search_term and pattern are interpolated straight into a
    # shell command. If they can ever come from untrusted input this is a
    # shell-injection hole — presumably bash/1 (Frank.Helper) runs through a
    # shell; TODO confirm and escape/allow-list the arguments.
    command =
      case pattern do
        nil ->
          "git -C #{reference_path} grep -iIn #{search_term} origin/#{reference_name}"

        limited_to_filename ->
          "git -C #{reference_path} grep -iIn #{search_term} origin/#{reference_name} -- #{limited_to_filename}"
      end

    # git grep exits non-zero when nothing matches; treat any failure as an
    # empty result (logged for visibility).
    case command |> bash() do
      {result, 0} ->
        result

      err ->
        Logger.warn(inspect(err))
        ""
    end
    |> String.split("\n")
    |> Enum.filter(fn x -> x != "" end)
    # Each output line has the shape "ref:file:line:text"; parts: 4 keeps
    # colons inside the matched text intact.
    |> Enum.map(fn raw_match ->
      raw_match
      |> String.split(":", parts: 4)
    end)
    # Group matches per file, truncating very long lines to 200 characters.
    |> Enum.group_by(
      fn
        [_branch, file | _] ->
          file
      end,
      fn [
           _branch,
           _file | line_number_and_raw_text
         ] ->
        [line_number, raw_text] = line_number_and_raw_text

        cond do
          String.length(raw_text) > 200 ->
            {head, _tail} =
              raw_text
              |> String.split_at(200)

            [line_number, "#{head} ..."]

          true ->
            line_number_and_raw_text
        end
      end
    )
    |> Enum.to_list()
    # Resolve each matched path to its Frank.Git.Object (nil when the
    # content cannot be loaded) and build the Grep structs.
    |> Enum.map(fn {match_path, matches} ->
      object =
        case objects
             |> Enum.find(fn %Frank.Git.Object{path: object_path} ->
               object_path == match_path
             end)
             |> Frank.Git.Object.get_content(reference) do
          {:ok, object} ->
            object

          _ ->
            nil
        end

      %Frank.Git.Grep{
        reference: reference,
        object: object,
        matches:
          matches
          |> Enum.map(fn [line_number, raw_text] ->
            %Frank.Git.Grep.Match{
              line_number: String.to_integer(line_number),
              raw_text: raw_text |> String.trim()
            }
          end)
      }
    end)
  end
end
|
lib/git/grep.ex
| 0.712532 | 0.483222 |
grep.ex
|
starcoder
|
defmodule Wasmex do
  @moduledoc """
  Wasmex is a fast and secure [WebAssembly](https://webassembly.org/) and [WASI](https://github.com/WebAssembly/WASI) runtime for Elixir.
  It enables lightweight WebAssembly containers to be run in your Elixir backend.
  It uses [wasmer](https://wasmer.io/) to execute WASM binaries through a NIF. We use [Rust](https://www.rust-lang.org/) to implement the NIF to make it as safe as possible.

  This is the main module, providing most of the needed API to run WASM binaries.

  Each WASM module must be compiled from a `.wasm` file.
  A compiled module can be instantiated which usually happens in a [GenServer](https://hexdocs.pm/elixir/master/GenServer.html).
  To start the GenServer, `start_link/1` is used - it receives a variety of configuration options including function imports and optional WASI runtime options.

      {:ok, bytes } = File.read("wasmex_test.wasm")
      {:ok, module} = Wasmex.Module.compile(bytes)
      {:ok, instance } = Wasmex.start_link(%{module: module})
      {:ok, [42]} == Wasmex.call_function(instance, "sum", [50, -8])

  Memory of a WASM instance can be read/written using `Wasmex.Memory`:

      offset = 7
      index = 4
      value = 42

      {:ok, memory} = Wasmex.Instance.memory(instance, :uint8, offset)
      Wasmex.Memory.set(memory, index, value)
      IO.puts Wasmex.Memory.get(memory, index) # 42

  See `start_link/1` for starting a WASM instance and `call_function/3` for details about calling WASM functions.
  """

  use GenServer

  # Client

  @doc """
  Starts a GenServer which instantiates a WASM module from the given `.wasm` bytes.

      {:ok, bytes } = File.read("wasmex_test.wasm")
      {:ok, module} = Wasmex.Module.compile(bytes)
      {:ok, instance } = Wasmex.start_link(%{module: module})
      {:ok, [42]} == Wasmex.call_function(instance, "sum", [50, -8])

  ### Imports

  Imports are provided as a map of namespaces, each namespace being a nested map of imported functions:

      imports = %{
        env: %{
          sum3: {:fn, [:i32, :i32, :i32], [:i32], fn (_context, a, b, c) -> a + b + c end},
        }
      }
      instance = Wasmex.start_link(%{module: module, imports: imports})
      {:ok, [6]} = Wasmex.call_function(instance, "use_the_imported_sum_fn", [1, 2, 3])

  In the example above, we import the `"env"` namespace.
  Each namespace is a map listing imports, e.g. the `sum3` function, which is represented with a tuple of:

  1. the import type: `:fn` (a function),
  1. the functions parameter types: `[:i32, :i32]`,
  1. the functions return types: `[:i32]`, and
  1. the function to be executed: `fn (_context, a, b, c) -> a + b end`

  The first param the function receives is always the call context (a Map containing e.g. the instances memory).
  All other params are regular parameters as specified by the parameter type list.

  Valid parameter/return types are:

  - `:i32` a 32 bit integer
  - `:i64` a 64 bit integer
  - `:f32` a 32 bit float
  - `:f64` a 64 bit float

  The return type must always be one value.

  ### WASI

  Optionally, modules can be run with WebAssembly System Interface (WASI) support.
  WASI functions are provided as native NIF functions by default.

      {:ok, instance } = Wasmex.start_link(%{module: module, wasi: true})

  It is possible to overwrite the default WASI functions using the imports map as described above.

  Oftentimes, WASI programs need additional input like environment variables, arguments,
  or file system access.
  These can be provided by giving a `wasi` map:

      wasi = %{
        args: ["hello", "from elixir"],
        env: %{
          "A_NAME_MAPS" => "to a value",
          "THE_TEST_WASI_FILE" => "prints all environment variables"
        },
        preopen: %{"wasi_logfiles": %{flags: [:write, :create], alias: "log"}}
      }
      {:ok, instance } = Wasmex.start_link(%{module: module, wasi: wasi})

  The `preopen` map takes directory paths as keys and settings map as values.
  Settings must specify the access map with one or more of `:create`, `:read`, `:write`.
  Optionally, the directory can be given another name in the WASI program using `alias`.

  It is also possible to capture stdout, stdin, or stderr of a WASI program using pipes:

      {:ok, stdin} = Wasmex.Pipe.create()
      {:ok, stdout} = Wasmex.Pipe.create()
      {:ok, stderr} = Wasmex.Pipe.create()
      wasi = %{
        stdin: stdin,
        stdout: stdout,
        stderr: stderr
      }
      {:ok, instance } = Wasmex.start_link(%{module: module, wasi: wasi})
      Wasmex.Pipe.write(stdin, "Hey! It compiles! Ship it!")
      {:ok, _} = Wasmex.call_function(instance, :_start, [])
      Wasmex.Pipe.read(stdout)
  """
  # Missing :imports defaults to an empty imports map.
  def start_link(%{} = opts) when not is_map_key(opts, :imports),
    do: start_link(Map.merge(opts, %{imports: %{}}))

  # `wasi: true` is shorthand for WASI with all defaults.
  def start_link(%{wasi: true} = opts), do: start_link(Map.merge(opts, %{wasi: %{}}))

  # Raw bytes are compiled to a module first, then re-dispatched.
  def start_link(%{bytes: bytes} = opts) do
    with {:ok, module} <- Wasmex.Module.compile(bytes) do
      opts
      |> Map.delete(:bytes)
      |> Map.put(:module, module)
      |> start_link()
    end
  end

  def start_link(%{module: module, imports: imports, wasi: wasi})
      when is_map(imports) and is_map(wasi) do
    # Keys are stringified so the NIF sees a uniform string-keyed map.
    GenServer.start_link(__MODULE__, %{
      module: module,
      imports: stringify_keys(imports),
      wasi: stringify_keys(wasi)
    })
  end

  def start_link(%{module: module, imports: imports}) when is_map(imports) do
    GenServer.start_link(__MODULE__, %{module: module, imports: stringify_keys(imports)})
  end

  @doc """
  Returns whether a function export with the given `name` exists in the WebAssembly instance.
  """
  def function_exists(pid, name) do
    GenServer.call(pid, {:exported_function_exists, stringify(name)})
  end

  @doc """
  Calls a function with the given `name` and `params` on
  the WebAssembly instance and returns its results.

  ### Strings as Parameters and Return Values

  Strings can not directly be used as parameters or return values when calling WebAssembly functions since WebAssembly only knows number data types.
  But since Strings are just "a bunch of bytes" we can write these bytes into memory and give our WebAssembly function a pointer to that memory location.

  #### Strings as Function Parameters

  Given we have the following Rust function that returns the first byte of a given string
  in our WebAssembly (note: this is copied from our test code, have a look there if you're interested):

  ```rust
  #[no_mangle]
  pub extern "C" fn string_first_byte(bytes: *const u8, length: usize) -> u8 {
      let slice = unsafe { slice::from_raw_parts(bytes, length) };
      match slice.first() {
          Some(&i) => i,
          None => 0,
      }
  }
  ```

  Let's see how we can call this function from Elixir:

  ```elixir
  {:ok, instance} = Wasmex.start_link(%{module: module})
  {:ok, memory} = Wasmex.memory(instance, :uint8, 0)
  index = 42
  string = "hello, world"
  Wasmex.Memory.write_binary(memory, index, string)

  # 104 is the letter "h" in ASCII/UTF-8 encoding
  {:ok, [104]} == Wasmex.call_function(instance, "string_first_byte", [index, String.length(string)])
  ```

  Please note that Elixir and Rust assume Strings to be valid UTF-8. Take care when handling other encodings.

  #### Strings as Function Return Values

  Given we have the following Rust function in our WebAssembly (copied from our test code):

  ```rust
  #[no_mangle]
  pub extern "C" fn string() -> *const u8 {
      b"Hello, World!".as_ptr()
  }
  ```

  This function returns a pointer to its memory.
  This memory location contains the String "Hello, World!" (ending with a null-byte since in C-land all strings end with a null-byte to mark the end of the string).

  This is how we would receive this String in Elixir:

  ```elixir
  {:ok, instance} = Wasmex.start_link(%{module: module})
  {:ok, memory} = Wasmex.memory(instance, :uint8, 0)

  {:ok, [pointer]} = Wasmex.call_function(instance, "string", [])
  returned_string = Wasmex.Memory.read_string(memory, pointer, 13) # "Hello, World!"
  ```
  """
  def call_function(pid, name, params) do
    GenServer.call(pid, {:call_function, stringify(name), params})
  end

  @doc """
  Finds the exported memory of the given WASM instance and returns it as a `Wasmex.Memory`.

  The memory is a collection of bytes which can be viewed and interpreted as a sequence of different
  (data-)`types`:

  * uint8 / int8 - (un-)signed 8-bit integer values
  * uint16 / int16 - (un-)signed 16-bit integer values
  * uint32 / int32 - (un-)signed 32-bit integer values

  We can think of it as a list of values of the above type (where each value may be larger than a byte).
  The `offset` value can be used to start reading the memory starting from the chosen position.
  """
  def memory(pid, type, offset) when type in [:uint8, :int8, :uint16, :int16, :uint32, :int32] do
    GenServer.call(pid, {:memory, type, offset})
  end

  # Structs are passed through untouched; only plain maps are deep-stringified.
  defp stringify_keys(struct) when is_struct(struct), do: struct

  defp stringify_keys(map) when is_map(map) do
    for {key, val} <- map, into: %{}, do: {stringify(key), stringify_keys(val)}
  end

  defp stringify_keys(value), do: value

  defp stringify(s) when is_binary(s), do: s
  defp stringify(s) when is_atom(s), do: Atom.to_string(s)

  # Server

  @doc """
  Params:

  * module (Wasmex.Module): the compiled WASM module
  * imports (map): a map defining imports. Structure is:
          %{
            namespace_name: %{
              import_name: {:fn, [:i32, :i32], [:i32], function_reference}
            }
          }
  * wasi (map): a map defining WASI support. Structure is:
          %{
            args: ["string", "arguments"],
            env: %{
              "A_NAME_MAPS" => "to a value"
            },
            stdin: Pipe.create(),
            stdout: Pipe.create(),
            stderr: Pipe.create()
          }
  """
  @impl true
  def init(%{module: module, imports: imports, wasi: wasi})
      when is_map(imports) and is_map(wasi) do
    case Wasmex.Instance.new_wasi(module, imports, wasi) do
      {:ok, instance} -> {:ok, %{instance: instance, imports: imports, wasi: wasi}}
      {:error, reason} -> {:error, reason}
    end
  end

  @impl true
  def init(%{module: module, imports: imports}) when is_map(imports) do
    case Wasmex.Instance.new(module, imports) do
      {:ok, instance} -> {:ok, %{instance: instance, imports: imports}}
      {:error, reason} -> {:error, reason}
    end
  end

  @impl true
  def handle_call({:memory, size, offset}, _from, %{instance: instance} = state)
      when size in [:uint8, :int8, :uint16, :int16, :uint32, :int32] do
    case Wasmex.Memory.from_instance(instance, size, offset) do
      {:ok, memory} -> {:reply, {:ok, memory}, state}
      {:error, error} -> {:reply, {:error, error}, state}
    end
  end

  @impl true
  def handle_call({:exported_function_exists, name}, _from, %{instance: instance} = state)
      when is_binary(name) do
    {:reply, Wasmex.Instance.function_export_exists(instance, name), state}
  end

  @impl true
  def handle_call({:call_function, name, params}, from, %{instance: instance} = state) do
    # The reply is deferred: the NIF runs the call asynchronously and sends a
    # :returned_function_call message, answered via GenServer.reply/2 in
    # handle_info/2 below.
    :ok = Wasmex.Instance.call_exported_function(instance, name, params, from)
    {:noreply, state}
  end

  @impl true
  def handle_info({:returned_function_call, result, from}, state) do
    GenServer.reply(from, result)
    {:noreply, state}
  end

  # Invoked by the NIF when WASM code calls an imported Elixir function:
  # look up the callback, run it, and hand the result back via the token.
  @impl true
  def handle_info(
        {:invoke_callback, namespace_name, import_name, context, params, token},
        %{imports: imports} = state
      ) do
    context =
      Map.put(
        context,
        :memory,
        Wasmex.Memory.wrap_resource(Map.get(context, :memory), :uint8, 0)
      )

    # NOTE(review): only RuntimeError is rescued here; any other exception in
    # a user callback will crash this GenServer — presumably intentional
    # ("let it crash"), but worth confirming.
    {success, return_value} =
      try do
        {:fn, _params, _returns, callback} =
          imports
          |> Map.get(namespace_name, %{})
          |> Map.get(import_name)

        {true, apply(callback, [context | params])}
      rescue
        e in RuntimeError -> {false, e.message}
      end

    return_values =
      case return_value do
        nil -> []
        _ -> [return_value]
      end

    :ok = Wasmex.Native.namespace_receive_callback_result(token, success, return_values)
    {:noreply, state}
  end
end
|
lib/wasmex.ex
| 0.900423 | 0.657786 |
wasmex.ex
|
starcoder
|
defmodule Stone do
  @moduledoc ~S"""
  This project tries to reduce boilerplate when writing Elixir `GenServer`s by
  making use of language metaprogramming capabilities.

  ## Functionality

  This project helps remove boilerplate common when implementing `GenServer`
  behaviour in Elixir. In particular, it can be useful in following situations:

  * `start` function just packs all arguments into a tuple which it forwards to
    `init/1` via `GenServer.start`
  * Calls and casts interface functions just forward all arguments to the server
    process via `GenServer.call` and `GenServer.cast`
  * Registration of GenServer under a global name

  For other cases, you may need to use plain `GenServer` functions (which can be
  used together with `Stone` macros). `Stone` is not meant to fully replace
  `GenServer`. It just tries to reduce boilerplate in most common cases.

  ## Usage Examples

  Let's take a look at the following server definition:

  ```elixir
  defmodule CounterAgent do
    use Stone.GenServer

    defstart start_link(val \\ 0) do
      initial_state(val)
    end

    defcall get(), state: state do
      reply(state)
    end

    defcall inc(), state: state do
      reply_and_set(state, state+1)
    end

    defcall add(x), state: state do
      reply_and_set(state + x, state + x)
    end

    defcast set(value) do
      noreply_and_set(value)
    end
  end
  ```

  Above code defines a simple `GenServer` that maintains a counter, and exposes
  a convenient interface to be used by other processes. Without using a library,
  this code would look like this:

  ```elixir
  defmodule CounterAgent do
    use GenServer

    def start_link(val \\ 0, opts \\ []) do
      GenServer.start_link(CounterAgent, {val}, opts)
    end

    def init({val}) do
      {:ok, val}
    end

    def get(pid) do
      GenServer.call(pid, {:get})
    end

    def handle_call({:get}, _from, state) do
      {:reply, state, state}
    end

    def inc(pid) do
      GenServer.call(pid, {:inc})
    end

    def handle_call({:inc}, _from, state) do
      {:reply, state, state+1}
    end

    def set(pid, value \\ 0) do
      GenServer.cast(pid, {:set, value})
    end

    def handle_cast({:set, value}, _state) do
      {:noreply, value}
    end
  end
  ```

  ## Documentation

  For more detailed documentation, please look at these modules:

  * `Stone.GenServer`
  * `Stone.Operations`
  * `Stone.Responders`
  """
end
|
lib/stone.ex
| 0.835383 | 0.867822 |
stone.ex
|
starcoder
|
defmodule NotQwerty123.PasswordStrength do
  @moduledoc """
  Module to check password strength.

  This module does not provide a password strength meter. Instead, it
  simply rejects passwords that are considered too weak. Depending on
  the nature of your application, a solid front end solution to password
  checking, such as [this Dropbox implementation](https://github.com/dropbox/zxcvbn),
  might be a better idea.

  ## Password strength

  In simple terms, password strength depends on how long a password is
  and how easy it is to guess. In most cases, passwords should be at
  least 8 characters long, and they should not be similar to common
  passwords, like `password` or `qwerty123`, or consist of repeated
  characters, like `abcabcabcabc`. Dictionary words, common names
  and user-specific words (company name, address, etc.) should also
  be avoided.

  ## Further information

  The [NIST password guidelines](https://pages.nist.gov/800-63-3/sp800-63b.html).
  The [Comeonin wiki](https://github.com/elixircnx/comeonin/wiki)
  also has links to further information about password-related issues.
  """

  import NotQwerty123.Gettext

  alias NotQwerty123.WordlistManager

  @doc """
  Check the strength of the password.

  Returns `{:ok, password}` or `{:error, message}`.

  The password is checked to make sure that it is not too short, that
  it does not consist of repeated characters (e.g. 'abcabcabcabc') and
  that it is not similar to any word in the common password list.
  See the documentation for `NotQwerty123.WordlistManager` for
  information about customizing the common password list.

  ## Options

  There is one option:

    * `:min_length` - minimum allowable length of the password (default 8)
  """
  def strong_password?(password, opts \\ []) do
    min_len = Keyword.get(opts, :min_length, 8)
    len = String.length(password)

    if len < min_len do
      {:error,
       gettext("The password should be at least %{min_len} characters long.", min_len: min_len)}
    else
      password |> easy_guess?(len) |> result()
    end
  end

  # Flags easily guessed passwords: a short pattern repeated throughout, or a
  # case-insensitive hit in the word list. Returns `true` when guessable,
  # otherwise the password itself. Inputs of 1025+ graphemes skip the check.
  defp easy_guess?(password, word_len) when word_len < 1025 do
    key = String.downcase(password)

    Regex.match?(~r/^.?(..?.?.?.?.?.?.?)(\1+).?$/, key) or WordlistManager.query(key, word_len) or
      password
  end

  defp easy_guess?(password, _), do: password

  # `true` from easy_guess?/2 means the password was rejected.
  defp result(true) do
    {:error,
     gettext(
       "The password you have chosen is weak because it is easy to guess. Please choose another one."
     )}
  end

  defp result({:error, message}), do: {:error, message}

  defp result(password), do: {:ok, password}
end
|
lib/not_qwerty123/password_strength.ex
| 0.70304 | 0.53437 |
password_strength.ex
|
starcoder
|
defmodule FarmbotOS.BotStateNG do
  @moduledoc """
  The data structure behind the bot state tree (not the living process).
  Also has some helpers for batching changes.
  """

  alias FarmbotOS.{
    BotStateNG,
    BotStateNG.McuParams,
    BotStateNG.LocationData,
    BotStateNG.InformationalSettings,
    BotStateNG.Configuration
  }

  use Ecto.Schema
  import Ecto.Changeset

  @primary_key false

  # Static manifest data for the built-in Farmware scripts. Kept for
  # backwards compatibility and merged into `process_info` in view/1.
  @legacy_info %{
    farmwares: %{
      "Measure Soil Height": %{
        config: %{
          "0": %{
            label:
              "Measured distance from camera to soil in millimeters (required input for calibration)",
            name: "measured_distance",
            value: 0
          },
          "1": %{
            label: "Disparity search depth",
            name: "disparity_search_depth",
            value: 1
          },
          "10": %{
            label: "Calibration maximum",
            name: "calibration_maximum",
            value: 0
          },
          "2": %{
            label: "Disparity block size",
            name: "disparity_block_size",
            value: 15
          },
          "3": %{
            label: "Image output",
            name: "verbose",
            value: 2
          },
          "4": %{
            label: "Log verbosity",
            name: "log_verbosity",
            value: 1
          },
          "5": %{
            label: "Calibration factor result",
            name: "calibration_factor",
            value: 0
          },
          "6": %{
            label: "Calibration offset result",
            name: "calibration_disparity_offset",
            value: 0
          },
          "7": %{
            label: "Image width during calibration",
            name: "calibration_image_width",
            value: 0
          },
          "8": %{
            label: "Image height during calibration",
            name: "calibration_image_height",
            value: 0
          },
          "9": %{
            label: "Z-axis position during calibration",
            name: "calibration_measured_at_z",
            value: 0
          }
        },
        description: "Measure soil z height at the current position.",
        farmware_manifest_version: "2.0.0",
        package: "Measure Soil Height",
        package_version: "1.4.6"
      },
      "camera-calibration": %{
        config: %{},
        description: "Calibrate the camera for use in plant-detection.",
        farmware_manifest_version: "2.0.0",
        package: "camera-calibration",
        package_version: "0.0.2"
      },
      "historical-camera-calibration": %{
        config: %{},
        description:
          "Calibrate the camera with historical image for use in plant-detection.",
        farmware_manifest_version: "2.0.0",
        package: "historical-camera-calibration",
        package_version: "0.0.2"
      },
      "historical-plant-detection": %{
        config: %{},
        description:
          "Detect and mark plants in historical image. Prerequisite: camera-calibration",
        farmware_manifest_version: "2.0.0",
        package: "historical-plant-detection",
        package_version: "0.0.2"
      },
      "plant-detection": %{
        config: %{},
        description: "Detect and mark plants. Prerequisite: camera-calibration",
        farmware_manifest_version: "2.0.0",
        package: "plant-detection",
        package_version: "0.0.20"
      },
      "take-photo": %{
        config: %{},
        description: "Take a photo using a USB or Raspberry Pi camera.",
        farmware_manifest_version: "2.0.0",
        package: "take-photo",
        package_version: "1.0.19"
      }
    }
  }

  embedded_schema do
    # Nested state sub-trees; on_replace: :update patches the existing embed
    # in place instead of replacing it wholesale.
    embeds_one(:mcu_params, McuParams, on_replace: :update)
    embeds_one(:location_data, LocationData, on_replace: :update)

    embeds_one(:informational_settings, InformationalSettings,
      on_replace: :update
    )

    embeds_one(:configuration, Configuration, on_replace: :update)

    # Free-form maps, maintained through the helpers below.
    field(:user_env, :map, default: %{})
    field(:process_info, :map, default: @legacy_info)
    field(:pins, :map, default: %{})
    field(:jobs, :map, default: %{})
  end

  @doc "Builds a fully-populated default state tree."
  def new do
    %BotStateNG{}
    |> changeset(%{})
    |> put_embed(:mcu_params, McuParams.new())
    |> put_embed(:location_data, LocationData.new())
    |> put_embed(:informational_settings, InformationalSettings.new())
    |> put_embed(:configuration, Configuration.new())
    |> apply_changes()
  end

  @doc "Casts the plain map fields and all embedded sub-trees."
  def changeset(bot_state, params \\ %{}) do
    bot_state
    |> cast(params, [:user_env, :pins, :jobs, :process_info])
    |> cast_embed(:mcu_params, [])
    |> cast_embed(:location_data, [])
    |> cast_embed(:informational_settings, [])
    |> cast_embed(:configuration, [])
  end

  @doc "Renders the state tree as a plain map (e.g. for serialization)."
  def view(bot_state) do
    %{
      mcu_params: McuParams.view(bot_state.mcu_params),
      location_data: LocationData.view(bot_state.location_data),
      informational_settings:
        InformationalSettings.view(bot_state.informational_settings),
      configuration: Configuration.view(bot_state.configuration),
      # Legacy farmware entries always win over runtime-added ones here.
      process_info: Map.merge(@legacy_info, bot_state.process_info),
      user_env: bot_state.user_env,
      pins: bot_state.pins,
      jobs: bot_state.jobs
    }
  end

  @doc "Add or update a pin to state.pins. Returns a changeset."
  def add_or_update_pin(state, number, mode, value) do
    cs = changeset(state, %{})

    new_pins =
      cs
      |> get_field(:pins)
      |> Map.put(number, %{mode: mode, value: value})

    put_change(cs, :pins, new_pins)
  end

  @doc "Sets an env var on the state.user_env. Returns a changeset."
  def set_user_env(state, key, value) do
    cs = changeset(state, %{})

    new_user_env =
      cs
      |> get_field(:user_env)
      |> Map.put(key, value)

    put_change(cs, :user_env, new_user_env)
  end

  @doc "Sets a progress object on state.jobs. Returns a changeset."
  def set_job_progress(state, name, progress) do
    cs = changeset(state, %{})
    # Stamp the progress entry with a unix timestamp in whole seconds.
    t = round(FarmbotOS.Time.system_time_ms() / 1000)
    progress2 = Map.put(progress, :updated_at, t)

    new_jobs =
      cs
      |> get_field(:jobs)
      |> Map.put(name, progress2)

    put_change(cs, :jobs, new_jobs)
  end
end
|
lib/core/bot_state_ng.ex
| 0.793746 | 0.417776 |
bot_state_ng.ex
|
starcoder
|
defprotocol Commanded.Event.Upcaster do
  @moduledoc """
  Protocol to allow an event to be transformed before being passed to a
  consumer.
  You can use an upcaster to change the shape of an event (e.g. add a new field
  with a default, rename a field) or rename an event.
  Because the upcaster changes any historical event to the latest version,
  consumers (aggregates, event handlers, and process managers) only need
  to support the latest version.
  ## Example
      defimpl Commanded.Event.Upcaster, for: AnEvent do
        def upcast(%AnEvent{} = event, _metadata) do
          %AnEvent{name: name} = event
          %AnEvent{event | first_name: name}
        end
      end
  ## Metadata
  The `upcast/2` function receives the domain event and a map of metadata
  associated with that event. The metadata is provided during command dispatch.
  In addition to the metadata key/values you provide, the following system
  values will be included in the metadata:
    - `application` - the `Commanded.Application` used to read the event.
    - `event_id` - a globally unique UUID to identify the event.
    - `event_number` - a globally unique, monotonically incrementing integer
      used to order the event amongst all events.
    - `stream_id` - the stream identity for the event.
    - `stream_version` - the version of the stream for the event.
    - `causation_id` - an optional UUID identifier used to identify which
      command caused the event.
    - `correlation_id` - an optional UUID identifier used to correlate related
      commands/events.
    - `created_at` - the datetime, in UTC, indicating when the event was
      created.
  These key/value metadata pairs will use atom keys to differentiate them from
  the user provided metadata which uses string keys.
  """

  # Events without a dedicated implementation fall back to the Any impl,
  # which returns them unchanged.
  @fallback_to_any true

  @doc """
  Transforms `event` before it is passed to a consumer.

  Receives the domain event struct and the metadata map described above, and
  returns the (possibly rewritten) event struct.
  """
  @spec upcast(event :: struct(), metadata :: map()) :: struct()
  def upcast(event, metadata)
end
defimpl Commanded.Event.Upcaster, for: Any do
  @moduledoc """
  Fallback implementation of `Commanded.Event.Upcaster`.

  Events without a dedicated upcaster are passed through unchanged.
  """

  def upcast(event, _metadata) do
    event
  end
end
|
lib/commanded/event/upcaster.ex
| 0.930197 | 0.499573 |
upcaster.ex
|
starcoder
|
defmodule Resourceful.JSONAPI.Fields do
  @moduledoc """
  Functions for validating fields, primarily for use with JSON:API
  [sparse fieldsets](https://jsonapi.org/format/#fetching-sparse-fieldsets).

  Fields are provided by type name in requests and not inferred from root or
  relationship names. This means that if a type has multiple relationships
  pointing to a single type, or self-referential relationships, validation is
  applied to all instances of that type name regardless of its location in the
  graph.

  Since this is specific to JSON:API, field names are not converted to atoms
  in the generated options after successful validation. There's no need, since
  these prevent any mapping from occurring and never make it to the data layer.

  Note that a "field" is
  [specifically defined](https://jsonapi.org/format/#document-type-object-fields)
  as a collection of attribute names and relationship names. It specifically
  _excludes_ `id` and `type` despite all identifiers sharing a common namespace.

  NOTE: Relationships are currently not supported.
  """

  alias Resourceful.{Error, JSONAPI, Type}

  @doc """
  Takes a map of fields by type name (e.g.
  `%{"albums" => ["releaseDate", "title"]}`) and validates said fields against
  the provided type. If fields are included that are not part of a particular
  type, errors will be returned.
  """
  def validate(%Type{} = type, %{} = fields_by_type) do
    for {type_name, fields} <- fields_by_type, into: %{} do
      validated =
        with {:ok, related_type} <- validate_field_type(type, type_name) do
          validate_fields_with_type(related_type, fields)
        end

      {type_name, List.wrap(validated)}
    end
  end

  # Wraps an invalid field name in a keyed error.
  defp invalid_field_error(field), do: Error.with_key(:invalid_field, field)

  # Resolves `type_name` to a type related to `type`, or returns a keyed
  # error when no such type exists.
  defp validate_field_type(type, type_name) do
    case Type.fetch_related_type(type, type_name) do
      {:ok, _} = ok -> ok
      _ -> Error.with_key(:invalid_field_type, type_name)
    end
  end

  defp validate_fields_with_type(type, fields, context \\ %{})

  # A comma-separated string is split and re-validated as a list, carrying the
  # original input and source path along for error reporting.
  defp validate_fields_with_type(type, fields, _) when is_binary(fields) do
    field_list = JSONAPI.Params.split_string_list(fields)
    validate_fields_with_type(type, field_list, %{input: fields, source: ["fields", type.name]})
  end

  # Validates each field name against the type's local fields, tagging errors
  # with context, source path, and raw input.
  defp validate_fields_with_type(type, fields, context) when is_list(fields) do
    fields
    |> Enum.with_index()
    |> Enum.map(fn {field, index} ->
      case Type.has_local_field?(type, field) do
        true ->
          {:ok, field}

        _ ->
          field
          |> invalid_field_error()
          |> Error.with_context(:resource_type, type.name)
          |> Error.with_source(Map.get(context, :source) || ["fields", type.name, index])
          |> Error.with_input(Map.get(context, :input) || field)
      end
    end)
  end

  # Anything that is neither a string nor a list is itself invalid.
  defp validate_fields_with_type(_, field, _) do
    field
    |> invalid_field_error()
    |> Error.with_input(inspect(field))
  end
end
|
lib/resourceful/jsonapi/fields.ex
| 0.811303 | 0.485356 |
fields.ex
|
starcoder
|
defmodule Comeonin do
  @moduledoc """
  Comeonin is a password hashing library that aims to make the
  secure validation of passwords as straightforward as possible.
  It also provides extensive documentation to help
  developers keep their apps secure.

  Comeonin supports bcrypt and pbkdf2_sha512.

  ## Use

  Most users will just need the `hashpwsalt`, `checkpw` and `dummy_checkpw`
  functions from either the `Comeonin.Bcrypt` or `Comeonin.Pbkdf2` module.
  Naming conventions are the same for each algorithm.

  Import, or alias, the algorithm you want to use -- either `Comeonin.Bcrypt`
  or `Comeonin.Pbkdf2`.

  To hash a password with the default options:

      hash = hashpwsalt("difficult2guess")

  To check a password against the stored hash, use the `checkpw`
  function. It takes two arguments: the plaintext password and the stored
  hash.

  There is also a `dummy_checkpw` function, which takes no arguments
  and is to be used when the username cannot be found. It performs a hash,
  but then returns false. This can be used to make user enumeration more
  difficult. If an attacker already knows, or can guess, the username,
  this function will not be of any use, and so if you are going to use
  this function, it should be used with a policy of creating usernames
  that are not made public and are difficult to guess.

  See each module's documentation for more information about
  all the available options.

  ## Choosing an algorithm

  Bcrypt and pbkdf2_sha512 are both highly secure key derivation functions.
  They have no known vulnerabilities and their algorithms have been used
  and widely reviewed for at least 10 years. They are also designed
  to be `future-adaptable` (see the section below about speed / complexity
  for more details), and so we do not recommend one over the other.

  However, if your application needs to use a hashing function that has been
  recommended by a recognized standards body, then you will need to
  use pbkdf2_sha512, which has been recommended by NIST.

  ## Adjusting the speed / complexity of bcrypt and pbkdf2

  It is possible to adjust the speed / complexity of bcrypt and pbkdf2 by
  changing the number of rounds (the number of calculations) used. In most
  cases, you will not need to change the default number of rounds, but
  increasing the number of rounds can be useful because it limits the
  number of attempts an attacker can make within a certain time frame.
  It is not recommended to set the number of rounds lower than the
  defaults.

  To help you see how much time the hashing function takes with different
  numbers of rounds, this module provides convenience timing functions
  for bcrypt and pbkdf2.

  ## Further information

  Visit our [wiki](https://github.com/riverrun/comeonin/wiki)
  for links to further information about these and related issues.
  """

  @doc """
  A function to help the developer decide how many log rounds to use
  when using bcrypt.

  The number of log rounds can be increased to make the bcrypt hashing
  function more complex, and slower. The minimum number is 4 and the maximum is 31.
  The default is 12, but, depending on the nature of your application and
  the hardware being used, you might want to increase this.

  The `bcrypt_log_rounds` value can be set in the config file. See the
  documentation for `Comeonin.Config` for more details.
  """
  def time_bcrypt(log_rounds \\ 12) do
    salt = Comeonin.Bcrypt.gen_salt(log_rounds)
    # Time one hash of a fixed password with the requested work factor.
    {time, _hash} = :timer.tc(Comeonin.Bcrypt, :hashpass, ["password", salt])
    Mix.shell.info("Log rounds: #{log_rounds}, Time: #{div(time, 1000)} ms")
  end

  @doc """
  A function to help the developer decide how many rounds to use
  when using pbkdf2.

  The number of rounds can be increased to make the pbkdf2 hashing function slower.
  The maximum number of rounds is 4294967295. The default is 160_000, but,
  depending on the nature of your application and the hardware being used,
  you might want to increase this.

  The `pbkdf2_rounds` value can be set in the config file. See the
  documentation for `Comeonin.Config` for more details.
  """
  def time_pbkdf2(rounds \\ 160_000) do
    salt = Comeonin.Pbkdf2.gen_salt()
    # Time one hash of a fixed password with the requested iteration count.
    {time, _hash} = :timer.tc(Comeonin.Pbkdf2, :hashpass, ["password", salt, rounds])
    Mix.shell.info("Rounds: #{rounds}, Time: #{div(time, 1000)} ms")
  end
end
|
deps/comeonin/lib/comeonin.ex
| 0.832066 | 0.910665 |
comeonin.ex
|
starcoder
|
defmodule ExNoCache.Plug.LastModified do
  @moduledoc """
  A plug for caching a content using last modify date.
  Requires one option:
    * `:updated_at` - The Module Function Argument tuple to fetch the last
      modify date for the content. The function must return a %DateTime{}
      struct.
  ## Cache mechanisms
  `ExNoCache.Plug.LastModified` uses content last modification datetime to
  determine whether it should set the last-modified in header response or not.
  It first look for the client last modify date from the `"if-modified-since"`
  in the request headers. Then compare with the content last modify date. If the
  datetime is equal, the `ExNoCache.Plug.LastModified` halts the plug and
  returns the `"not modified"` immediately. Otherwise request will just go
  normally and the `"last-modified"` is added in the response headers.
  ## Usage
  Add this plug to the pipeline of the endpoint you want to cache using content
  last update time. Provide the Module Function Argument to get the last updated
  datetime and it should just work.
  Please note that the `ExNoCache.Plug.LastModified` only works with the GET
  request.
  ## Examples
  This plug can be mounted in a `Plug.Builder` pipeline as follows:
      def MyPlug do
        use Plug.Builder
        plug ExNoCache.Plug.LastModified, updated_at: {My, :awesome, ["function"]}
      end
  """
  @moduledoc since: "0.1.0"

  require Logger

  @behaviour Plug

  # Cache-Control value sent on every GET response. Fixed from the previous
  # "max-age: 86400": RFC 7234 directive syntax is "max-age=86400"
  # (name "=" delta-seconds); the colon form is not a valid directive and
  # would be ignored by conforming caches.
  @control "no-cache, private, max-age=86400"

  # Only GET requests participate in last-modified caching.
  @allowed_method ~w(GET)

  alias Plug.Conn

  @type options :: [updated_at: mfa()]

  @impl Plug
  @doc false
  @spec init(options()) :: options()
  def init(opts) do
    opts
  end

  @impl Plug
  @doc false
  @spec call(Plug.Conn.t(), options()) :: Plug.Conn.t()
  def call(%Conn{method: method} = conn, opts) when method in @allowed_method do
    content_updated_at = get_content_updated_at(opts)
    if_modified_since = Conn.get_req_header(conn, "if-modified-since")

    conn
    |> compare(content_updated_at, if_modified_since)
    |> control(content_updated_at)
  end

  # Non-GET requests pass through untouched.
  def call(%Conn{} = conn, _) do
    conn
  end

  # No If-Modified-Since header from the client: nothing to compare.
  defp compare(%Conn{} = conn, _content_last_modified_at, []) do
    conn
  end

  # No content timestamp available: nothing to compare.
  defp compare(%Conn{} = conn, nil, _) do
    conn
  end

  # Compares the client's cached timestamp with the content's. On an exact
  # match, responds 304 and halts the pipeline.
  defp compare(%Conn{} = conn, content_last_modified_at, [raw_req_last_modified_at | _]) do
    req_last_modified_at = parse_req_last_modified_header(raw_req_last_modified_at)

    case DateTime.compare(content_last_modified_at, req_last_modified_at) do
      :eq ->
        conn
        |> Conn.resp(:not_modified, [])
        |> Conn.halt()

      :gt ->
        conn

      :lt ->
        # FIXME: Not sure how to deal with this. Server downgrades the content?
        conn
    end
  end

  # Without a content timestamp only Cache-Control is set.
  defp control(%Conn{} = conn, nil) do
    conn
    |> Conn.put_resp_header("cache-control", @control)
  end

  # Sets Cache-Control plus an HTTP-date Last-Modified header (always GMT).
  defp control(%Conn{} = conn, content_last_modified_at) do
    content_last_modified =
      content_last_modified_at
      |> DateTime.shift_zone!("Etc/UTC")
      |> format_datetime()

    conn
    |> Conn.put_resp_header("cache-control", @control)
    |> Conn.put_resp_header("last-modified", content_last_modified)
  end

  # Resolves the :updated_at MFA. Returns the %DateTime{} on success, nil on
  # any failure (missing option or non-{:ok, %DateTime{}} result).
  defp get_content_updated_at(opts) do
    case Keyword.get(opts, :updated_at) do
      nil ->
        warn_module_not_set()
        nil

      {mod, fun, args} ->
        case apply(mod, fun, args) do
          {:ok, %DateTime{} = updated_at} ->
            updated_at

          _ ->
            nil
        end
    end
  end

  # Parses an IMF-fixdate header ("Sun, 06 Nov 1994 08:49:37 GMT") into a
  # %DateTime{}. NOTE(review): a malformed header raises a MatchError here
  # and crashes the request — confirm that is acceptable for this plug.
  defp parse_req_last_modified_header(datetime_string) do
    <<
      _weekday::binary-size(3),
      ", ",
      day::binary-size(2),
      " ",
      month::binary-size(3),
      " ",
      year::binary-size(4),
      " ",
      hour::binary-size(2),
      ":",
      minute::binary-size(2),
      ":",
      second::binary-size(2),
      " GMT"
    >> = datetime_string

    {:ok, datetime, _} =
      [
        year,
        "-",
        month_abbr_to_num(month),
        "-",
        day,
        "T",
        hour,
        ":",
        minute,
        ":",
        second,
        "Z"
      ]
      |> IO.iodata_to_binary()
      |> DateTime.from_iso8601()

    datetime
  end

  # Maps an English month abbreviation to its zero-padded number.
  defp month_abbr_to_num(month_abbr) do
    case month_abbr do
      "Jan" -> "01"
      "Feb" -> "02"
      "Mar" -> "03"
      "Apr" -> "04"
      "May" -> "05"
      "Jun" -> "06"
      "Jul" -> "07"
      "Aug" -> "08"
      "Sep" -> "09"
      "Oct" -> "10"
      "Nov" -> "11"
      "Dec" -> "12"
    end
  end

  # Formats a UTC %DateTime{} as an IMF-fixdate string, built as iodata and
  # flattened once.
  defp format_datetime(%DateTime{} = datetime) do
    [
      format_day_of_week(datetime),
      ", ",
      datetime.day |> Integer.to_string() |> String.pad_leading(2, "0"),
      " ",
      format_month(datetime),
      " ",
      datetime.year |> Integer.to_string(),
      " ",
      datetime.hour |> Integer.to_string() |> String.pad_leading(2, "0"),
      ":",
      datetime.minute |> Integer.to_string() |> String.pad_leading(2, "0"),
      ":",
      datetime.second |> Integer.to_string() |> String.pad_leading(2, "0"),
      " GMT"
    ]
    |> IO.iodata_to_binary()
  end

  # English day-of-week abbreviation (ISO: 1 = Monday).
  defp format_day_of_week(%DateTime{} = datetime) do
    case Calendar.ISO.day_of_week(datetime.year, datetime.month, datetime.day) do
      1 -> "Mon"
      2 -> "Tue"
      3 -> "Wed"
      4 -> "Thu"
      5 -> "Fri"
      6 -> "Sat"
      7 -> "Sun"
    end
  end

  # English month abbreviation.
  defp format_month(%DateTime{} = datetime) do
    case datetime.month do
      1 -> "Jan"
      2 -> "Feb"
      3 -> "Mar"
      4 -> "Apr"
      5 -> "May"
      6 -> "Jun"
      7 -> "Jul"
      8 -> "Aug"
      9 -> "Sep"
      10 -> "Oct"
      11 -> "Nov"
      12 -> "Dec"
    end
  end

  defp warn_module_not_set do
    Logger.warn([
      "[",
      inspect(__MODULE__),
      "] is used but cannot load updated_at. The content updated at check won't work as expected!"
    ])
  end
end
|
lib/ex_no_cache/plug/last_modified.ex
| 0.757481 | 0.412205 |
last_modified.ex
|
starcoder
|
defmodule Eager do
  @moduledoc """
  An eager (call-by-value) evaluator for a small functional language of
  atoms, variables, cons cells, lambdas, applications and case expressions.

  Expressions evaluate to structures (`str`); variable bindings live in an
  environment handled by the `Env` module.
  """

  @type atm :: {:atm, atom}
  @type variable :: {:var, atom}
  @type ignore :: :ignore
  @type cons(t) :: {:cons, t, t}

  # Pattern matching
  @type pattern :: atm | variable | ignore | cons(pattern)

  @type lambda :: {:lambda, [atom], [atom], seq}
  @type apply :: {:apply, expr, [expr]}
  @type case :: {:case, expr, [clause]}
  @type clause :: {:clause, pattern, seq}
  @type expr :: atm | variable | lambda | apply | case | cons(expr)

  # Sequences
  @type match :: {:match, pattern, expr}
  @type seq :: [expr] | [match | seq]

  # Expressions are evaluated to structures. A cons expression evaluates to
  # a two-tuple {head, tail} (hence the {str, str} member, which the original
  # type declaration was missing).
  @type closure :: {:closure, [atom], seq, env}
  @type str :: atom | [str] | {str, str} | closure

  # An environment is a key-value list of variable-to-structure bindings.
  @type env :: [{atom, str}]

  @doc """
  Evaluate a sequence in a fresh environment.

  Returns `{:ok, structure}` or `:error` (the spec previously claimed
  `:fail`, but `eval_seq/2` only ever produces `:error`).
  """
  @spec eval(seq) :: {:ok, str} | :error
  def eval(seq) do
    # a new environment is created
    eval_seq(seq, Env.new())
  end

  @doc """
  Evaluate a sequence given an environment.

  A sequence is a list of match expressions terminated by a single
  expression whose value becomes the result.
  """
  @spec eval_seq(seq, env) :: {:ok, str} | :error
  def eval_seq([exp], env) do
    eval_expr(exp, env)
  end

  def eval_seq([{:match, ptr, exp} | seq], env) do
    case eval_expr(exp, env) do
      :error ->
        :error

      {:ok, str} ->
        # Variables bound by the pattern shadow earlier bindings, so remove
        # them before matching.
        env = eval_scope(ptr, env)

        case eval_match(ptr, str, env) do
          :fail ->
            :error

          {:ok, env} ->
            eval_seq(seq, env)
        end
    end
  end

  @doc """
  Evaluate an expression given an environment.
  """
  @spec eval_expr(expr, env) :: {:ok, str} | :error
  def eval_expr({:atm, id}, _) do
    {:ok, id}
  end

  def eval_expr({:var, id}, env) do
    case Env.lookup(id, env) do
      nil ->
        IO.puts("variable binding for #{id} not present")
        :error

      {_, str} ->
        {:ok, str}
    end
  end

  def eval_expr({:cons, he, te}, env) do
    case eval_expr(he, env) do
      :error ->
        :error

      {:ok, hs} ->
        case eval_expr(te, env) do
          :error ->
            :error

          {:ok, ts} ->
            {:ok, {hs, ts}}
        end
    end
  end

  def eval_expr({:case, expr, cls}, env) do
    case eval_expr(expr, env) do
      :error ->
        :error

      {:ok, str} ->
        eval_cls(cls, str, env)
    end
  end

  def eval_expr({:lambda, par, free, seq}, env) do
    # Capture the free variables of the body in a closure environment.
    case Env.closure(free, env) do
      :error ->
        :error

      closure ->
        {:ok, {:closure, par, seq, closure}}
    end
  end

  def eval_expr({:apply, expr, args}, env) do
    case eval_expr(expr, env) do
      :error ->
        :error

      {:ok, {:closure, par, seq, closure}} ->
        case eval_args(args, env) do
          :error ->
            :error

          {:ok, strs} ->
            # The body runs in the closure environment extended with the
            # evaluated arguments, not in the caller's environment.
            env = Env.args(par, strs, closure)
            eval_seq(seq, env)
        end

      {:ok, _} ->
        # Applying a non-closure value is an error.
        :error
    end
  end

  def eval_expr({:fun, id}, _env) do
    # Named functions are looked up in the Prgm module at runtime.
    {par, seq} = apply(Prgm, id, [])
    {:ok, {:closure, par, seq, []}}
  end

  def eval_expr(strange, _) do
    IO.puts("strange expression: ")
    IO.inspect(strange)
    :error
  end

  @doc """
  Evaluate a match of a pattern and structure given an environment.
  """
  @spec eval_match(pattern, str, env) :: {:ok, env} | :fail
  def eval_match({:atm, id}, id, env) do
    {:ok, env}
  end

  def eval_match({:var, id}, str, env) do
    case Env.lookup(id, env) do
      nil ->
        # Unbound variable: bind it.
        {:ok, Env.add(id, str, env)}

      {^id, ^str} ->
        # Already bound to the same structure: match succeeds.
        {:ok, env}

      {_, _} ->
        # Bound to a different structure: match fails.
        :fail
    end
  end

  def eval_match(:ignore, _, env) do
    {:ok, env}
  end

  def eval_match({:cons, hp, tp}, {hs, ts}, env) do
    case eval_match(hp, hs, env) do
      :fail ->
        :fail

      {:ok, env} ->
        eval_match(tp, ts, env)
    end
  end

  def eval_match(_, _, _) do
    :fail
  end

  @doc """
  Create a new scope, remove all variables in the pattern
  from the given environment.
  """
  @spec eval_scope(pattern, env) :: env
  def eval_scope(ptr, env) do
    Env.remove(extract_vars(ptr), env)
  end

  @doc """
  Evaluate a list of clauses against a structure in an environment; the
  first clause whose pattern matches has its body evaluated.
  """
  @spec eval_cls([clause], str, env) :: {:ok, str} | :error
  def eval_cls([], _, _) do
    IO.puts("no more clauses")
    :error
  end

  def eval_cls([{:clause, ptr, seq} | cls], str, env) do
    env = eval_scope(ptr, env)

    case eval_match(ptr, str, env) do
      :fail ->
        eval_cls(cls, str, env)

      {:ok, env} ->
        eval_seq(seq, env)
    end
  end

  @doc """
  Evaluate a list of expressions, if any expression evaluates
  to :error then evaluation stops and an :error is returned, otherwise
  a list of the resulting structures is returned.
  """
  @spec eval_args([expr], env) :: {:ok, [str]} | :error
  def eval_args(args, env) do
    eval_args(args, env, [])
  end

  # Accumulates results in reverse and flips them once at the end.
  def eval_args([], _, strs) do
    {:ok, Enum.reverse(strs)}
  end

  def eval_args([expr | exprs], env, strs) do
    case eval_expr(expr, env) do
      :error ->
        :error

      {:ok, str} ->
        eval_args(exprs, env, [str | strs])
    end
  end

  @spec extract_vars(pattern) :: [variable]
  def extract_vars(pattern) do
    extract_vars(pattern, [])
  end

  @spec extract_vars(pattern, [variable]) :: [variable]
  def extract_vars({:atm, _}, vars) do vars end
  def extract_vars(:ignore, vars) do vars end

  def extract_vars({:var, var}, vars) do
    [var | vars]
  end

  def extract_vars({:cons, head, tail}, vars) do
    extract_vars(tail, extract_vars(head, vars))
  end
end
|
interpreter/lib/eager.ex
| 0.766818 | 0.434581 |
eager.ex
|
starcoder
|
defmodule CoopMinesweeper.Game.Field do
  @moduledoc """
  This module is responsible for the minesweeper fields. It initializes new
  fields and handles turns and mark toggles. It also determines whether a turn
  leads to a win or loss.
  """

  alias __MODULE__
  alias CoopMinesweeper.Game.Tile

  # Bounds enforced by new/4.
  @min_size 6
  @max_size 50
  @min_mines 5

  defstruct [
    :id,
    :size,
    :mines,
    :tiles,
    :mines_left,
    :state,
    :visibility,
    :last_interaction,
    # Mines are placed lazily on the first turn so the first revealed tile is
    # never a mine (see make_turn/3).
    mines_initialized: false,
    recent_player: ""
  ]

  # {row, column}, zero-based.
  @type position() :: {non_neg_integer(), non_neg_integer()}
  @type tiles() :: %{position() => Tile.t()}
  @type state() :: :running | :won | :lost
  @type visibility() :: :public | :private

  @type t() :: %Field{
          id: String.t(),
          size: non_neg_integer(),
          mines: non_neg_integer(),
          tiles: tiles(),
          mines_left: non_neg_integer(),
          state: state(),
          visibility: visibility(),
          last_interaction: DateTime.t(),
          mines_initialized: boolean(),
          recent_player: String.t()
        }

  # Result types of the public API below.
  @type on_new_error() :: {:error, :too_small | :too_large | :too_few_mines | :too_many_mines}
  @type on_make_turn() ::
          {:ok, {Field.t(), tiles()}} | {:error, :out_of_field | :invalid_position | :not_running}
  @type on_toggle_mark() ::
          {:ok, {Field.t(), tiles()}} | {:error, :out_of_field | :invalid_position | :not_running}
  @type on_play_again() ::
          {:ok, Field.t()} | {:error, :still_running}
@doc """
Generates a new field.
"""
@spec new(
size :: non_neg_integer(),
mines :: non_neg_integer(),
game_id :: String.t(),
visibility :: visibility()
) ::
{:ok, Field.t()} | on_new_error()
def new(size, _, _, _) when size < @min_size, do: {:error, :too_small}
def new(size, _, _, _) when size > @max_size, do: {:error, :too_large}
def new(_, mines, _, _) when mines < @min_mines, do: {:error, :too_few_mines}
def new(size, mines, _, _) when mines > size * size / 4, do: {:error, :too_many_mines}
def new(size, mines, game_id, visibility)
when is_binary(game_id) and visibility in [:public, :private] do
tiles =
for row <- 0..(size - 1), col <- 0..(size - 1), into: %{} do
{{row, col}, %Tile{}}
end
field = %Field{
id: game_id,
size: size,
mines: mines,
tiles: tiles,
mines_left: mines,
state: :running,
visibility: visibility,
last_interaction: DateTime.utc_now()
}
{:ok, field}
end
  @doc """
  Makes a turn at the provided position of the field.
  If the field was untouched before, the mines are added before applying the
  turn. This ensures that the first turn isn't placed on a mine and reveals a
  bigger area.
  """
  @spec make_turn(field :: Field.t(), pos :: position(), player :: String.t()) :: on_make_turn()
  def make_turn(%Field{state: state}, _pos, _player) when state != :running,
    do: {:error, :not_running}
  def make_turn(%Field{size: size}, {row, col}, _player)
      when row < 0 or row >= size or col < 0 or col >= size,
      do: {:error, :out_of_field}
  # First turn: place mines while keeping the clicked tile and its neighbors
  # mine-free, then retry the turn on the initialized field.
  def make_turn(%Field{mines_initialized: false} = field, pos, player) do
    restricted_positions = get_surrounding_positions(field, pos)
    field = initialize_mines(field, restricted_positions)
    field = %{field | mines_initialized: true}
    make_turn(field, pos, player)
  end
  def make_turn(%Field{tiles: tiles} = field, pos, player) do
    # Only hidden tiles can be played; marked or revealed tiles are rejected.
    if tiles[pos].state != :hidden do
      {:error, :invalid_position}
    else
      field = %{field | recent_player: player}
      field = %{field | last_interaction: DateTime.utc_now()}
      if tiles[pos].mine? do
        # Mine hit: reveal the mines (helper defined later in this module)
        # and end the game as lost.
        {field, changes} = reveal_mines(field, :lost)
        field = %{field | state: :lost}
        {:ok, {field, changes}}
      else
        {field, changes} = reveal_tile(field, pos)
        if won?(field) do
          # All safe tiles revealed: expose the remaining mines and merge
          # those changes with the reveal changes for the client diff.
          {field, reveal_changes} = reveal_mines(field, :won)
          changes = Map.merge(changes, reveal_changes)
          field = %{field | mines_left: 0}
          field = %{field | state: :won}
          {:ok, {field, changes}}
        else
          {:ok, {field, changes}}
        end
      end
    end
  end
  @doc """
  Marks a hidden tile or removes the mark of a marked tile.
  """
  @spec toggle_mark(field :: Field.t(), pos :: position(), player :: String.t()) ::
          on_toggle_mark()
  def toggle_mark(%Field{state: state}, _pos, _player) when state != :running,
    do: {:error, :not_running}
  # NOTE(review): this error value is not part of on_toggle_mark() — consider
  # extending the type.
  def toggle_mark(%Field{mines_initialized: mines_initialized}, _pos, _player)
      when not mines_initialized,
      do: {:error, :mines_not_initialized}
  def toggle_mark(%Field{size: size}, {row, col}, _player)
      when row < 0 or row >= size or col < 0 or col >= size,
      do: {:error, :out_of_field}
  def toggle_mark(%Field{tiles: tiles} = field, pos, player) do
    state = tiles[pos].state
    if state in [:hidden, :mark] do
      # `update_in/2` is the macro form here: `field.tiles[pos]` is the access
      # path, so each branch returns the whole updated field, which is then
      # piped into the mines_left counter update.
      field =
        if state == :hidden do
          field.tiles[pos]
          |> update_in(&Tile.set_state(&1, :mark))
          |> Map.update!(:mines_left, &(&1 - 1))
        else
          field.tiles[pos]
          |> update_in(&Tile.set_state(&1, :hidden))
          |> Map.update!(:mines_left, &(&1 + 1))
        end
      field = %{field | recent_player: player}
      field = %{field | last_interaction: DateTime.utc_now()}
      # The change set contains just the toggled tile.
      {:ok, {field, %{pos => field.tiles[pos]}}}
    else
      {:error, :invalid_position}
    end
  end
@doc """
Determines whether a position is inside of the field.
"""
@spec inside_field?(field :: Field.t(), pos :: position()) :: boolean()
def inside_field?(%Field{size: size}, {row, col}) do
  # Both coordinates must fall into 0..size-1.
  row >= 0 and col >= 0 and row < size and col < size
end
@doc """
Returns a list of positions that are around the provided position or the
provided position itself. The returned positions are guaranteed to be
inside of the field.
"""
@spec get_surrounding_positions(field :: Field.t(), pos :: position(), include_self :: boolean) ::
[position()]
# Walks the 3x3 neighbourhood of {row, col}; the two comprehension filters
# drop out-of-bounds positions and (when include_self is false) the center.
def get_surrounding_positions(%Field{} = field, {row, col}, include_self \\ true) do
for restr_row <- (row - 1)..(row + 1),
restr_col <- (col - 1)..(col + 1),
inside_field?(field, {restr_row, restr_col}),
include_self or restr_row != row or restr_col != col,
do: {restr_row, restr_col}
end
@doc """
Returns a string representation of the field.
"""
@spec to_string(field :: Field.t()) :: String.t()
# Renders the full (cheat) view: "X" for mines, otherwise the number of
# adjacent mines. Rows are joined with newlines (no trailing newline).
def to_string(%Field{tiles: tiles, size: size}) do
for row <- 0..(size - 1), into: "" do
row_str =
# `tile = tiles[{row, col}]` both binds the tile and acts as a filter
# (a nil tile would be skipped).
for col <- 0..(size - 1), tile = tiles[{row, col}], into: "" do
if tile.mine?,
do: "X",
else: Kernel.to_string(tile.mines_close)
end
if row == size - 1 do
row_str
else
row_str <> "\n"
end
end
end
@doc """
Resets a field that is not running.
"""
@spec play_again(field :: Field.t()) :: on_play_again()
def play_again(%Field{state: :running}), do: {:error, :still_running}
# Builds a brand new field with the same id, size, mine count and
# visibility; all tile/turn state is discarded.
def play_again(%Field{id: id, size: size, mines: mines, visibility: visibility}) do
{:ok, field} = new(size, mines, id, visibility)
{:ok, field}
end
@doc """
Returns a string representation of the field that can be shown to the player.
"""
@spec to_player_string(field :: Field.t()) :: String.t()
# Unlike to_string/1, this renders only what the player may know: each tile
# is drawn from its visible state, never from the hidden mine? flag.
def to_player_string(%Field{tiles: tiles, size: size}) do
for row <- 0..(size - 1), into: "" do
row_str =
for col <- 0..(size - 1), tile = tiles[{row, col}], into: "" do
case tile.state do
:mine -> "X"
:mark -> "M"
:false_mark -> "F"
:revealed -> Kernel.to_string(tile.mines_close)
:hidden -> "_"
end
end
if row == size - 1 do
row_str
else
row_str <> "\n"
end
end
end
@spec initialize_mines(
field :: Field.t(),
restricted_positions :: [position()],
mines_generated :: non_neg_integer()
) :: Field.t()
# Places mines one at a time until the counter reaches the field's total.
defp initialize_mines(field, restricted_positions, mines_generated \\ 0)
# Terminates when mines_generated equals the `mines` count of the field
# (the repeated `mines` binding in the head enforces the equality).
defp initialize_mines(%Field{mines: mines} = field, _, mines), do: field
defp initialize_mines(
%Field{} = field,
restricted_positions,
mines_generated
) do
mine_position = generate_mine_position(field, restricted_positions)
field = add_mine(field, mine_position)
initialize_mines(field, restricted_positions, mines_generated + 1)
end
@spec generate_mine_position(field :: Field.t(), restricted_positions :: [position()]) ::
position
# Rejection sampling: picks a random position and retries while it is
# restricted or already mined.
# NOTE(review): assumes the field always has a free, unrestricted tile left;
# otherwise this recursion would never terminate — confirm `new/4` validates
# the mine count against the field size.
defp generate_mine_position(%Field{tiles: tiles, size: size} = field, restricted_positions) do
pos = {:rand.uniform(size) - 1, :rand.uniform(size) - 1}
if pos in restricted_positions or tiles[pos].mine? do
generate_mine_position(field, restricted_positions)
else
pos
end
end
@spec add_mine(field :: Field.t(), pos :: position()) :: Field.t()
# Turns the tile at `pos` into a mine and increments the `mines_close`
# counter of every surrounding tile.
defp add_mine(%Field{} = field, pos) do
field = update_in(field.tiles[pos], &Tile.change_to_mine/1)
field
|> get_surrounding_positions(pos, false)
|> Enum.reduce(
field,
fn sur_pos, field ->
update_in(field.tiles[sur_pos], &Tile.increment_mines_close/1)
end
)
end
# Reveals a tile which is not a mine. If the tile has zero mines close, also
# reveal all surrounding tiles (recursive flood fill). `changes_so_far`
# accumulates every tile that changed, keyed by position.
@spec reveal_tile(
field :: Field.t(),
pos :: position(),
changes_so_far :: tiles()
) :: {Field.t(), tiles()}
defp reveal_tile(%Field{} = field, pos, changes_so_far \\ %{}) do
# Already-revealed (or otherwise terminal) tiles stop the recursion; this
# guard is what keeps the flood fill from looping forever.
if field.tiles[pos].state not in [:hidden, :mark] do
{field, changes_so_far}
else
field = update_in(field.tiles[pos], &Tile.set_state(&1, :revealed))
changes_so_far = Map.put(changes_so_far, pos, field.tiles[pos])
if field.tiles[pos].mines_close == 0 do
# No adjacent mines: recursively reveal the whole neighbourhood,
# threading both the field and the change map through the reduce.
field
|> get_surrounding_positions(pos, false)
|> Enum.reduce({field, changes_so_far}, fn sur_pos, {field, changes_so_far} ->
reveal_tile(field, sur_pos, changes_so_far)
end)
else
{field, changes_so_far}
end
end
end
# Reveals mines and identifies false marks.
# When the game was won, remaining hidden mines become marks; when lost they
# are shown as mines. Non-mine marks become :false_mark in either case.
@spec reveal_mines(field :: Field.t(), mode :: :won | :lost) :: {Field.t(), tiles()}
# Shortcut: a won game with no mines left to flag needs no changes.
defp reveal_mines(%Field{mines_left: 0} = field, :won), do: {field, %{}}
defp reveal_mines(%Field{tiles: tiles} = field, mode) do
hidden_substitution = if mode == :won, do: :mark, else: :mine
Enum.reduce(Map.keys(tiles), {field, %{}}, fn pos, {field, changes_so_far} ->
cond do
field.tiles[pos].mine? and field.tiles[pos].state == :hidden ->
field = update_in(field.tiles[pos], &Tile.set_state(&1, hidden_substitution))
{field, Map.put(changes_so_far, pos, field.tiles[pos])}
field.tiles[pos].state == :mark and not field.tiles[pos].mine? ->
field = update_in(field.tiles[pos], &Tile.set_state(&1, :false_mark))
{field, Map.put(changes_so_far, pos, field.tiles[pos])}
true ->
{field, changes_so_far}
end
end)
end
# A field is won once no tile remains that is both unrevealed and not a mine.
@spec won?(field :: Field.t()) :: boolean()
defp won?(%Field{tiles: tiles}) do
  not Enum.any?(tiles, fn {_pos, tile} ->
    tile.state != :revealed and not tile.mine?
  end)
end
end
defimpl String.Chars, for: CoopMinesweeper.Game.Field do
  alias CoopMinesweeper.Game.Field

  # Delegates `Kernel.to_string/1` support to the field's own renderer.
  def to_string(%Field{} = field), do: Field.to_string(field)
end
|
lib/coop_minesweeper/game/field.ex
| 0.868924 | 0.680069 |
field.ex
|
starcoder
|
defmodule Drab.Live.Assign do
@moduledoc false
@spec trim(term) :: term
@doc """
Reduces size of the assigns by shrinking @conn to include only the essential information
(by default it is .private.phoenix_endpoint only).
"""
# Looks up the per-endpoint :live_conn_pass_through filter and applies it.
def trim(%Plug.Conn{} = conn) do
filter = Drab.Config.get(Phoenix.Controller.endpoint_module(conn), :live_conn_pass_through)
trim(conn, filter)
end
# Non-conn assigns pass through untouched.
def trim(other), do: other
# Rebuilds a fresh struct containing only the fields kept by `filter`.
# NOTE(review): merge/2 below pattern-matches on %Plug.Conn{} only, so
# calling trim/2 with any other struct raises — confirm callers only pass
# conns here.
def trim(struct, filter) do
filtered = filter(struct, filter)
merge(struct(struct.__struct__), filtered)
end
# Filters a conn down to the fields allowed by the endpoint configuration.
def filter(%Plug.Conn{} = conn) do
filter = Drab.Config.get(Phoenix.Controller.endpoint_module(conn), :live_conn_pass_through)
filter(conn, filter)
end
def filter(other), do: other
# Keeps only the entries of `struct` whose keys appear (recursively) in the
# `filter` map.
def filter(struct, filter) do
deep_filter_map(struct, filter)
end
# Merges a (filtered) plain map back into a %Plug.Conn{}.
def merge(%Plug.Conn{} = conn, map) do
merged = deep_merge_map(conn, map)
struct(%Plug.Conn{}, merged)
end
def merge(other), do: other
# all hails to @OvermindDL1 for this idea and the following functions
# Structs are filtered via their underlying map representation.
defp deep_filter_map(%_{} = struct, map_filter) do
deep_filter_map(Map.from_struct(struct), map_filter)
end
# For each key: `true` in the filter keeps the value as-is; a nested filter
# map recurses (dropping the key entirely if nothing survives); anything
# else drops the key.
defp deep_filter_map(original, map_filter) do
original
|> Enum.flat_map(fn {key, value} = set ->
case map_filter[key] do
true ->
[set]
%{} = map_filter when is_map(value) ->
value = deep_filter_map(value, map_filter)
if map_size(value) === 0, do: [], else: [{key, value}]
_ ->
[]
end
end)
|> Enum.into(%{})
end
defp deep_merge_map(%_{} = struct, to_merge) do
deep_merge_map(Map.from_struct(struct), to_merge)
end
# Recursively overlays `to_merge` onto `base`; nested structs replace the
# base value wholesale, plain nested maps are merged key by key.
defp deep_merge_map(base, to_merge) do
Enum.reduce(to_merge, base, fn
{key, %_{} = value}, base ->
Map.put(base, key, value)
{key, %{} = value}, base ->
sub = base[key] || %{}
sub = if is_map(sub), do: deep_merge_map(sub, value), else: sub
Map.put(base, key, sub)
{key, value}, base ->
Map.put(base, key, value)
end)
end
end
|
lib/drab/live/assign.ex
| 0.707708 | 0.403773 |
assign.ex
|
starcoder
|
defmodule Authority.Ecto.Template do
@moduledoc """
Automatically implements `Authority` behaviours into modules of your
choice, minimizing the amount of code that you have to write. All callbacks
remain overridable, however.
## Definition
`Authority` expects you to define a module in your application to hold all the
`Authority`-related functions. This module could be called `Accounts`, for
example.
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [...], # A list of Authority behaviours
config: [...] # A keyword list of configuration options
end
You could also define multiple modules, each of which only implement _some_
`Authority` behaviours, depending on your preferences.
## Behaviours
### `Authority.Authentication`
_Provides basic email/password (or username/password) authentication._
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [Authority.Authentication],
config: [
repo: MyApp.Repo,
user_schema: MyApp.Accounts.Schema
]
end
- `:user_schema`: (required) the `Ecto.Schema` that represents a user in
your app.
- `:user_identity_field`: (optional) the identification field on
`:user_schema`'s schema (Default: `:email`)
- `:user_password_field`: (optional) the password field `:user_schema`'s
schema (Default: `:encrypted_password`)
- `:user_password_algorithm`: (optional) the password hashing algorithm
(Default: `:bcrypt`)
### `Authority.Locking`
_Provides automatic account locking after a configurable number of
attempts. Must be used with `Authority.Authentication`_.
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [
Authority.Authentication,
Authority.Locking
],
config: [
repo: MyApp.Repo,
user_schema: MyApp.Accounts.User,
lock_schema: MyApp.Accounts.Lock,
lock_attempt_schema: MyApp.Accounts.LoginAttempt
]
end
- `:lock_schema`: (required) the `Ecto.Schema` which represents a lock
- `:lock_attempt_schema`: (required) the `Ecto.Schema` which represents a
failed attempt to log in.
- `:lock_expiration_field`: (optional) the expiration field on the
`:lock_schema` schema (Default: `:expires_at`)
- `:lock_user_assoc`: (optional) the association on `:lock_schema` which
relates the lock to a user. (Default: `:user`)
- `:lock_reason_field`: (optional) the field on `:lock_schema`'s schema
which stores the reason for the lock. (Default: `:reason`)
- `:lock_max_attempts`: (optional) the number of failed attempts that will
create a lock. (Default: `5`)
- `:lock_interval_seconds`: (optional) the interval in which attempts are
counted. For example, '5 failures in 10 minutes'. (Default: `6000`, 10
minutes)
- `:lock_duration_seconds`: (optional) the duration that a user account
will be locked. (Default: `6000`, 10 minutes)
### `Authority.Recovery`
_Provides account recovery. Requires `Authority.Tokenization`._
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [
Authority.Authentication,
Authority.Recovery,
Authority.Tokenization
],
config: [
repo: MyApp.Repo,
user_schema: MyApp.Accounts.User,
token_schema: MyApp.Accounts.Token,
recovery_callback: {MyApp.Notifications, :forgot_password}
]
end
defmodule MyApp.Notifications do
def forgot_password(email, token) do
# Send the forgot password email
end
end
- `:recovery_callback`: an atom function name or module/function tuple to
be called after generating a recovery token. This function is actually
responsible to send the "forgot password" email to the user.
### `Authority.Registration`
_Provides user registration and updating._
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [
Authority.Registration
],
config: [
repo: MyApp.Repo,
user_schema: MyApp.Accounts.User
]
end
- `:user_schema`: (required) the `Ecto.Schema` which represents a user.
### `Authority.Tokenization`
_Provides tokenization for credentials. Must be used with
`Authority.Authentication`_.
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [
Authority.Authentication,
Authority.Tokenization
],
config: [
repo: MyApp.Repo,
user_schema: MyApp.Accounts.User,
token_schema: MyApp.Accounts.Token
]
end
- `:token_schema`: (required) the `Ecto.Schema` which represents a token.
- `:token_field`: (optional) the field on `:token_schema` which stores the
token value. (Default: `:token`)
- `:token_user_assoc`: (optional) the association on `:token_schema` which
relates a token to a user. (Default: `:user`)
- `:token_expiration_field`: (optional) the field on `:token_schema` which
stores the expiration date of the token. (Default: `:expires_at`)
- `:token_purpose_field`: (optional) the field on `:token_schema` which
stores the purpose of the token. (Default: `:purpose`)
## Sample Schemas
You should use the following `Ecto.Schema`s as guides for how to design your
authentication-related schemas.
### User
defmodule MyApp.Accounts.User do
use Ecto.Schema
import Ecto.Changeset
import Authority.Ecto.Changeset
@type t :: %__MODULE__{}
schema "users" do
field :email, :string
field :encrypted_password, :string
field :password, :string, virtual: true
field :password_confirmation, :string, virtual: true
timestamps(type: :utc_datetime)
end
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [:email, :password, :password_confirmation])
|> validate_required([:email, :password])
|> validate_secure_password(:password)
|> put_encrypted_password(:password, :encrypted_password)
end
end
### Token
An additional dependency on [ex_numerator](https://hex.pm/ex_numerator) can be helpful.
defmodule MyApp.Accounts.Token do
use Ecto.Schema
import Ecto.Changeset
import Authority.Ecto.Changeset
@type t :: %__MODULE__{}
defmodule Purpose do
use Exnumerator, values: [:any, :recovery]
end
defmodule HMAC do
use Authority.Ecto.HMAC, secret: "authority"
end
schema "tokens" do
belongs_to :user, MyApp.Accounts.User
field :token, HMAC
field :expires_at, :utc_datetime
field :purpose, Purpose
timestamps(type: :utc_datetime)
end
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [:expires_at, :purpose])
|> put_token(:token)
|> put_token_expiration(:expires_at, :purpose, recovery: {24, :hours}, any: {14, :days})
end
end
### Lock
An additional dependency on [ex_numerator](https://hex.pm/ex_numerator) can be helpful.
defmodule Authority.Ecto.Test.Lock do
use Ecto.Schema
import Ecto.Changeset
@type t :: %__MODULE__{}
defmodule Reason do
use Exnumerator, values: [:too_many_attempts]
end
schema "locks" do
belongs_to :user, MyApp.Accounts.User
field :reason, Reason
field :expires_at, :utc_datetime
timestamps(type: :utc_datetime)
end
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [:reason, :expires_at])
end
end
## Using Your Module
Once you've configured your module, you can call `Authority` behaviour
functions, depending on the behaviours your chose.
alias MyApp.Accounts
Accounts.create_user(%{
email: "user@example.com",
password: "password",
password_confirmation: "password"
})
# => {:ok, %MyApp.Accounts.User{}}
Accounts.authenticate({"user@example.com", "password"})
# => {:ok, %MyApp.Accounts.User{}}
Accounts.authenticate(%MyApp.Accounts.Token{token: "valid-token"})
# => {:ok, %MyApp.Accounts.User{}}
Accounts.tokenize({"user@example.com", "password"})
# => {:ok, %MyApp.Accounts.Token{}}
# After too many failed attempts to log in:
Accounts.authenticate({"user@example.com", "invalid"})
# => {:error, %MyApp.Accounts.Lock{reason: :too_many_attempts}}
Accounts.tokenize({"user@example.com", "invalid"})
# => {:error, %MyApp.Accounts.Lock{reason: :too_many_attempts}}
# Send a password reset email
Accounts.recover("user@example.com")
## Overriding Callbacks
You can override any callback function to add support for new data types.
For example, you can override `identify` to provide support for custom
types.
defmodule MyApp.Accounts do
use Authority.Ecto.Template,
behaviours: [Authority.Authentication],
config: [repo: MyApp.Repo, user_schema: MyApp.Accounts.User]
def identify(%MyApp.CustomStruct{} = struct) do
# find user
end
# Use `super` to fall back to the identify/1 function
# provided by the template.
def identify(other), do: super(other)
end
"""
alias Authority.{
Authentication,
Locking,
Recovery,
Registration,
Tokenization,
Ecto.Template
}
@templates %{
Authentication => Template.Authentication,
Locking => Template.Locking,
Recovery => Template.Recovery,
Registration => Template.Registration,
Tokenization => Template.Tokenization
}
# Raised when `use Authority.Ecto.Template` is given missing or invalid
# options (see __using__/1 below).
defmodule Error do
defexception [:message]
end
# Validates the template options and injects one `use TemplateModule, ...`
# per requested behaviour into the caller.
defmacro __using__(config) do
# The options arrive as quoted AST; evaluate them in the caller's context
# so they can be inspected at compile time.
{config, _} = Code.eval_quoted(config, [], __CALLER__)
unless config[:behaviours] do
raise Error, "You must specify :behaviours"
end
unless config[:config] do
raise Error, "You must specify :config"
end
# Returns a list of quoted expressions — the compiler injects each of them
# into the caller module in order.
for behaviour <- config[:behaviours] do
unless @templates[behaviour] do
raise Error, "No template found for behaviour #{inspect(behaviour)}"
end
quote location: :keep do
use unquote(@templates[behaviour]), unquote(config[:config])
end
end
end
@doc false
# Heuristic check whether `module` implements a given Authority behaviour,
# keyed off a representative callback for each one.
# NOTE(review): `Module.defines?/2` only works on modules that are still
# being compiled — confirm this is only called from within macro expansion.
def implements?(module, Authority.Authentication) do
Module.defines?(module, {:authenticate, 2})
end
def implements?(module, Authority.Tokenization) do
Module.defines?(module, {:tokenize, 2})
end
def implements?(module, Authority.Locking) do
Module.defines?(module, {:lock, 2})
end
def implements?(_module, _behaviour), do: false
end
|
lib/authority/ecto/template.ex
| 0.881723 | 0.468669 |
template.ex
|
starcoder
|
defmodule GameOfLife do
  @moduledoc """
  Logic for Conway's Game of Life.

  A cell is a `[col, row]` coordinate pair and the board state is simply the
  list of currently active cells. Each generation is computed by collecting
  the neighbourhoods of all active cells and applying the survival rules to
  every cell in that set. The next state is again just a list of active
  cells.
  """

  @doc """
  Advances the board one generation.

  Returns the new list of active cells.
  """
  def tick(state, board_size) do
    state
    |> list_of_affected_cells_in_this_iteration(board_size)
    |> apply_game_logic(state, board_size)
  end

  @doc """
  Lists every cell whose fate may change this generation.

  That set is exactly the neighbourhoods (self included) of all currently
  active cells, deduplicated.
  """
  def list_of_affected_cells_in_this_iteration(state, board_size) do
    state
    |> Enum.flat_map(&list_of_neighbours(&1, board_size))
    |> Enum.uniq()
  end

  @doc """
  Returns the 3x3 neighbourhood of a cell, the cell itself included,
  clipped to the board.
  """
  def list_of_neighbours([x, y], board_size) do
    neighbourhood = [
      [x - 1, y + 1], [x, y + 1], [x + 1, y + 1],
      [x - 1, y], [x, y], [x + 1, y],
      [x - 1, y - 1], [x, y - 1], [x + 1, y - 1]
    ]

    remove_out_of_bound_cells!(neighbourhood, board_size)
  end

  @doc """
  Drops cells that lie outside the board (coordinates are 1-based).
  """
  def remove_out_of_bound_cells!(cells, [cols, rows]) do
    in_bounds? = fn [x, y] -> x > 0 and y > 0 and x <= cols and y <= rows end
    Enum.filter(cells, in_bounds?)
  end

  @doc """
  Applies the Game of Life rules, keeping only the cells that are populated
  in the next generation.
  """
  def apply_game_logic(affected_cells, state, board_size) do
    for cell <- affected_cells, is_cell_populated?(cell, state, board_size), do: cell
  end

  @doc """
  Determines whether a cell is populated in the next generation.

  - A live cell with 2 or 3 live neighbours survives.
  - A dead cell with exactly 3 live neighbours is born.
  - Every other cell is (or stays) dead.
  """
  def is_cell_populated?(cell, state, board_size) do
    alive? = Enum.member?(state, cell)
    active = number_of_active_neighbours(cell, state, board_size)

    case {alive?, active} do
      {true, n} when n in 2..3 -> true
      {false, 3} -> true
      _ -> false
    end
  end

  @doc """
  Counts the live neighbours of a cell (the cell itself excluded).
  """
  def number_of_active_neighbours(cell, state, board_size) do
    cell
    |> list_of_neighbours(board_size)
    |> Enum.count(fn neighbour -> neighbour != cell and Enum.member?(state, neighbour) end)
  end
end
|
apps/game_of_life/lib/game_of_life.ex
| 0.885724 | 0.83508 |
game_of_life.ex
|
starcoder
|
defmodule Fatura do
@moduledoc """
Functions for creating, sorting and persisting invoices (faturas).
"""
@doc """
Builds one `Fatura.Conta` struct for every combination of due day
(`vencimentos`, the outer generator) and invoice name (`faturas`).
## Examples
iex> Fatura.criar_faturas(["Telefone", "Agua", "Luz"], [5,10])
[
%Fatura.Conta{fatura: "Telefone", vencimento: 5},
%Fatura.Conta{fatura: "Agua", vencimento: 5},
%Fatura.Conta{fatura: "Luz", vencimento: 5},
%Fatura.Conta{fatura: "Telefone", vencimento: 10},
%Fatura.Conta{fatura: "Agua", vencimento: 10},
%Fatura.Conta{fatura: "Luz", vencimento: 10}
]
"""
def criar_faturas(faturas, vencimentos) do
for vencimento <- vencimentos, fatura <- faturas do
%Fatura.Conta{vencimento: vencimento, fatura: fatura}
end
end
@doc """
Splits the invoice list into `{to_pay, remaining}` after the first
`quantidade` entries (see `Enum.split/2`).
"""
def faturas_a_pagar(faturas, quantidade) do
Enum.split(faturas,quantidade)
end
@doc """
Serializes `faturas` with `:erlang.term_to_binary/1` and writes the result
to the file `nome_arquivo`.
## Examples
iex> faturas = Fatura.criar_faturas(["Telefone", "Agua", "Luz"], [5,10])
iex> Fatura.save(faturas, "struct")
:ok
"""
def save(faturas, nome_arquivo) do
binary = :erlang.term_to_binary(faturas)
File.write(nome_arquivo, binary)
end
@doc """
Reads the file `nome_arquivo` and deserializes the invoices stored in it,
or returns an error message string when the file cannot be read.
## Examples
iex> Fatura.load("struct")
[
%Fatura.Conta{fatura: "Telefone", vencimento: 5},
%Fatura.Conta{fatura: "Agua", vencimento: 5},
%Fatura.Conta{fatura: "Luz", vencimento: 5},
%Fatura.Conta{fatura: "Telefone", vencimento: 10},
%Fatura.Conta{fatura: "Agua", vencimento: 10},
%Fatura.Conta{fatura: "Luz", vencimento: 10}
]
"""
def load(nome_arquivo) do
# NOTE(review): :erlang.binary_to_term/1 is unsafe on untrusted files —
# confirm these files are only ever written by save/2.
case File.read(nome_arquivo) do
{:ok, binario} -> :erlang.binary_to_term(binario)
{:error, _} -> "Não foi possível carregar o nosso arquivo!!!"
end
end
@doc """
Creates the invoices, sorts them, splits off the first `quantidade` to pay
and saves the result to `nome_arquivo`.
iex> Fatura.pagar_faturas(["Telefone", "Agua", "Luz"], [5,10], 1, "salvado")
:ok
"""
def pagar_faturas(faturas, vencimento, quantidade, nome_arquivo) do
criar_faturas(faturas, vencimento)
|> ordena_faturas()
|> faturas_a_pagar(quantidade)
|> save(nome_arquivo)
end
@doc """
Sorts the invoice list (term ordering of the `Fatura.Conta` structs).
## Examples
iex> faturas = Fatura.criar_faturas(["Telefone","Agua","Luz"], [5,10])
iex> Fatura.ordena_faturas(faturas)
[
%Fatura.Conta{fatura: "Agua", vencimento: 5},
%Fatura.Conta{fatura: "Agua", vencimento: 10},
%Fatura.Conta{fatura: "Luz", vencimento: 5},
%Fatura.Conta{fatura: "Luz", vencimento: 10},
%Fatura.Conta{fatura: "Telefone", vencimento: 5},
%Fatura.Conta{fatura: "Telefone", vencimento: 10},
]
"""
def ordena_faturas(faturas) do
Enum.sort(faturas)
end
@doc """
Checks whether `fatura` is a member of the `faturas` list.
## Examples
iex> faturas = Fatura.criar_faturas(["Telefone","Agua","Luz"], [5,10])
iex(4)> Fatura.fatura_existe?(faturas, %Fatura.Conta{fatura: "Telefone", vencimento: 5})
true
"""
def fatura_existe?(faturas, fatura) do
Enum.member?(faturas, fatura)
end
end
|
lib/fatura.ex
| 0.61855 | 0.556098 |
fatura.ex
|
starcoder
|
defmodule Multipart.Part do
  @moduledoc """
  Represents an individual part of a `Multipart` message.
  """

  defstruct headers: [], body: nil, content_length: nil

  @type body :: binary() | Enum.t()
  @type t :: %__MODULE__{
          headers: [],
          body: body,
          content_length: pos_integer() | nil
        }
  @type headers :: [{binary, binary}]
  @type name :: String.t() | atom()

  @doc """
  Builds a `Part` with a binary body.

  Sets the `content_length` of the `Part` to the size of the binary in
  bytes. Bugfix: `byte_size/1` is used instead of `String.length/1` —
  content lengths count bytes, and the two differ for any body containing
  multi-byte UTF-8 characters.
  """
  @spec binary_body(binary(), headers()) :: t()
  def binary_body(body, headers \\ []) when is_binary(body) do
    %__MODULE__{body: body, content_length: byte_size(body), headers: headers}
  end

  @doc """
  Builds a `Part` with a streaming file body.

  Sets the `content_length` of the `Part` to the size of the file on disk, as
  inspected with `File.stat`.
  """
  @spec file_body(String.t(), headers()) :: t()
  def file_body(path, headers \\ []) do
    %File.Stat{size: size} = File.stat!(path)
    file_stream = File.stream!(path, [{:read_ahead, 4096}], 1024)
    %__MODULE__{body: file_stream, content_length: size, headers: headers}
  end

  @doc """
  Builds a `Part` with a `Stream` body.

  Because the length of the `Stream` cannot be known up front it doesn't
  define the `content_length`. This will cause `Multipart.content_length/1`
  to error unless you set the `content_length` manually in the struct.
  """
  @spec stream_body(Enum.t(), headers()) :: t()
  def stream_body(stream, headers \\ []) do
    %__MODULE__{body: stream, headers: headers}
  end

  @doc """
  Builds a form-data `Part` with a text body.
  """
  @spec text_field(binary(), name(), headers()) :: t()
  def text_field(body, name, headers \\ []) do
    headers = headers ++ [{"content-disposition", content_disposition("form-data", name: name)}]
    binary_body(body, headers)
  end

  @doc """
  Builds a form-data `Part` with a streaming file body.

  Takes the following `Keyword` options in `opts`:

  * `filename`: controls the inclusion of the `filename="foo"` directive in the
    `content-disposition` header. Defaults to `true`, which uses the filename
    from the path on disk. Pass in a `String` to override this, or set to
    `false` to disable this directive.
  * `content_type`: controls the inclusion of the `content-type` header.
    Defaults to `true` which will use `MIME.from_path/1` to detect the mime
    type of the file. Pass in a `String` to override this, or set to `false`
    to disable this header.
  """
  @spec file_field(String.t(), name(), headers(), Keyword.t()) :: t()
  def file_field(path, name, headers \\ [], opts \\ []) do
    filename = Keyword.get(opts, :filename, true)
    content_type = Keyword.get(opts, :content_type, true)

    headers =
      headers
      |> maybe_add_content_type_header(content_type, path)
      |> add_content_disposition_header(name, filename, path)

    file_body(path, headers)
  end

  @doc """
  Builds a form-data `Part` with a streaming body.
  """
  @spec stream_field(Enum.t(), name(), headers()) :: t()
  def stream_field(stream, name, headers \\ []) do
    headers = headers |> add_content_disposition_header(name)
    stream_body(stream, headers)
  end

  # Renders e.g. `form-data; name="foo"; filename="bar.txt"`.
  defp content_disposition(type, directives) do
    directives
    |> Enum.map(fn {k, v} ->
      "#{k}=\"#{v}\""
    end)
    |> List.insert_at(0, type)
    |> Enum.join("; ")
  end

  # Appends a form-data content-disposition header carrying only the field
  # name. Bugfix: the header tuple must be wrapped in a list — the previous
  # `Enum.concat(header)` on a bare tuple raised Protocol.UndefinedError
  # (compare the /4 variant below, which already did this correctly).
  def add_content_disposition_header(headers, name) do
    header = {"content-disposition", content_disposition("form-data", name: name)}
    Enum.concat(headers, [header])
  end

  # Appends a content-disposition header with name and (optionally) filename.
  def add_content_disposition_header(headers, name, filename, path) do
    content_disposition_opts = [name: name] |> maybe_add_filename_directive(filename, path)
    header = {"content-disposition", content_disposition("form-data", content_disposition_opts)}

    headers
    |> Enum.concat([header])
  end

  # `true` -> detect the mime type from the file extension.
  defp maybe_add_content_type_header(headers, true, path) do
    content_type = MIME.from_path(path)

    headers
    |> Enum.concat([{"content-type", content_type}])
  end

  # An explicit string overrides detection.
  defp maybe_add_content_type_header(headers, content_type, _path) when is_binary(content_type) do
    headers
    |> Enum.concat([{"content-type", content_type}])
  end

  # `false` -> no content-type header at all.
  defp maybe_add_content_type_header(headers, false, _path) do
    headers
  end

  # `true` -> take the filename from the path on disk.
  defp maybe_add_filename_directive(directives, true, path) do
    directives ++ [filename: Path.basename(path)]
  end

  # An explicit string overrides the on-disk name.
  defp maybe_add_filename_directive(directives, filename, _path) when is_binary(filename) do
    directives ++ [filename: filename]
  end

  # `false` -> omit the filename directive.
  defp maybe_add_filename_directive(directives, false, _path) do
    directives
  end
end
|
lib/multipart/part.ex
| 0.83752 | 0.489992 |
part.ex
|
starcoder
|
defmodule Mix.Tasks.Hex.Repo do
use Mix.Task
@shortdoc "Manages Hex repositories"
@moduledoc """
Manages the list of available Hex repositories.
The repository is where packages and the registry of packages is stored.
You can fetch packages from multiple different repositories and packages
can depend on packages from other repositories. To use a package from another
repository than the global default `hexpm` add `repo: "my_repo"` to the
dependency declaration in `mix.exs`:
{:plug, "~> 1.0", repo: "my_repo"}
By default all dependencies of plug will also be fetched from `my_repo`
unless plug has declared otherwise in its dependency definition.
To use packages from `my_repo` you need to add it to your configuration
first. You do that by calling `mix hex.repo add my_repo https://myrepo.example.com`.
The default repo is called `hexpm` and points to https://repo.hex.pm. This
can be overridden by using `mix hex.repo set ...`.
A repository configured from an organization will have `hexpm:` prefixed to
its name. To depend on packages from an organization add `repo: "hexpm:my_organization"`
to the dependency declaration or simply `organization: "my_organization"`.
To configure organizations, see the `hex.organization` task.
## Add a repo
mix hex.repo add NAME URL
### Command line options
* `--public-key PATH` - Path to public key used to verify the registry (optional).
* `--auth-key KEY` - Key used to authenticate HTTP requests to repository (optional).
* `--fetch-public-key FINGERPRINT` - Download public key from the repository and verify against the fingerprint (optional).
## Set config for repo
mix hex.repo set NAME --url URL
mix hex.repo set NAME --public-key PATH
mix hex.repo set NAME --auth-key KEY
## Remove repo
mix hex.repo remove NAME
## Show repo config
mix hex.repo show NAME
mix hex.repo show NAME --url
## List all repos
mix hex.repo list
"""
@behaviour Hex.Mix.TaskDescription
@add_switches [public_key: :string, auth_key: :string, fetch_public_key: :string]
@set_switches [url: :string, public_key: :string, auth_key: :string]
@show_switches [url: :boolean, public_key: :boolean, auth_key: :boolean]
@impl true
# Entry point for `mix hex.repo ...`. The arguments are parsed twice: once
# with no switches to extract the subcommand, then again with the switch
# spec specific to that subcommand.
def run(all_args) do
Hex.start()
{_opts, args} = Hex.OptionParser.parse!(all_args, switches: [])
case args do
["add", name, url] ->
{opts, _args} = Hex.OptionParser.parse!(all_args, strict: @add_switches)
add(name, url, opts)
["set", name] ->
{opts, _args} = Hex.OptionParser.parse!(all_args, strict: @set_switches)
set(name, opts)
["remove", name] ->
remove(name)
["show", name] ->
{opts, _args} = Hex.OptionParser.parse!(all_args, strict: @show_switches)
show(name, opts)
["list"] ->
list()
_ ->
invalid_args()
end
end
# Aborts the task with a usage summary when the subcommand is unknown.
defp invalid_args() do
Mix.raise("""
Invalid arguments, expected one of:
mix hex.repo add NAME URL
mix hex.repo set NAME
mix hex.repo remove NAME
mix hex.repo show NAME
mix hex.repo list
""")
end
@impl true
# Subcommand descriptions for `Hex.Mix.TaskDescription`.
def tasks() do
[
{"add NAME URL", "Add a repo"},
{"set NAME", "Set config for repo"},
{"remove NAME", "Remove repo"},
{"show NAME", "Show repo config"},
{"list", "List all repos"}
]
end
# `mix hex.repo add NAME URL`: registers a new repository.
# The public key comes either from a local file (--public-key) or is
# downloaded and fingerprint-checked (--fetch-public-key).
defp add(name, url, opts) do
public_key =
read_public_key(opts[:public_key]) ||
fetch_public_key(opts[:fetch_public_key], url, opts[:auth_key])
# Start from nil defaults, overlay the CLI options, then pin the resolved
# public key (the raw CLI value may be a path or fingerprint, not the key).
repo =
%{
url: url,
public_key: nil,
fetch_public_key: nil,
auth_key: nil
}
|> Map.merge(Enum.into(opts, %{}))
|> Map.put(:public_key, public_key)
Hex.State.fetch!(:repos)
|> Map.put(name, repo)
|> Hex.Config.update_repos()
end
# `mix hex.repo set NAME ...`: merges the given options into an existing
# repo entry (Map.update!/3 raises if the repo is unknown).
defp set(name, opts) do
opts =
# --public-key takes a file path; replace it with the file's contents.
if public_key = opts[:public_key] do
Keyword.put(opts, :public_key, read_public_key(public_key))
else
opts
end
Hex.State.fetch!(:repos)
|> Map.update!(name, &Map.merge(&1, Enum.into(opts, %{})))
|> Hex.Config.update_repos()
end
# `mix hex.repo remove NAME`: deletes the repo entry (no-op if unknown).
defp remove(name) do
Hex.State.fetch!(:repos)
|> Map.delete(name)
|> Hex.Config.update_repos()
end
# `mix hex.repo list`: prints all configured repos as a table; public keys
# are shown as SSH-style SHA256 fingerprints rather than raw PEM.
defp list() do
header = ["Name", "URL", "Public key", "Auth key"]
values =
Enum.map(Hex.State.fetch!(:repos), fn {name, config} ->
[
name,
config[:url],
show_public_key(config[:public_key]),
config[:auth_key]
]
end)
Mix.Tasks.Hex.print_table(header, values)
end
defp read_public_key(nil) do
nil
end
# Reads a PEM public key from disk. The decode call is made purely for
# validation (it raises a helpful error on bad PEM); the original key text
# is what gets stored.
defp read_public_key(path) do
key =
path
|> Path.expand()
|> File.read!()
decode_public_key(key)
key
end
# Decodes a PEM-encoded public key, converting any decode failure into a
# user-facing Mix error. The broad rescue is deliberate: this is a CLI
# input-validation boundary.
defp decode_public_key(key) do
[pem_entry] = :public_key.pem_decode(key)
:public_key.pem_entry_decode(pem_entry)
rescue
_ ->
Mix.raise("""
Could not decode public key. The public key contents are shown below.
#{key}
Public keys must be valid and be in the PEM format.
""")
end
defp show_public_key(nil), do: nil
# Renders a PEM public key as its SSH-style SHA256 fingerprint for display
# and for fingerprint comparison in fetch_public_key/3.
defp show_public_key(public_key) do
[pem_entry] = :public_key.pem_decode(public_key)
public_key = :public_key.pem_entry_decode(pem_entry)
ssh_hostkey_fingerprint(public_key)
end
defp fetch_public_key(nil, _, _), do: nil
# Downloads the repo's public key over HTTP and accepts it only if its
# fingerprint matches the one given on the command line.
# NOTE(review): the failure branches return the result of set_exit_code/1,
# which add/3 then stores as the repo's :public_key — confirm that is the
# intended behavior rather than aborting the add.
defp fetch_public_key(fingerprint, repo_url, auth_key) do
case Hex.Repo.get_public_key(repo_url, auth_key) do
{:ok, {200, key, _}} ->
if show_public_key(key) == fingerprint do
key
else
Mix.raise("Public key fingerprint mismatch")
end
{:ok, {code, _, _}} ->
Hex.Shell.error("Downloading public key failed with code \"#{inspect(code)}\"")
Mix.Tasks.Hex.set_exit_code(1)
other ->
Hex.Shell.error("Downloading public key failed")
Hex.Utils.print_error_result(other)
Mix.Tasks.Hex.set_exit_code(1)
end
end
# Adapted from https://github.com/erlang/otp/blob/3eddb0f762de248d3230b38bc9d478bfbc8e7331/lib/public_key/src/public_key.erl#L824
# Produces an OpenSSH-style "SHA256:<base64>" fingerprint for a decoded key.
defp ssh_hostkey_fingerprint(key) do
"SHA256:#{sshfp_string(key)}"
end
# SHA-256 over the SSH2 wire encoding of the key, base64 without padding.
defp sshfp_string(key) do
:crypto.hash(:sha256, Hex.Stdlib.ssh2_pubkey_encode(key))
|> Hex.Stdlib.base_encode64_nopadding()
end
# `mix hex.repo show NAME --url` etc.: with at least one switch, prints only
# the value of the FIRST switch given (any further switches are ignored).
defp show(name, [{key, _} | _]) do
case Map.fetch(Hex.State.fetch!(:repos), name) do
{:ok, config} ->
Hex.Shell.info(Map.get(config, key, ""))
:error ->
Mix.raise("Config does not contain repo #{name}")
end
end
# Without switches, prints the full repo config as a table.
defp show(name, []) do
case Map.fetch(Hex.State.fetch!(:repos), name) do
{:ok, repo} ->
header = ["URL", "Public key", "Auth key"]
rows = [[repo.url, show_public_key(repo.public_key), repo.auth_key]]
Mix.Tasks.Hex.print_table(header, rows)
:error ->
Mix.raise("Config does not contain repo #{name}")
end
end
end
|
lib/mix/tasks/hex.repo.ex
| 0.855565 | 0.454109 |
hex.repo.ex
|
starcoder
|
defmodule Solid.Context do
  @moduledoc """
  The rendering state of a template: regular variables, counter variables,
  iteration variables, and the bookkeeping needed by the `cycle` tag.
  """

  defstruct vars: %{}, counter_vars: %{}, iteration_vars: %{}, cycle_state: %{}, trim_next: false

  @type t :: %__MODULE__{
          vars: map(),
          counter_vars: map(),
          iteration_vars: %{optional(String.t()) => term},
          cycle_state: map(),
          trim_next: boolean
        }

  @type scope :: :counter_vars | :vars | :iteration_vars

  @doc """
  Get data from context respecting the scope order provided.

  Possible scope values: :counter_vars, :vars or :iteration_vars
  """
  @spec get_in(t(), [term()], [scope]) :: term
  def get_in(context, key, scopes) do
    # Enum.find_value/2 short-circuits on the first scope that resolves the
    # key (the previous implementation eagerly queried every scope before
    # picking the first hit). The result stays wrapped in {:ok, value} during
    # the search so a legitimately falsy value still counts as a hit.
    found =
      Enum.find_value(scopes, fn scope ->
        case get_from_scope(context, scope, key) do
          {:ok, _} = hit -> hit
          _ -> nil
        end
      end)

    case found do
      {:ok, value} -> value
      nil -> nil
    end
  end

  @doc """
  Find the current value that `cycle` must return
  """
  @spec run_cycle(t(), [values: [String.t()]] | [name: String.t(), values: [String.t()]]) ::
          {t(), String.t()}
  def run_cycle(%__MODULE__{cycle_state: cycle_state} = context, cycle) do
    # Anonymous cycles are keyed by their value list; named cycles by name.
    name = Keyword.get(cycle, :name, cycle[:values])

    case cycle_state[name] do
      {current_index, cycle_map} ->
        # Known cycle: advance the index, wrapping back to 0 at the end.
        limit = map_size(cycle_map)
        next_index = if current_index + 1 < limit, do: current_index + 1, else: 0

        {%{context | cycle_state: %{context.cycle_state | name => {next_index, cycle_map}}},
         cycle_map[next_index]}

      nil ->
        # First encounter: start at index 0 and remember the cycle.
        values = Keyword.fetch!(cycle, :values)
        cycle_map = cycle_to_map(values)
        current_index = 0

        {%{context | cycle_state: Map.put_new(cycle_state, name, {current_index, cycle_map})},
         cycle_map[current_index]}
    end
  end

  # Builds an index => value map so a cycle can be advanced by integer index.
  defp cycle_to_map(cycle) do
    cycle
    |> Enum.with_index()
    |> Enum.into(%{}, fn {value, index} -> {index, value} end)
  end

  # Resolves `key` against a single scope of the context.
  defp get_from_scope(context, :vars, key) do
    do_get_in(context.vars, key)
  end

  defp get_from_scope(context, :counter_vars, key) do
    do_get_in(context.counter_vars, key)
  end

  defp get_from_scope(context, :iteration_vars, key) do
    do_get_in(context.iteration_vars, key)
  end

  # Walks a key path through nested maps/lists, with special "size" handling
  # for lists, maps and strings.
  defp do_get_in(nil, _), do: {:error, :not_found}
  defp do_get_in(data, []), do: {:ok, data}

  defp do_get_in(data, ["size"]) when is_list(data) do
    {:ok, Enum.count(data)}
  end

  # A map may carry its own "size" key, which takes precedence over its count.
  defp do_get_in(data, ["size"]) when is_map(data) do
    {:ok, Map.get(data, "size", Enum.count(data))}
  end

  defp do_get_in(data, ["size"]) when is_binary(data) do
    {:ok, String.length(data)}
  end

  defp do_get_in(data, [key | keys]) when is_map(data) do
    do_get_in(data[key], keys)
  end

  defp do_get_in(data, [key | keys]) when is_integer(key) and is_list(data) do
    do_get_in(Enum.at(data, key), keys)
  end

  defp do_get_in(_, _), do: {:error, :not_found}
end
|
lib/solid/context.ex
| 0.781414 | 0.465205 |
context.ex
|
starcoder
|
if Code.ensure_loaded?(Phoenix) do
  defmodule Guardian.Phoenix.Socket do
    @moduledoc """
    Helpers for token-based authentication on Phoenix sockets.

    This module is mostly convenience functions for storing tokens, claims
    and resources on the socket assigns. The functions you will reach for
    most often are:

    * `Guardian.Phoenix.Socket.authenticated?` - check if the socket has been authenticated
    * `Guardian.Phoenix.Socket.authenticate` - Sign in a resource to a socket. Similar to `Guardian.Plug.authenticate`

    ### Getters

    Once a socket is authenticated, read the stored information back with:

    * `Guardian.Phoenix.Socket.current_claims`
    * `Guardian.Phoenix.Socket.current_token`
    * `Guardian.Phoenix.Socket.current_resource`

    Note one difference from the usual `Guardian.Plug.sign_in`: the socket
    variant starts from a token and signs in from that. All of this is
    sugar over the underlying Guardian functions.

    As an example:

    ```elixir
    defmodule MyApp.UserSocket do
      use Phoenix.Socket

      def connect(%{"token" => token}, socket) do
        case Guardian.Phoenix.Socket.authenticate(socket, MyApp.Guardian, token) do
          {:ok, authed_socket} ->
            {:ok, authed_socket}
          {:error, _} -> :error
        end
      end

      # This function will be called when there was no authentication information
      def connect(_params, socket) do
        :error
      end
    end
    ```

    To authenticate on joining a channel instead, import this module and
    call `authenticate` as usual.
    """
    import Guardian.Plug.Keys

    alias Phoenix.Socket

    @doc """
    Stores the given token on the socket assigns for later use.

    Read it back with `current_token`.
    """
    @spec put_current_token(
            socket :: Socket.t(),
            token :: Guardian.Token.token() | nil,
            key :: atom | String.t() | nil
          ) :: Socket.t()
    def put_current_token(socket, token, key \\ :default) do
      assign_key = token_key(key)
      Socket.assign(socket, assign_key, token)
    end

    @doc """
    Stores the given claims on the socket assigns for later use.

    Read them back with `current_claims`.
    """
    @spec put_current_claims(
            socket :: Socket.t(),
            new_claims :: Guardian.Token.claims() | nil,
            atom | String.t() | nil
          ) :: Socket.t()
    def put_current_claims(socket, new_claims, key \\ :default) do
      assign_key = claims_key(key)
      Socket.assign(socket, assign_key, new_claims)
    end

    @doc """
    Stores the given resource on the socket assigns for later use.

    Read it back with `current_resource`.
    """
    @spec put_current_resource(
            socket :: Socket.t(),
            resource :: Guardian.Token.resource() | nil,
            key :: atom | String.t() | nil
          ) :: Socket.t()
    def put_current_resource(socket, resource, key \\ :default) do
      assign_key = resource_key(key)
      Socket.assign(socket, assign_key, resource)
    end

    @doc """
    Fetches the `claims` map that was encoded into the token from the socket.
    """
    @spec current_claims(Socket.t(), atom | String.t()) :: Guardian.Token.claims() | nil
    def current_claims(socket, key \\ :default) do
      Map.get(socket.assigns, claims_key(key))
    end

    @doc """
    Fetches the encoded token that was provided for the initial
    authentication from the socket.
    """
    @spec current_token(Socket.t(), atom | String.t()) :: Guardian.Token.token() | nil
    def current_token(socket, key \\ :default) do
      Map.get(socket.assigns, token_key(key))
    end

    @doc """
    Fetches the resource that was previously put onto the socket.
    """
    @spec current_resource(Socket.t(), atom | String.t()) :: Guardian.Token.resource() | nil
    def current_resource(socket, key \\ :default) do
      Map.get(socket.assigns, resource_key(key))
    end

    @doc """
    Boolean if the token is present or not to indicate an authenticated socket
    """
    @spec authenticated?(Socket.t(), atom | String.t()) :: true | false
    def authenticated?(socket, key \\ :default) do
      not is_nil(current_token(socket, key))
    end

    @doc """
    Assigns the resource, token and claims to the socket.

    Use the `key` to specify a different location, allowing multiple
    tokens to be active on a socket at once.
    """
    @spec assign_rtc(
            socket :: Socket.t(),
            resource :: Guardian.Token.resource() | nil,
            token :: Guardian.Token.token() | nil,
            claims :: Guardian.Token.claims() | nil,
            key :: atom | String.t() | nil
          ) :: Socket.t()
    def assign_rtc(socket, resource, token, claims, key \\ :default) do
      socket
      |> put_current_token(token, key)
      |> put_current_claims(claims, key)
      |> put_current_resource(resource, key)
    end

    @doc """
    Given an implementation module and token, this will

    * decode and verify the token
    * load the resource
    * store the resource, claims and token on the socket.

    Use the `key` to store the information in a different location.
    This allows multiple tokens and resources on a single socket.
    """
    @spec authenticate(
            socket :: Socket.t(),
            impl :: module,
            token :: Guardian.Token.token() | nil,
            claims_to_check :: Guardian.Token.claims(),
            opts :: Guardian.options()
          ) :: {:ok, Socket.t()} | {:error, atom | any}
    def authenticate(socket, impl, token, claims_to_check \\ %{}, opts \\ [])

    def authenticate(_socket, _impl, nil, _claims_to_check, _opts), do: {:error, :no_token}

    def authenticate(socket, impl, token, claims_to_check, opts) do
      case Guardian.resource_from_token(impl, token, claims_to_check, opts) do
        {:ok, resource, claims} ->
          key = Keyword.get(opts, :key, Guardian.Plug.default_key())
          {:ok, assign_rtc(socket, resource, token, claims, key)}

        # Verification/loading failures are passed through unchanged.
        error ->
          error
      end
    end
  end
end
|
lib/guardian/phoenix/socket.ex
| 0.830147 | 0.846387 |
socket.ex
|
starcoder
|
defmodule Cldr.DateTime.Compiler do
  @moduledoc """
  Tokenizes and parses `Date`, `Time` and `DateTime` format strings.

  During compilation, each of the date, time and datetime format
  strings defined in CLDR are compiled into a list of
  function bodies that are then grafted onto the function head
  `format/3` in a backend module. As a result these compiled
  formats execute with good performance.

  For formats not defined in CLDR (ie a user defined format),
  the tokenizing and parsing is performed, then list of function
  bodies is created and then `format/3`
  recurses over the list, invoking each function and
  collecting the results. This process is significantly slower
  than that of the precompiled formats.

  User defined formats can also be precompiled by configuring
  them under the key `:precompile_datetime_formats`. For example:

      config :ex_cldr,
        precompile_datetime_formats: ["yy/dd", "hhh:mmm:sss"]
  """

  @doc """
  Scan a number format definition and return
  the tokens of a date/time/datetime format
  string.

  This function is designed to produce output
  that is fed into `Cldr.DateTime.Compiler.compile/3`.

  ## Arguments

  * `definition` is a date, datetime or time format
    string

  ## Returns

  A list of 3-tuples which represent the tokens
  of the format definition

  ## Example

      iex> Cldr.DateTime.Compiler.tokenize "yyyy/MM/dd"
      {:ok,
       [{:year, 1, 4}, {:literal, 1, "/"}, {:month, 1, 2}, {:literal, 1, "/"},
        {:day_of_month, 1, 2}], 1}
  """
  def tokenize(definition) when is_binary(definition) do
    # The lexer is Erlang (leex) generated, so it operates on charlists.
    definition
    |> String.to_charlist()
    |> :datetime_format_lexer.string()
  end

  def tokenize(%{number_system: _numbers, format: value}) do
    tokenize(value)
  end

  @doc """
  Parse a number format definition

  ## Arguments

  * `format_string` is a string defining how a date/time/datetime
    is to be formatted. See `Cldr.DateTime.Formatter` for the list
    of supported format symbols.

  ## Returns

  Returns a list of function bodies which are grafted onto
  a function head in `Cldr.DateTime.Formatter` at compile time
  to produce a series of functions that process a given format
  string efficiently.
  """
  @spec compile(String.t(), module(), module()) ::
          {:ok, Cldr.Calendar.calendar()} | {:error, String.t()}
  def compile(format_string, backend, context)

  def compile("", _, _) do
    {:error, "empty format string cannot be compiled"}
  end

  def compile(nil, _, _) do
    {:error, "no format string or token list provided"}
  end

  def compile(definition, backend, context) when is_binary(definition) do
    with {:ok, tokens, _end_line} <- tokenize(definition) do
      # Each token becomes a quoted call to the matching formatter function.
      # `var!/2` deliberately reaches for the `date`, `locale` and `options`
      # variables in the `context` scope, so these quoted fragments must be
      # spliced into a function that binds those names.
      transforms =
        Enum.map(tokens, fn {fun, _line, count} ->
          quote do
            Cldr.DateTime.Formatter.unquote(fun)(
              var!(date, unquote(context)),
              unquote(count),
              var!(locale, unquote(context)),
              unquote(backend),
              var!(options, unquote(context))
            )
          end
        end)

      {:ok, transforms}
    else
      # NOTE(review): a tokenizer failure raises rather than returning
      # `{:error, _}`, which differs from the @spec above — confirm intended.
      error ->
        raise ArgumentError, "Could not parse #{inspect(definition)}: #{inspect(error)}"
    end
  end

  def compile(%{number_system: _number_system, format: value}, backend, context) do
    compile(value, backend, context)
  end

  def compile(arg, _, _) do
    raise ArgumentError, message: "No idea how to compile format: #{inspect(arg)}"
  end
end
|
lib/cldr/backend/compiler.ex
| 0.910809 | 0.728965 |
compiler.ex
|
starcoder
|
defmodule Annex.DataCase do
  @moduledoc """
  An ExUnit.CaseTemplate with helpers for validating implementations of the
  Annex.Data behaviours.
  """
  use ExUnit.CaseTemplate

  alias Annex.{
    Data.List1D,
    DataAssertion
  }

  using kwargs do
    quote do
      @data_type Keyword.fetch!(unquote(kwargs), :type)
      @datas_and_shapes Keyword.fetch!(unquote(kwargs), :data)
      @pretty inspect(@data_type)

      alias Annex.DataCase

      test "Annex.DataCase: #{@pretty} to_flat_list/1 callback works" do
        DataCase.test_to_flat_list(@data_type, @datas_and_shapes)
      end

      test "Annex.DataCase: #{@pretty} shape/1 callback works" do
        DataCase.test_shape(@data_type, @datas_and_shapes)
      end

      test "Annex.DataCase: #{@pretty} cast/2 callback works" do
        DataCase.test_cast(@data_type, @datas_and_shapes)
      end

      test "Annex.DataCase: #{@pretty} full conversion works" do
        DataCase.test_conversion(@data_type, @datas_and_shapes)
      end
    end
  end

  @doc """
  Runs every assertion helper in this module against `type`.
  """
  def run_all_assertions(type, datas_and_shapes) do
    test_to_flat_list(type, datas_and_shapes)
    test_shape(type, datas_and_shapes)
    test_cast(type, datas_and_shapes)
    test_conversion(type, datas_and_shapes)
  end

  @doc """
  Asserts that `type`'s data can be correctly converted to and from a flat
  list.

  Relies on a correct implementation of Annex.Data.List1D.
  """
  def test_conversion(type, cases) when is_list(cases) do
    Enum.map(cases, &test_conversion(type, &1))
  end

  def test_conversion(type, {data, _expected_shape, _target_shape}) do
    # Collect the data's shape, its flat form, and the flat form's shape.
    DataAssertion.shape_is_valid(type, data)
    shape = DataAssertion.shape(type, data)
    flat_data = DataAssertion.to_flat_list(type, data)
    list_shape = DataAssertion.shape(List1D, flat_data)

    # The tested type must describe the same number of elements as the flat
    # list so conversion can happen back and forth.
    data_shape_product = DataAssertion.shape_product(shape)
    list_shape_product = DataAssertion.shape_product(list_shape)

    assert data_shape_product == list_shape_product, """
    The shape for #{inspect(type)} did not match the shape for the List1D.
    data_shape_product: #{inspect(data_shape_product)}
    list_shape_product: #{inspect(list_shape_product)}
    list_shape: #{inspect(list_shape)}
    data_shape: #{inspect(shape)}
    data: #{inspect(data)}
    flat_data: #{inspect(flat_data)}
    """

    # Casting flat_data back through `shape` must preserve both the shape and
    # the flattened contents. Exact equality of the given and casted data is
    # not assumed, since the underlying structure is opaque here.
    casted = DataAssertion.cast(type, flat_data, shape)
    assert DataAssertion.shape(type, data) == DataAssertion.shape(type, casted)
    assert DataAssertion.to_flat_list(type, data) == DataAssertion.to_flat_list(type, casted)
  end

  @doc """
  Asserts the implementation of `to_flat_list` for `type`.
  """
  def test_to_flat_list(type, cases) when is_list(cases) do
    Enum.map(cases, &test_to_flat_list(type, &1))
  end

  def test_to_flat_list(type, {data, _shape, _target_shape}) do
    DataAssertion.to_flat_list(type, data)
  end

  @doc """
  Asserts the implementation of `cast/3` for `type`.
  """
  def test_cast(type, cases) when is_list(cases) do
    Enum.map(cases, &test_cast(type, &1))
  end

  def test_cast(type, {data, _expected_shape, target_shape}) do
    DataAssertion.cast(type, data, target_shape)
  end

  @doc """
  Asserts the implementation of `shape/1` for `type`.
  """
  def test_shape(type, cases) when is_list(cases) do
    Enum.map(cases, &test_shape(type, &1))
  end

  def test_shape(type, {data, expected_shape, _target_shape}) do
    assert DataAssertion.shape_is_valid(type, data) == true
    result = DataAssertion.shape(type, data)

    assert result == expected_shape, """
    #{inspect(type)}.shape/1 failed to produce the expected shape.
    expected_shape: #{inspect(expected_shape)}
    invalid_result: #{inspect(result)}
    """
  end
end
|
test/support/data_case.ex
| 0.735926 | 0.731706 |
data_case.ex
|
starcoder
|
defmodule Opencensus.Plug.Metrics do
  @moduledoc """
  Template method for creating `Plug` to measure response times.

  ## Usage

  1. Create your own `Plug` module:

  ```elixir
  defmodule MyApp.MetricsPlug do
    use Opencensus.Plug.Metrics
  end
  ```

  2. Add it to your pipeline, ex. for Phoenix:

  ```elixir
  defmodule MyAppWeb.Endpoint do
    use Phoenix.Endpoint, otp_app: :my_app

    plug MyApp.MetricsPlug
  end
  ```

  ## Configuration

  `use` accepts `prefix` option that will be prefix of all measurements.

  And also you can use `attributes` argument in `use` which must be either list
  of attributes which are names of 1-argument functions in current module that
  must return string value of the attribute, or map/keyword list of one of:

  - `atom` - which is name of the called function
  - `{module, function}` - which will call `apply(module, function, [conn])`
  - `{module, function, args}` - which will prepend `conn` to the given arguments
    and call `apply(module, function, [conn | args])`

  ## Measurements

  - "#\{prefix}/requests" - duration of requests in microseconds
  """

  defmacro __using__(opts) do
    # Options are evaluated once, at compile time of the using module.
    prefix = Keyword.get(opts, :prefix, "plug")
    measure_name = "#{prefix}/requests"
    attributes = Keyword.get(opts, :attributes, [])

    quote do
      @behaviour Plug

      @measure_name unquote(measure_name)

      # Registers the duration measure with :oc_stat; must run once before
      # any recording happens.
      def setup_metrics do
        [
          :oc_stat_measure.new(
            @measure_name,
            "HTTP request duration in microseconds.",
            :usec
          )
        ]
      end

      def init(opts), do: opts

      def call(conn, _opts) do
        start = :erlang.monotonic_time()

        # Duration is measured up to the moment the response is sent, via a
        # before_send callback, rather than by wrapping the rest of the
        # pipeline.
        Plug.Conn.register_before_send(conn, fn conn ->
          stop = :erlang.monotonic_time()
          diff = stop - start

          tags =
            Map.merge(
              Opencensus.Plug.get_tags(conn, __MODULE__, unquote(attributes)),
              %{
                method: conn.method,
                host: conn.host,
                scheme: conn.scheme,
                status: conn.status
              }
            )

          :ok =
            :oc_stat.record(
              tags,
              @measure_name,
              :erlang.convert_time_unit(diff, :native, :microsecond)
            )

          conn
        end)
      end
    end
  end
end
|
lib/opencensus/plug/metrics.ex
| 0.856827 | 0.768733 |
metrics.ex
|
starcoder
|
defprotocol Enumerable do
  @moduledoc """
  Enumerable protocol used by `Enum` and `Stream` modules.

  When you invoke a function in the `Enum` module, the first argument
  is usually a collection that must implement this protocol. For example,
  the expression

      Enum.map([1, 2, 3], &(&1 * 2))

  invokes underneath `Enumerable.reduce/3` to perform the reducing
  operation that builds a mapped list by calling the mapping function
  `&(&1 * 2)` on every element in the collection and cons'ing the
  element with an accumulated list.

  Internally, `Enum.map/2` is implemented as follows:

      def map(enum, fun) do
        reducer = fn x, acc -> {:cont, [fun.(x)|acc]} end
        Enumerable.reduce(enum, {:cont, []}, reducer) |> elem(1) |> :lists.reverse()
      end

  Notice the user given function is wrapped into a `reducer` function.
  The `reducer` function must return a tagged tuple after each step,
  as described in the `acc/0` type.

  The reason the accumulator requires a tagged tuple is to allow the
  reducer function to communicate to the underlying enumerable the end
  of enumeration, allowing any open resource to be properly closed. It
  also allows suspension of the enumeration, which is useful when
  interleaving between many enumerables is required (as in zip).

  Finally, `Enumerable.reduce/3` will return another tagged tuple,
  as represented by the `result/0` type.
  """

  @typedoc """
  The accumulator value for each step.

  It must be a tagged tuple with one of the following "tags":

    * `:cont`    - the enumeration should continue
    * `:halt`    - the enumeration should halt immediately
    * `:suspend` - the enumeration should be suspended immediately

  Depending on the accumulator value, the result returned by
  `Enumerable.reduce/3` will change. Please check the `result`
  type docs for more information.

  In case a reducer function returns a `:suspend` accumulator,
  it must be explicitly handled by the caller and never leak.
  """
  @type acc :: {:cont, term} | {:halt, term} | {:suspend, term}

  @typedoc """
  The reducer function.

  Should be called with the collection element and the
  accumulator contents. Returns the accumulator for
  the next enumeration step.
  """
  @type reducer :: (term, term -> acc)

  @typedoc """
  The result of the reduce operation.

  It may be *done* when the enumeration is finished by reaching
  its end, or *halted*/*suspended* when the enumeration was halted
  or suspended by the reducer function.

  In case a reducer function returns the `:suspend` accumulator, the
  `:suspended` tuple must be explicitly handled by the caller and
  never leak. In practice, this means regular enumeration functions
  just need to be concerned about `:done` and `:halted` results.

  Furthermore, a `:suspend` call must always be followed by another call,
  eventually halting or continuing until the end.
  """
  @type result :: {:done, term} | {:halted, term} | {:suspended, term, continuation}

  @typedoc """
  A partially applied reduce function.

  The continuation is the closure returned as a result when
  the enumeration is suspended. When invoked, it expects
  a new accumulator and it returns the result.

  A continuation is easily implemented as long as the reduce
  function is defined in a tail recursive fashion. If the function
  is tail recursive, all the state is passed as arguments, so
  the continuation would simply be the reducing function partially
  applied.
  """
  @type continuation :: (acc -> result)

  @doc """
  Reduces the collection into a value.

  Most of the operations in `Enum` are implemented in terms of reduce.
  This function should apply the given `reducer` function to each
  item in the collection and proceed as expected by the returned accumulator.

  As an example, here is the implementation of `reduce` for lists:

      def reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
      def reduce(list, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(list, &1, fun)}
      def reduce([], {:cont, acc}, _fun), do: {:done, acc}
      def reduce([h|t], {:cont, acc}, fun), do: reduce(t, fun.(h, acc), fun)
  """
  @spec reduce(t, acc, reducer) :: result
  def reduce(collection, acc, fun)

  @doc """
  Checks if a value exists within the collection.

  It should return `{:ok, boolean}`.

  If `{:error, __MODULE__}` is returned a default algorithm using `reduce` and
  the match (`===`) operator is used. This algorithm runs in linear time.

  Please force use of the default algorithm unless you can implement an
  algorithm that is significantly faster.
  """
  @spec member?(t, term) :: {:ok, boolean} | {:error, module}
  def member?(collection, value)

  @doc """
  Retrieves the collection's size.

  It should return `{:ok, size}`.

  If `{:error, __MODULE__}` is returned a default algorithm using `reduce` and
  the match (`===`) operator is used. This algorithm runs in linear time.

  Please force use of the default algorithm unless you can implement an
  algorithm that is significantly faster.
  """
  @spec count(t) :: {:ok, non_neg_integer} | {:error, module}
  def count(collection)
end
defmodule Enum do
import Kernel, except: [max: 2, min: 2]
@moduledoc """
Provides a set of algorithms that enumerate over collections according to the
`Enumerable` protocol:
iex> Enum.map([1, 2, 3], fn(x) -> x * 2 end)
[2, 4, 6]
Some particular types, like dictionaries, yield a specific format on
enumeration. For dicts, the argument is always a `{key, value}` tuple:
iex> dict = %{a: 1, b: 2}
iex> Enum.map(dict, fn {k, v} -> {k, v * 2} end)
[a: 2, b: 4]
Note that the functions in the `Enum` module are eager: they always start
the enumeration of the given collection. The `Stream` module allows
lazy enumeration of collections and provides infinite streams.
Since the majority of the functions in `Enum` enumerate the whole
collection and return a list as result, infinite streams need to
be carefully used with such functions, as they can potentially run
forever. For example:
Enum.each Stream.cycle([1, 2, 3]), &IO.puts(&1)
"""
@compile :inline_list_funcs
@type t :: Enumerable.t
@type element :: any
@type index :: non_neg_integer
@type default :: any
# Require Stream.Reducers and its callbacks
require Stream.Reducers, as: R
# These private macros are the callback shapes expected by Stream.Reducers
# (required above as `R`). They are macros rather than functions so the
# reducer bodies are inlined at compile time.

# Ignores the current entry and keeps the accumulator unchanged.
defmacrop skip(acc) do
  acc
end

# Prepends `entry` onto a plain list accumulator.
defmacrop next(_, entry, acc) do
  quote do: [unquote(entry)|unquote(acc)]
end

# Pairs a list accumulator `h` with extra reducer state `n`.
defmacrop acc(h, n, _) do
  quote do: {unquote(h), unquote(n)}
end

# Prepends `entry` onto the list half of an `{h, n}` accumulator.
defmacrop next_with_acc(f, entry, h, n, _) do
  quote do
    {[unquote(entry)|unquote(h)], unquote(n)}
  end
end
@doc """
Invokes the given `fun` for each item in the `collection` and returns `false`
if at least one invocation returns `false` or `nil`. Otherwise returns `true`.

## Examples

    iex> Enum.all?([2, 4, 6], fn(x) -> rem(x, 2) == 0 end)
    true

    iex> Enum.all?([2, 3, 4], fn(x) -> rem(x, 2) == 0 end)
    false

If no function is given, it defaults to checking if
all items in the collection are truthy values.

    iex> Enum.all?([1, 2, 3])
    true

    iex> Enum.all?([1, nil, 3])
    false
"""
@spec all?(t) :: boolean
@spec all?(t, (element -> as_boolean(term))) :: boolean
def all?(collection, fun \\ fn(x) -> x end)

def all?(collection, fun) when is_list(collection) do
  do_all?(collection, fun)
end

def all?(collection, fun) do
  collection
  |> Enumerable.reduce({:cont, true}, fn entry, _acc ->
    # Halt on the first falsy (false/nil) result.
    case fun.(entry) do
      result when result in [false, nil] -> {:halt, false}
      _truthy -> {:cont, true}
    end
  end)
  |> elem(1)
end
@doc """
Invokes the given `fun` for each item in the `collection` and returns `true` if
at least one invocation returns a truthy value. Returns `false` otherwise.

## Examples

    iex> Enum.any?([2, 4, 6], fn(x) -> rem(x, 2) == 1 end)
    false

    iex> Enum.any?([2, 3, 4], fn(x) -> rem(x, 2) == 1 end)
    true

If no function is given, it defaults to checking if
at least one item in the collection is a truthy value.

    iex> Enum.any?([false, false, false])
    false

    iex> Enum.any?([false, true, false])
    true
"""
@spec any?(t) :: boolean
@spec any?(t, (element -> as_boolean(term))) :: boolean
def any?(collection, fun \\ fn(x) -> x end)

def any?(collection, fun) when is_list(collection) do
  do_any?(collection, fun)
end

def any?(collection, fun) do
  collection
  |> Enumerable.reduce({:cont, false}, fn entry, _acc ->
    # Halt on the first truthy result.
    case fun.(entry) do
      result when result in [false, nil] -> {:cont, false}
      _truthy -> {:halt, true}
    end
  end)
  |> elem(1)
end
@doc """
Finds the element at the given index (zero-based).

Returns `default` if index is out of bounds.

Note this operation takes linear time. In order to access
the element at index `n`, it will need to traverse `n`
previous elements.

## Examples

    iex> Enum.at([2, 4, 6], 0)
    2

    iex> Enum.at([2, 4, 6], 2)
    6

    iex> Enum.at([2, 4, 6], 4)
    nil

    iex> Enum.at([2, 4, 6], 4, :none)
    :none
"""
@spec at(t, integer, default) :: element | default
def at(collection, n, default \\ nil) do
  case fetch(collection, n) do
    {:ok, found} -> found
    :error -> default
  end
end
@doc """
Shortcut to `chunk(collection, n, n)`.
"""
@spec chunk(t, non_neg_integer) :: [list]
def chunk(collection, n) do
  # Non-overlapping chunks, no padding.
  chunk(collection, n, n, nil)
end
@doc """
Returns a collection of lists containing `n` items each, where
each new chunk starts `step` elements into the collection.

`step` is optional and, if not passed, defaults to `n`, i.e.
chunks do not overlap. If the final chunk does not have `n`
elements to fill the chunk, elements are taken as necessary
from `pad` if it was passed. If `pad` is passed and does not
have enough elements to fill the chunk, then the chunk is
returned anyway with less than `n` elements. If `pad` is not
passed at all or is `nil`, then the partial chunk is discarded
from the result.

## Examples

    iex> Enum.chunk([1, 2, 3, 4, 5, 6], 2)
    [[1, 2], [3, 4], [5, 6]]

    iex> Enum.chunk([1, 2, 3, 4, 5, 6], 3, 2)
    [[1, 2, 3], [3, 4, 5]]

    iex> Enum.chunk([1, 2, 3, 4, 5, 6], 3, 2, [7])
    [[1, 2, 3], [3, 4, 5], [5, 6, 7]]

    iex> Enum.chunk([1, 2, 3, 4, 5, 6], 3, 3, [])
    [[1, 2, 3], [4, 5, 6]]
"""
@spec chunk(t, non_neg_integer, non_neg_integer, t | nil) :: [list]
def chunk(collection, n, step, pad \\ nil) when n > 0 and step > 0 do
  # The reducer never needs to buffer more than max(n, step) elements.
  limit = :erlang.max(n, step)

  # `acc` collects completed chunks (reversed); `buffer` holds the trailing
  # partial chunk (reversed) and `i` counts its elements.
  {acc, {buffer, i}} =
    reduce(collection, {[], {[], 0}}, R.chunk(n, step, limit))

  if is_nil(pad) || i == 0 do
    # No padding requested, or nothing left over: drop the partial chunk.
    :lists.reverse(acc)
  else
    # Top up the partial chunk from `pad`; it may still come out shorter
    # than `n` when `pad` itself runs out of elements.
    buffer = :lists.reverse(buffer, take(pad, n - i))
    :lists.reverse([buffer|acc])
  end
end
@doc """
Splits `collection` on every element for which `fun` returns a new value.

## Examples

    iex> Enum.chunk_by([1, 2, 2, 3, 4, 4, 6, 7, 7], &(rem(&1, 2) == 1))
    [[1], [2, 2], [3], [4, 4, 6], [7, 7]]
"""
@spec chunk_by(t, (element -> any)) :: [list]
def chunk_by(collection, fun) do
  # `acc` holds the completed chunks (reversed). `res` is the reducer's final
  # state: `nil` when the collection was empty, otherwise `{buffer, _}` with
  # the trailing chunk still being built (also reversed).
  {acc, res} = reduce(collection, {[], nil}, R.chunk_by(fun))

  case res do
    {buffer, _} ->
      :lists.reverse([:lists.reverse(buffer) | acc])
    nil ->
      []
  end
end
@doc """
Given an enumerable of enumerables, concatenates the enumerables into a single list.

## Examples

    iex> Enum.concat([1..3, 4..6, 7..9])
    [1, 2, 3, 4, 5, 6, 7, 8, 9]

    iex> Enum.concat([[1, [2], 3], [4], [5, 6]])
    [1, [2], 3, 4, 5, 6]
"""
@spec concat(t) :: t
def concat(enumerables) do
  do_concat(enumerables)
end
@doc """
Concatenates the enumerable on the right with the enumerable on the left.

This function produces the same result as the `Kernel.++/2` operator for lists.

## Examples

    iex> Enum.concat(1..3, 4..6)
    [1, 2, 3, 4, 5, 6]

    iex> Enum.concat([1, 2, 3], [4, 5, 6])
    [1, 2, 3, 4, 5, 6]
"""
@spec concat(t, t) :: t
def concat(left, right) when is_list(left) and is_list(right) do
  # Fast path: plain list append.
  left ++ right
end

def concat(left, right) do
  do_concat([left, right])
end
# Flattens one level of enumerables by prepending every inner element onto a
# single accumulator, then reversing once at the end.
defp do_concat(enumerable) do
  enumerable
  |> reduce([], fn inner, acc -> reduce(inner, acc, &[&1|&2]) end)
  |> :lists.reverse
end
@doc """
Returns the collection's size.

## Examples

    iex> Enum.count([1, 2, 3])
    3
"""
@spec count(t) :: non_neg_integer
def count(collection) when is_list(collection) do
  :erlang.length(collection)
end

def count(collection) do
  case Enumerable.count(collection) do
    {:ok, size} when is_integer(size) ->
      size

    {:error, module} ->
      # No O(1) count available: fall back to a linear reduce.
      {_tag, size} =
        module.reduce(collection, {:cont, 0}, fn _, acc -> {:cont, acc + 1} end)

      size
  end
end
@doc """
Returns the count of items in the collection for which
`fun` returns a truthy value.

## Examples

    iex> Enum.count([1, 2, 3, 4, 5], fn(x) -> rem(x, 2) == 0 end)
    2
"""
@spec count(t, (element -> as_boolean(term))) :: non_neg_integer
def count(collection, fun) do
  collection
  |> Enumerable.reduce({:cont, 0}, fn entry, acc ->
    if fun.(entry), do: {:cont, acc + 1}, else: {:cont, acc}
  end)
  |> elem(1)
end
@doc """
Enumerates the collection, returning a list where all consecutive
duplicated elements are collapsed to a single element.

Elements are compared using `===`.

## Examples

    iex> Enum.dedup([1, 2, 3, 3, 2, 1])
    [1, 2, 3, 2, 1]
"""
@spec dedup(t) :: list
def dedup(collection) do
  # Identity mapping: elements are their own dedup keys.
  dedup_by(collection, fn element -> element end)
end
@doc """
Enumerates the collection, returning a list where all consecutive
duplicated elements are collapsed to a single element.

The function `fun` maps every element to a term which is used to
determine if two elements are duplicates.

## Examples

    iex> Enum.dedup_by([{1, :x}, {2, :y}, {2, :z}, {1, :x}], fn {x, _} -> x end)
    [{1, :x}, {2, :y}, {1, :x}]

    iex> Enum.dedup_by([5, 1, 2, 3, 2, 1], fn x -> x > 2 end)
    [5, 1, 3, 2]
"""
@spec dedup_by(t, (element -> term)) :: list
def dedup_by(collection, fun) when is_function(fun, 1) do
  # R.dedup builds the result in reverse; the second tuple element is the
  # reducer's internal bookkeeping and is discarded.
  {list, _} = reduce(collection, {[], []}, R.dedup(fun))
  :lists.reverse(list)
end
@doc """
Drops the first `count` items from `collection`.

If a negative value `count` is given, the last `count`
values will be dropped. The collection is enumerated
once to retrieve the proper index and the remaining
calculation is performed from the end.

## Examples

    iex> Enum.drop([1, 2, 3], 2)
    [3]

    iex> Enum.drop([1, 2, 3], 10)
    []

    iex> Enum.drop([1, 2, 3], 0)
    [1, 2, 3]

    iex> Enum.drop([1, 2, 3], -1)
    [1, 2]
"""
@spec drop(t, integer) :: list
def drop(collection, count) when is_list(collection) and count >= 0 do
  do_drop(collection, count)
end

def drop(collection, count) when count >= 0 do
  # The accumulator plays two roles: while it is an integer it counts down
  # the elements left to skip; once it reaches 0 it is swapped for a list
  # that collects the remaining elements (in reverse).
  res =
    reduce(collection, count, fn
      x, acc when is_list(acc) -> [x|acc]
      x, 0 -> [x]
      _, acc when acc > 0 -> acc - 1
    end)

  # If the accumulator is still an integer, the collection was shorter than
  # `count`, so nothing remains.
  if is_list(res), do: :lists.reverse(res), else: []
end

def drop(collection, count) when count < 0 do
  # Dropping from the end: reverse, drop from the front, reverse back.
  do_drop(reverse(collection), abs(count)) |> :lists.reverse
end
@doc """
Drops items at the beginning of `collection` while `fun` returns a truthy value.

## Examples

    iex> Enum.drop_while([1, 2, 3, 4, 5], fn(x) -> x < 3 end)
    [3, 4, 5]
"""
@spec drop_while(t, (element -> as_boolean(term))) :: list
def drop_while(collection, fun) when is_list(collection) do
  do_drop_while(collection, fun)
end

def drop_while(collection, fun) do
  # The boolean half of the accumulator tracks whether we are still in the
  # "dropping" phase; the collected elements come out reversed.
  {res, _} = reduce(collection, {[], true}, R.drop_while(fun))
  :lists.reverse(res)
end
@doc """
Invokes the given `fun` for each item in the `collection`.
Returns `:ok`.

## Examples

    Enum.each(["some", "example"], fn(x) -> IO.puts x end)
    "some"
    "example"
    #=> :ok
"""
@spec each(t, (element -> any)) :: :ok
def each(collection, fun) when is_list(collection) do
  :lists.foreach(fun, collection)
  :ok
end

def each(collection, fun) do
  # The reducer's return value is irrelevant; only the side effect matters.
  reduce(collection, nil, fn element, _acc ->
    fun.(element)
    nil
  end)

  :ok
end
@doc """
Returns `true` if the collection is empty, otherwise `false`.

## Examples

    iex> Enum.empty?([])
    true

    iex> Enum.empty?([1, 2, 3])
    false
"""
@spec empty?(t) :: boolean
def empty?(collection) when is_list(collection) do
  collection == []
end

def empty?(collection) do
  # Halt on the very first element: seeing one proves non-emptiness.
  collection
  |> Enumerable.reduce({:cont, true}, fn _element, _acc -> {:halt, false} end)
  |> elem(1)
end
@doc """
Finds the element at the given index (zero-based).

Returns `{:ok, element}` if found, otherwise `:error`.

A negative index can be passed, which means the collection is
enumerated once and the index is counted from the end (i.e.
`-1` fetches the last element).

Note this operation takes linear time. In order to access
the element at index `n`, it will need to traverse `n`
previous elements.

## Examples

    iex> Enum.fetch([2, 4, 6], 0)
    {:ok, 2}

    iex> Enum.fetch([2, 4, 6], 2)
    {:ok, 6}

    iex> Enum.fetch([2, 4, 6], 4)
    :error

"""
@spec fetch(t, integer) :: {:ok, element} | :error
# Fast path: proper lists with a non-negative index use the private do_fetch/2.
def fetch(collection, n) when is_list(collection) and is_integer(n) and n >= 0 do
  do_fetch(collection, n)
end

# Generic enumerables: count elements upward; when the counter equals `n`
# halt with the element itself as the final accumulator.
def fetch(collection, n) when is_integer(n) and n >= 0 do
  res =
    Enumerable.reduce(collection, {:cont, 0}, fn(entry, acc) ->
      if acc == n do
        {:halt, entry}
      else
        {:cont, acc + 1}
      end
    end)

  # :halted means we stopped on the requested element; :done means the
  # enumerable was exhausted before reaching index `n`.
  case res do
    {:halted, entry} -> {:ok, entry}
    {:done, _} -> :error
  end
end

# Negative index: reverse the collection and fetch from its front;
# index -1 maps to position 0 of the reversed list, hence abs(n + 1).
def fetch(collection, n) when is_integer(n) and n < 0 do
  do_fetch(reverse(collection), abs(n + 1))
end
@doc """
Finds the element at the given index (zero-based).

Raises `OutOfBoundsError` if the given position
is outside the range of the collection.

Note this operation takes linear time. In order to access
the element at index `n`, it will need to traverse `n`
previous elements.

## Examples

    iex> Enum.fetch!([2, 4, 6], 0)
    2

    iex> Enum.fetch!([2, 4, 6], 2)
    6

    iex> Enum.fetch!([2, 4, 6], 4)
    ** (Enum.OutOfBoundsError) out of bounds error

"""
@spec fetch!(t, integer) :: element | no_return
# Bang variant of fetch/2: unwraps the ok-tuple or raises on :error.
def fetch!(collection, n) do
  case fetch(collection, n) do
    {:ok, value} -> value
    :error -> raise Enum.OutOfBoundsError
  end
end
@doc """
Filters the collection, i.e. returns only those elements
for which `fun` returns a truthy value.

## Examples

    iex> Enum.filter([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
    [2]

"""
@spec filter(t, (element -> as_boolean(term))) :: list
# Lists use a comprehension with a filter clause.
def filter(collection, fun) when is_list(collection) do
  for element <- collection, fun.(element), do: element
end

# Other enumerables fold through R.filter/1 and reverse once at the end.
def filter(collection, fun) do
  collection
  |> reduce([], R.filter(fun))
  |> :lists.reverse()
end
@doc """
Filters the collection and maps its values in one pass.

## Examples

    iex> Enum.filter_map([1, 2, 3], fn(x) -> rem(x, 2) == 0 end, &(&1 * 2))
    [4]

"""
@spec filter_map(t, (element -> as_boolean(term)), (element -> element)) :: list
# Lists: a comprehension applies the filter and the mapper in one traversal.
def filter_map(collection, filter, mapper) when is_list(collection) do
  for element <- collection, filter.(element), do: mapper.(element)
end

# Other enumerables: R.filter_map/2 fuses both steps into a single reducer.
def filter_map(collection, filter, mapper) do
  collection
  |> reduce([], R.filter_map(filter, mapper))
  |> :lists.reverse()
end
@doc """
Returns the first item for which `fun` returns a truthy value. If no such
item is found, returns `ifnone`.

## Examples

    iex> Enum.find([2, 4, 6], fn(x) -> rem(x, 2) == 1 end)
    nil

    iex> Enum.find([2, 4, 6], 0, fn(x) -> rem(x, 2) == 1 end)
    0

    iex> Enum.find([2, 3, 4], fn(x) -> rem(x, 2) == 1 end)
    3

"""
@spec find(t, default, (element -> any)) :: element | default
# Bodiless head declares the default for `ifnone` across all clauses.
def find(collection, ifnone \\ nil, fun)

# Fast path for proper lists via the private do_find/3.
def find(collection, ifnone, fun) when is_list(collection) do
  do_find(collection, ifnone, fun)
end

# Generic path: halt with the matching element, otherwise carry `ifnone`
# through so it is returned when the enumerable is exhausted.
def find(collection, ifnone, fun) do
  Enumerable.reduce(collection, {:cont, ifnone}, fn(entry, ifnone) ->
    if fun.(entry), do: {:halt, entry}, else: {:cont, ifnone}
  end) |> elem(1)
end
@doc """
Similar to `find/3`, but returns the value of the function
invocation instead of the element itself.

## Examples

    iex> Enum.find_value([2, 4, 6], fn(x) -> rem(x, 2) == 1 end)
    nil

    iex> Enum.find_value([2, 3, 4], fn(x) -> rem(x, 2) == 1 end)
    true

    iex> Enum.find_value([1, 2, 3], "no bools!", &is_boolean/1)
    "no bools!"

"""
# Spec fix: `nil` is the idiomatic typespec form (the quoted atom `:nil`
# denotes the same atom but reads as a distinct tag).
@spec find_value(t, any, (element -> any)) :: any | nil
# Bodiless head declares the default for `ifnone` across all clauses.
def find_value(collection, ifnone \\ nil, fun)

# Fast path for proper lists via the private do_find_value/3.
def find_value(collection, ifnone, fun) when is_list(collection) do
  do_find_value(collection, ifnone, fun)
end

# Generic path: halt with the first truthy `fun` result; otherwise the
# accumulator stays `ifnone` and is returned at exhaustion.
def find_value(collection, ifnone, fun) do
  Enumerable.reduce(collection, {:cont, ifnone}, fn(entry, ifnone) ->
    fun_entry = fun.(entry)
    if fun_entry, do: {:halt, fun_entry}, else: {:cont, ifnone}
  end) |> elem(1)
end
@doc """
Similar to `find/3`, but returns the index (zero-based)
of the element instead of the element itself.

## Examples

    iex> Enum.find_index([2, 4, 6], fn(x) -> rem(x, 2) == 1 end)
    nil

    iex> Enum.find_index([2, 3, 4], fn(x) -> rem(x, 2) == 1 end)
    1

"""
# Spec fix: write the nil return as `nil`, not the quoted atom `:nil`.
@spec find_index(t, (element -> any)) :: index | nil
# Fast path for proper lists via the private do_find_index/3.
def find_index(collection, fun) when is_list(collection) do
  do_find_index(collection, 0, fun)
end

# Generic path: count positions upward, halting with the current index on
# the first truthy `fun` result.
def find_index(collection, fun) do
  res =
    Enumerable.reduce(collection, {:cont, 0}, fn(entry, acc) ->
      if fun.(entry), do: {:halt, acc}, else: {:cont, acc + 1}
    end)

  # :halted carries the index; :done means no element matched.
  case res do
    {:halted, entry} -> entry
    {:done, _} -> nil
  end
end
@doc """
Returns a new collection appending the result of invoking `fun`
on each corresponding item of `collection`.

The given function should return an enumerable.

## Examples

    iex> Enum.flat_map([:a, :b, :c], fn(x) -> [x, x] end)
    [:a, :a, :b, :b, :c, :c]

    iex> Enum.flat_map([{1, 3}, {4, 6}], fn({x, y}) -> x..y end)
    [1, 2, 3, 4, 5, 6]

"""
@spec flat_map(t, (element -> t)) :: list
# Each inner enumerable produced by `fun` is folded (prepending) into the
# shared accumulator; a single reverse at the end restores order.
def flat_map(collection, fun) do
  collection
  |> reduce([], fn element, acc ->
    reduce(fun.(element), acc, &[&1 | &2])
  end)
  |> :lists.reverse()
end
@doc """
Maps and reduces a collection, flattening the given results.

It expects an accumulator and a function that receives each stream item
and an accumulator, and must return a tuple containing a new stream
(often a list) with the new accumulator or a tuple with `:halt` as first
element and the accumulator as second.

## Examples

    iex> enum = 1..100
    iex> n = 3
    iex> Enum.flat_map_reduce(enum, 0, fn i, acc ->
    ...>   if acc < n, do: {[i], acc + 1}, else: {:halt, acc}
    ...> end)
    {[1, 2, 3], 3}

"""
@spec flat_map_reduce(t, acc, fun) :: {[any], any} when
      fun: (element, acc -> {t, acc} | {:halt, acc}),
      acc: any
def flat_map_reduce(collection, acc, fun) do
  # The accumulator pairs the (reversed) flattened output with the user
  # accumulator. The outer `{_, ...}` discards the :done/:halted tag.
  {_, {list, acc}} =
    Enumerable.reduce(collection, {:cont, {[], acc}}, fn(entry, {list, acc}) ->
      case fun.(entry, acc) do
        # User requested early termination.
        {:halt, acc} ->
          {:halt, {list, acc}}

        # Special-cased empty and singleton results avoid the inner reduce.
        {[], acc} ->
          {:cont, {list, acc}}

        {[entry], acc} ->
          {:cont, {[entry|list], acc}}

        # General case: fold the returned enumerable onto the output list.
        {entries, acc} ->
          {:cont, {reduce(entries, list, &[&1|&2]), acc}}
      end
    end)

  {:lists.reverse(list), acc}
end
@doc """
Intersperses `element` between each element of the enumeration.

Complexity: O(n)

## Examples

    iex> Enum.intersperse([1, 2, 3], 0)
    [1, 0, 2, 0, 3]

    iex> Enum.intersperse([1], 0)
    [1]

    iex> Enum.intersperse([], 0)
    []

"""
@spec intersperse(t, element) :: list
def intersperse(collection, element) do
  # Prepend "value, separator" pairs, then reverse: the reversed list
  # starts with one separator too many, which the case below trims.
  interspersed =
    collection
    |> reduce([], fn value, acc -> [value, element | acc] end)
    |> :lists.reverse()

  case interspersed do
    [] -> []
    [_leading_separator | rest] -> rest
  end
end
@doc """
Inserts the given enumerable into a collectable.

## Examples

    iex> Enum.into([1, 2], [0])
    [0, 1, 2]

    iex> Enum.into([a: 1, b: 2], %{})
    %{a: 1, b: 2}

"""
@spec into(Enumerable.t, Collectable.t) :: Collectable.t
# Target is a list: materialize the source and append.
def into(collection, list) when is_list(list) do
  list ++ to_list(collection)
end

# Clause order is significant below: the struct clauses must precede the
# bare-map clauses so that structs always go through the Collectable
# protocol rather than being treated as plain maps.
def into(%{__struct__: _} = collection, collectable) do
  do_into(collection, collectable)
end

def into(collection, %{__struct__: _} = collectable) do
  do_into(collection, collectable)
end

# Plain map into plain map: a single merge, source entries win.
def into(%{} = collection, %{} = collectable) do
  Map.merge(collectable, collection)
end

# Keyword/tuple list into a plain map.
def into(collection, %{} = collectable) when is_list(collection) do
  Map.merge(collectable, :maps.from_list(collection))
end

# Any other enumerable into a plain map: each element must be a
# {key, value} tuple.
def into(collection, %{} = collectable) do
  reduce(collection, collectable, fn {k, v}, acc ->
    Map.put(acc, k, v)
  end)
end

# Generic fallback through the Collectable protocol.
def into(collection, collectable) do
  do_into(collection, collectable)
end

# Drives the Collectable protocol: `Collectable.into/1` yields the initial
# accumulator and a collector fun that is fed {:cont, x} commands by the
# private into/4 below.
defp do_into(collection, collectable) do
  {initial, fun} = Collectable.into(collectable)
  into(collection, initial, fun, fn x, acc ->
    fun.(acc, {:cont, x})
  end)
end
@doc """
Inserts the given enumerable into a collectable
according to the transformation function.

## Examples

    iex> Enum.into([2, 3], [3], fn x -> x * 3 end)
    [3, 6, 9]

"""
@spec into(Enumerable.t, Collectable.t, (term -> term)) :: Collectable.t
# List target: map the source and append.
def into(collection, list, transform) when is_list(list) and is_function(transform, 1) do
  list ++ map(collection, transform)
end

# Generic target: same protocol dance as into/2, but each element is
# transformed before being handed to the collector fun.
def into(collection, collectable, transform) when is_function(transform, 1) do
  {initial, fun} = Collectable.into(collectable)
  into(collection, initial, fun, fn x, acc ->
    fun.(acc, {:cont, transform.(x)})
  end)
end

# Shared driver for the Collectable protocol. On success the collector is
# told :done to finalize; if the reduction throws/raises/exits, the
# collector is told :halt so it can clean up, then the original error is
# re-raised with its captured stacktrace.
defp into(collection, initial, fun, callback) do
  try do
    reduce(collection, initial, callback)
  catch
    kind, reason ->
      stacktrace = System.stacktrace
      fun.(initial, :halt)
      :erlang.raise(kind, reason, stacktrace)
  else
    acc -> fun.(acc, :done)
  end
end
@doc """
Joins the given `collection` into a binary using `joiner` as a separator.

If `joiner` is not passed at all, it defaults to the empty binary.

All items in the collection must be convertible
to a binary, otherwise an error is raised.

## Examples

    iex> Enum.join([1, 2, 3])
    "123"

    iex> Enum.join([1, 2, 3], " = ")
    "1 = 2 = 3"

"""
@spec join(t, String.t) :: String.t
def join(collection, joiner \\ "")

def join(collection, joiner) when is_binary(joiner) do
  # Build iodata rather than concatenating binaries: the accumulator is
  # :first until the first element seeds it, then each element is nested
  # with the joiner, and the iodata is flattened once at the end.
  folded =
    reduce(collection, :first, fn
      entry, :first -> enum_to_string(entry)
      entry, acc -> [acc, joiner|enum_to_string(entry)]
    end)

  case folded do
    :first -> ""
    iodata -> IO.iodata_to_binary(iodata)
  end
end
@doc """
Returns a new collection, where each item is the result
of invoking `fun` on each corresponding item of `collection`.

For dicts, the function expects a key-value tuple.

## Examples

    iex> Enum.map([1, 2, 3], fn(x) -> x * 2 end)
    [2, 4, 6]

    iex> Enum.map([a: 1, b: 2], fn({k, v}) -> {k, -v} end)
    [a: -1, b: -2]

"""
@spec map(t, (element -> any)) :: list
# Lists use a comprehension.
def map(collection, fun) when is_list(collection) do
  for element <- collection, do: fun.(element)
end

# Other enumerables fold through R.map/1 and reverse once at the end.
def map(collection, fun) do
  collection
  |> reduce([], R.map(fun))
  |> :lists.reverse()
end
@doc """
Maps and joins the given `collection` in one pass.

`joiner` can be either a binary or a list and the
result will be of the same type as `joiner`. If
`joiner` is not passed at all, it defaults to an
empty binary.

All items in the collection must be convertible
to a binary, otherwise an error is raised.

## Examples

    iex> Enum.map_join([1, 2, 3], &(&1 * 2))
    "246"

    iex> Enum.map_join([1, 2, 3], " = ", &(&1 * 2))
    "2 = 4 = 6"

"""
@spec map_join(t, String.t, (element -> any)) :: String.t
def map_join(collection, joiner \\ "", mapper)

def map_join(collection, joiner, mapper) when is_binary(joiner) do
  # Same iodata strategy as join/2, with `mapper` applied to each element
  # before stringification; the :first sentinel marks an unseeded fold.
  folded =
    reduce(collection, :first, fn
      entry, :first -> enum_to_string(mapper.(entry))
      entry, acc -> [acc, joiner|enum_to_string(mapper.(entry))]
    end)

  case folded do
    :first -> ""
    iodata -> IO.iodata_to_binary(iodata)
  end
end
@doc """
Invokes the given `fun` for each item in the `collection`
while also keeping an accumulator. Returns a tuple where
the first element is the mapped collection and the second
one is the final accumulator.

For dicts, the first tuple element must be a `{key, value}`
tuple.

## Examples

    iex> Enum.map_reduce([1, 2, 3], 0, fn(x, acc) -> {x * 2, x + acc} end)
    {[2, 4, 6], 6}

"""
@spec map_reduce(t, any, (element, any -> {any, any})) :: {any, any}
# Lists map directly onto Erlang's mapfoldl.
def map_reduce(collection, acc, fun) when is_list(collection) do
  :lists.mapfoldl(fun, acc, collection)
end

# Generic path: accumulate mapped values in reverse alongside the user
# accumulator, then restore order with one reverse.
def map_reduce(collection, acc, fun) do
  {mapped, final_acc} =
    reduce(collection, {[], acc}, fn element, {mapped, acc} ->
      {new_element, new_acc} = fun.(element, acc)
      {[new_element | mapped], new_acc}
    end)

  {:lists.reverse(mapped), final_acc}
end
@doc """
Returns the maximum value.

Raises `EmptyError` if the collection is empty.

## Examples

    iex> Enum.max([1, 2, 3])
    3

"""
@spec max(t) :: element | no_return
# A 1-arity reduce seeds with the first element, so the empty-collection
# error comes from reduce/2 itself.
def max(collection) do
  reduce(collection, &Kernel.max/2)
end
@doc """
Returns the maximum value as calculated by the given function.

Raises `EmptyError` if the collection is empty.

## Examples

    iex> Enum.max_by(["a", "aa", "aaa"], fn(x) -> String.length(x) end)
    "aaa"

"""
@spec max_by(t, (element -> any)) :: element | no_return
# List fast path: seed the fold with the head so `fun` runs exactly once
# per element; the accumulator pairs the best element with its key.
def max_by([h|t], fun) do
  reduce(t, {h, fun.(h)}, fn(entry, {_, fun_max} = old) ->
    fun_entry = fun.(entry)
    if(fun_entry > fun_max, do: {entry, fun_entry}, else: old)
  end) |> elem(0)
end

def max_by([], _fun) do
  raise Enum.EmptyError
end

# Generic enumerables: the :first sentinel stands in until the first
# element seeds the {element, key} accumulator.
def max_by(collection, fun) do
  result =
    reduce(collection, :first, fn
      entry, {_, fun_max} = old ->
        fun_entry = fun.(entry)
        if(fun_entry > fun_max, do: {entry, fun_entry}, else: old)
      entry, :first ->
        {entry, fun.(entry)}
    end)

  # :first surviving the fold means the enumerable was empty.
  case result do
    :first -> raise Enum.EmptyError
    {entry, _} -> entry
  end
end
@doc """
Checks if `value` exists within the `collection`.

Membership is tested with the match (`===`) operator, although
enumerables like ranges may include floats inside the given
range.

## Examples

    iex> Enum.member?(1..10, 5)
    true

    iex> Enum.member?([:a, :b, :c], :d)
    false

"""
@spec member?(t, element) :: boolean
# Lists delegate to the BIF-backed :lists.member/2.
def member?(collection, value) when is_list(collection) do
  :lists.member(value, collection)
end

def member?(collection, value) do
  # Enumerable.member?/2 either answers directly ({:ok, boolean}) or
  # returns {:error, module} when the implementation has no fast
  # membership check; in that case fall back to a linear scan that halts
  # on the first strict (===) match.
  case Enumerable.member?(collection, value) do
    {:ok, value} when is_boolean(value) ->
      value
    {:error, module} ->
      module.reduce(collection, {:cont, false}, fn
        v, _ when v === value -> {:halt, true}
        _, _ -> {:cont, false}
      end) |> elem(1)
  end
end
@doc """
Returns the minimum value.

Raises `EmptyError` if the collection is empty.

## Examples

    iex> Enum.min([1, 2, 3])
    1

"""
@spec min(t) :: element | no_return
# A 1-arity reduce seeds with the first element, so the empty-collection
# error comes from reduce/2 itself.
def min(collection) do
  reduce(collection, &Kernel.min/2)
end
@doc """
Returns the minimum value as calculated by the given function.

Raises `EmptyError` if the collection is empty.

## Examples

    iex> Enum.min_by(["a", "aa", "aaa"], fn(x) -> String.length(x) end)
    "a"

"""
@spec min_by(t, (element -> any)) :: element | no_return
# List fast path: seed the fold with the head so `fun` runs exactly once
# per element; the accumulator pairs the best element with its key.
def min_by([h|t], fun) do
  reduce(t, {h, fun.(h)}, fn(entry, {_, fun_min} = old) ->
    fun_entry = fun.(entry)
    if(fun_entry < fun_min, do: {entry, fun_entry}, else: old)
  end) |> elem(0)
end

def min_by([], _fun) do
  raise Enum.EmptyError
end

# Generic enumerables: the :first sentinel stands in until the first
# element seeds the {element, key} accumulator.
def min_by(collection, fun) do
  result =
    reduce(collection, :first, fn
      entry, {_, fun_min} = old ->
        fun_entry = fun.(entry)
        if(fun_entry < fun_min, do: {entry, fun_entry}, else: old)
      entry, :first ->
        {entry, fun.(entry)}
    end)

  # :first surviving the fold means the enumerable was empty.
  case result do
    :first -> raise Enum.EmptyError
    {entry, _} -> entry
  end
end
@doc """
Returns a tuple with the minimum and maximum values.

Raises `EmptyError` if the collection is empty.

## Examples

    iex> Enum.minmax([2, 3, 1])
    {1, 3}

"""
# Spec fix: the function returns a {min, max} pair, not a bare element.
@spec minmax(t) :: {element, element} | no_return
def minmax(collection) do
  # Single pass: :first marks an unseeded accumulator; the first element
  # seeds both extremes, and every later element tightens them.
  result =
    Enum.reduce(collection, :first, fn
      entry, {min_value, max_value} ->
        {Kernel.min(entry, min_value), Kernel.max(entry, max_value)}
      entry, :first ->
        {entry, entry}
    end)

  case result do
    :first -> raise Enum.EmptyError
    result -> result
  end
end
@doc """
Returns a tuple with the minimum and maximum values as calculated by the given function.

Raises `EmptyError` if the collection is empty.

## Examples

    iex> Enum.minmax_by(["aaa", "bb", "c"], fn(x) -> String.length(x) end)
    {"c", "aaa"}

"""
# Spec fix: the function returns a {min, max} pair, not a bare element.
@spec minmax_by(t, (element -> any)) :: {element, element} | no_return
def minmax_by(collection, fun) do
  # Single pass keeping {element, key} pairs for both extremes; :first
  # marks an unseeded accumulator.
  result =
    Enum.reduce(collection, :first, fn
      entry, {{_, fun_min} = acc_min, {_, fun_max} = acc_max} ->
        fun_entry = fun.(entry)
        # Rebind explicitly: a match inside `if ..., do:` does not escape
        # the conditional, so the original one-line form silently dropped
        # every update to acc_min/acc_max.
        acc_min = if fun_entry < fun_min, do: {entry, fun_entry}, else: acc_min
        acc_max = if fun_entry > fun_max, do: {entry, fun_entry}, else: acc_max
        {acc_min, acc_max}
      entry, :first ->
        fun_entry = fun.(entry)
        {{entry, fun_entry}, {entry, fun_entry}}
    end)

  case result do
    :first ->
      raise Enum.EmptyError
    {{min_entry, _}, {max_entry, _}} ->
      {min_entry, max_entry}
  end
end
@doc """
Returns the sum of all values.

Raises `ArithmeticError` if collection contains a non-numeric value.

## Examples

    iex> Enum.sum([1, 2, 3])
    6

"""
@spec sum(t) :: number
# Fold with +/2; the 0 seed makes the empty collection sum to 0.
def sum(collection) do
  reduce(collection, 0, fn value, total -> value + total end)
end
@doc """
Partitions `collection` into two collections, where the first one contains elements
for which `fun` returns a truthy value, and the second one -- for which `fun`
returns `false` or `nil`.

## Examples

    iex> Enum.partition([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
    {[2], [1, 3]}

"""
@spec partition(t, (element -> any)) :: {list, list}
def partition(collection, fun) do
  # Route each element into one of two prepend-accumulators, then reverse
  # both once to restore the original relative order.
  {truthy, falsy} =
    reduce(collection, {[], []}, fn element, {truthy, falsy} ->
      if fun.(element) do
        {[element | truthy], falsy}
      else
        {truthy, [element | falsy]}
      end
    end)

  {:lists.reverse(truthy), :lists.reverse(falsy)}
end
@doc """
Splits `collection` into groups based on `fun`.

The result is a dict (by default a map) where each key is
a group and each value is a list of elements from `collection`
for which `fun` returned that group. Ordering is not necessarily
preserved.

## Examples

    iex> Enum.group_by(~w{ant buffalo cat dingo}, &String.length/1)
    %{3 => ["cat", "ant"], 7 => ["buffalo"], 5 => ["dingo"]}

"""
@spec group_by(t, dict, (element -> any)) :: dict when dict: Dict.t
# `dict` may be any Dict implementation; Dict.update/4 either starts the
# group with [entry] or prepends entry to the existing group list (so each
# group ends up in reverse encounter order).
def group_by(collection, dict \\ %{}, fun) do
  reduce(collection, dict, fn(entry, categories) ->
    Dict.update(categories, fun.(entry), [entry], &[entry|&1])
  end)
end
@doc """
Invokes `fun` for each element in the collection passing that element and the
accumulator `acc` as arguments. `fun`'s return value is stored in `acc`.
Returns the accumulator.

## Examples

    iex> Enum.reduce([1, 2, 3], 0, fn(x, acc) -> x + acc end)
    6

"""
@spec reduce(t, any, (element, any -> any)) :: any
# Lists use the BIF-backed left fold.
def reduce(collection, acc, fun) when is_list(collection) do
  :lists.foldl(fun, acc, collection)
end

# Clause order matters: the struct clause must come before the bare-map
# clause so that structs are reduced through their Enumerable
# implementation rather than as raw maps.
def reduce(%{__struct__: _} = collection, acc, fun) do
  Enumerable.reduce(collection, {:cont, acc},
    fn x, acc -> {:cont, fun.(x, acc)} end) |> elem(1)
end

# Plain maps fold over {key, value} pairs directly.
def reduce(%{} = collection, acc, fun) do
  :maps.fold(fn k, v, acc -> fun.({k, v}, acc) end, acc, collection)
end

# Everything else goes through the Enumerable protocol; the plain fun is
# wrapped so it always continues, and elem(1) drops the :done tag.
def reduce(collection, acc, fun) do
  Enumerable.reduce(collection, {:cont, acc},
    fn x, acc -> {:cont, fun.(x, acc)} end) |> elem(1)
end
@doc """
Invokes `fun` for each element in the collection passing that element and the
accumulator `acc` as arguments. `fun`'s return value is stored in `acc`.
The first element of the collection is used as the initial value of `acc`.
Returns the accumulator.

## Examples

    iex> Enum.reduce([1, 2, 3, 4], fn(x, acc) -> x * acc end)
    24

"""
@spec reduce(t, (element, any -> any)) :: any
# Lists: the head seeds the 3-arity reduce over the tail.
def reduce([h|t], fun) do
  reduce(t, h, fun)
end

def reduce([], _fun) do
  raise Enum.EmptyError
end

# Generic enumerables: :first marks "no element seen yet"; afterwards the
# running value is wrapped in {:acc, _} so it can never be confused with
# the sentinel, even if the collection itself contains :first.
def reduce(collection, fun) do
  result =
    Enumerable.reduce(collection, {:cont, :first}, fn
      x, :first ->
        {:cont, {:acc, x}}
      x, {:acc, acc} ->
        {:cont, {:acc, fun.(x, acc)}}
    end) |> elem(1)

  # :first surviving the fold means the enumerable was empty.
  case result do
    :first -> raise Enum.EmptyError
    {:acc, acc} -> acc
  end
end
@doc """
Reduces the collection until halt is emitted.

The return value for `fun` is expected to be
`{:cont, acc}`, return `{:halt, acc}` to end the reduction early.

Returns the accumulator.

## Examples

    iex> Enum.reduce_while(1..100, 0, fn i, acc ->
    ...>   if i < 3, do: {:cont, acc + i}, else: {:halt, acc}
    ...> end)
    3

"""
# Added the missing @spec; the reducer's return shape is the documented
# {:cont, acc} | {:halt, acc} contract.
@spec reduce_while(Enumerable.t, any, (any, any -> {:cont, any} | {:halt, any})) :: any
def reduce_while(collection, acc, fun) do
  # `fun` already speaks the Enumerable reducer protocol, so it is handed
  # to Enumerable.reduce/3 as-is; elem(1) unwraps the {:done | :halted, acc}
  # result tuple.
  Enumerable.reduce(collection, {:cont, acc}, fun) |> elem(1)
end
@doc """
Returns elements of collection for which `fun` returns `false` or `nil`.

## Examples

    iex> Enum.reject([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
    [1, 3]

"""
@spec reject(t, (element -> as_boolean(term))) :: list
# Lists use a comprehension with the predicate negated.
def reject(collection, fun) when is_list(collection) do
  for element <- collection, !fun.(element), do: element
end

# Other enumerables fold through R.reject/1 and reverse once at the end.
def reject(collection, fun) do
  collection
  |> reduce([], R.reject(fun))
  |> :lists.reverse()
end
@doc """
Reverses the collection.

## Examples

    iex> Enum.reverse([1, 2, 3])
    [3, 2, 1]

"""
@spec reverse(t) :: list
# Lists delegate to the BIF; anything else is reverse/2 with an empty tail.
def reverse(collection) when is_list(collection), do: :lists.reverse(collection)
def reverse(collection), do: reverse(collection, [])
@doc """
Reverses the collection and appends the tail.
This is an optimization for
`Enum.concat(Enum.reverse(collection), tail)`.

## Examples

    iex> Enum.reverse([1, 2, 3], [4, 5, 6])
    [3, 2, 1, 4, 5, 6]

"""
@spec reverse(t, t) :: list
# Both lists: the BIF reverses the first onto the second in one pass.
def reverse(collection, tail) when is_list(collection) and is_list(tail) do
  :lists.reverse(collection, tail)
end

# Generic path: prepending every element onto the materialized tail
# reverses the collection in front of it.
def reverse(collection, tail) do
  reduce(collection, to_list(tail), fn element, acc -> [element | acc] end)
end
@doc """
Reverses the collection in the range from initial position `first`
through `count` elements. If `count` is greater than the size of
the rest of the collection, then this function will reverse the rest
of the collection.

## Examples

    iex> Enum.reverse_slice([1, 2, 3, 4, 5, 6], 2, 4)
    [1, 2, 6, 5, 4, 3]

"""
@spec reverse_slice(t, non_neg_integer, non_neg_integer) :: list
def reverse_slice(collection, start, count) when start >= 0 and count >= 0 do
  # Work on the reversed list so the slice can be re-reversed in place by
  # the private reverse_slice/5 (defined elsewhere in this module).
  list = reverse(collection)
  length = length(list)
  # Clamp the slice so it cannot run past the end of the collection.
  count = Kernel.min(count, length - start)

  if count > 0 do
    reverse_slice(list, length, start + count, count, [])
  else
    # Nothing to reverse (start beyond the end, or clamped count <= 0):
    # just undo the initial reversal.
    :lists.reverse(list)
  end
end
@doc """
Returns a random element of a collection.
Raises `EmptyError` if the collection is empty.

Notice that you need to explicitly call `:random.seed/1` and
set a seed value for the random algorithm. Otherwise, the
default seed will be set which will always return the same
result. For example, one could do the following to set a seed
dynamically:

    :random.seed(:os.timestamp)

The implementation is based on the
[reservoir sampling](http://en.wikipedia.org/wiki/Reservoir_sampling#Relation_to_Fisher-Yates_shuffle)
algorithm.
It assumes that the sample being returned can fit into memory;
the input collection doesn't have to - it is traversed just once.

## Examples

    iex> Enum.random([1, 2, 3])
    1
    iex> Enum.random([1, 2, 3])
    2

"""
@spec random(t) :: element
# Delegates to random/2 with a sample of size 1; an empty sample means the
# collection itself was empty.
def random(collection) do
  case random(collection, 1) do
    [] -> raise Enum.EmptyError
    [e] -> e
  end
end
@doc """
Returns a random sublist of a collection.

Notice this function will traverse the whole collection to
get the random sublist of collection. If you want the random
number between two integers, the best option is to use the
:random module.

See `random/1` for notes on implementation and random seed.

## Examples

    iex> Enum.random(1..10, 2)
    [1, 5]
    iex> Enum.random(?a..?z, 5)
    'tfesm'

"""
@spec random(t, integer) :: list
def random(collection, count) when count > 0 do
  # Reservoir sampling: a tuple of `count` slots is filled while the first
  # `count` elements stream by, after which each element replaces a random
  # slot with decreasing probability.
  sample = Tuple.duplicate(nil, count)

  reducer = fn x, {i, sample} ->
    j = random_index(i)

    if i < count do
      # Reservoir not full yet: put x at random slot j and move the
      # displaced value into slot i, so all elements seen so far stay in
      # the sample.
      swapped = sample |> elem(j)
      {i + 1, sample |> put_elem(i, swapped) |> put_elem(j, x)}
    else
      # Reservoir full: replace slot j only when it falls inside the
      # reservoir. Rebind explicitly — the original
      # `if j < count, do: sample = ...` form did not propagate the new
      # tuple out of the conditional, silently dropping replacements.
      sample = if j < count, do: put_elem(sample, j, x), else: sample
      {i + 1, sample}
    end
  end

  {n, sample} = reduce(collection, {0, sample}, reducer)
  # If fewer than `count` elements were seen, only the filled prefix of
  # the reservoir is meaningful.
  sample |> Tuple.to_list |> take(Kernel.min(count, n))
end

def random(_collection, 0), do: []
@doc """
Applies the given function to each element in the collection,
storing the result in a list and passing it as the accumulator
for the next computation.

## Examples

    iex> Enum.scan(1..5, &(&1 + &2))
    [1, 3, 6, 10, 15]

"""
@spec scan(t, (element, any -> any)) :: list
# R.scan_2/1 builds the reducer; its accumulator pairs the (reversed)
# running results with the latest value (seeded by :first).
def scan(enum, fun) do
  {scanned, _last} = reduce(enum, {[], :first}, R.scan_2(fun))
  :lists.reverse(scanned)
end
@doc """
Applies the given function to each element in the collection,
storing the result in a list and passing it as the accumulator
for the next computation. Uses the given `acc` as the starting value.

## Examples

    iex> Enum.scan(1..5, 0, &(&1 + &2))
    [1, 3, 6, 10, 15]

"""
@spec scan(t, any, (element, any -> any)) :: list
# Same as scan/2 but the running value starts at the supplied `acc`.
def scan(enum, acc, fun) do
  {scanned, _last} = reduce(enum, {[], acc}, R.scan_3(fun))
  :lists.reverse(scanned)
end
@doc """
Returns a list of collection elements shuffled.

Notice that you need to explicitly call `:random.seed/1` and
set a seed value for the random algorithm. Otherwise, the
default seed will be set which will always return the same
result. For example, one could do the following to set a seed
dynamically:

    :random.seed(:os.timestamp)

## Examples

    iex> Enum.shuffle([1, 2, 3])
    [3, 2, 1]
    iex> Enum.shuffle([1, 2, 3])
    [3, 1, 2]

"""
@spec shuffle(t) :: list
def shuffle(collection) do
  # Decorate each element with a random key, sort by that key, then strip
  # the keys off again with the private unwrap/2.
  randomized = reduce(collection, [], fn x, acc ->
    [{:random.uniform, x}|acc]
  end)
  unwrap(:lists.keysort(1, randomized), [])
end
@doc """
Returns a subset list of the given collection. Drops elements
until element position `start`, then takes `count` elements.

If the count is greater than collection length, it returns as
much as possible. If zero, then it returns `[]`.

## Examples

    iex> Enum.slice(1..100, 5, 10)
    [6, 7, 8, 9, 10, 11, 12, 13, 14, 15]

    iex> Enum.slice(1..10, 5, 100)
    [6, 7, 8, 9, 10]

    iex> Enum.slice(1..10, 5, 0)
    []

"""
@spec slice(t, integer, non_neg_integer) :: list
# A zero count never yields anything, regardless of collection or start.
def slice(_collection, _start, 0), do: []

# Negative start: materialize the collection to translate the position
# relative to its end, then retry with the normalized index.
def slice(collection, start, count) when start < 0 do
  {list, new_start} = enumerate_and_count(collection, start)
  if new_start >= 0 do
    slice(list, new_start, count)
  else
    []
  end
end

# Fast path for lists via the private do_slice/3.
def slice(collection, start, count) when is_list(collection) and start >= 0 and count > 0 do
  do_slice(collection, start, count)
end

# Generic path: the accumulator counts down `start` (skip phase), then
# counts down `count` while collecting, and halts on the final element so
# the enumerable is not traversed further than needed.
def slice(collection, start, count) when start >= 0 and count > 0 do
  {_, _, list} = Enumerable.reduce(collection, {:cont, {start, count, []}}, fn
    _entry, {start, count, _list} when start > 0 ->
      {:cont, {start-1, count, []}}
    entry, {start, count, list} when count > 1 ->
      {:cont, {start, count-1, [entry|list]}}
    entry, {start, count, list} ->
      {:halt, {start, count, [entry|list]}}
  end) |> elem(1)

  :lists.reverse(list)
end
@doc """
Returns a subset list of the given collection. Drops elements
until element position `range.first`, then takes elements until element
position `range.last` (inclusive).

Positions are calculated by adding the number of items in the collection to
negative positions (so position -3 in a collection with count 5 becomes
position 2).

The first position (after adding count to negative positions) must be smaller
or equal to the last position.

If the start of the range is not a valid offset for the given
collection or if the range is in reverse order, returns `[]`.

## Examples

    iex> Enum.slice(1..100, 5..10)
    [6, 7, 8, 9, 10, 11]

    iex> Enum.slice(1..10, 5..20)
    [6, 7, 8, 9, 10]

    iex> Enum.slice(1..10, 11..20)
    []

    iex> Enum.slice(1..10, 6..5)
    []

"""
@spec slice(t, Range.t) :: list
def slice(collection, range)

# Both bounds non-negative: no need to know the collection's size, so this
# clause also works on infinite collections.
def slice(collection, first..last) when first >= 0 and last >= 0 do
  if last - first >= 0 do
    slice(collection, first, last - first + 1)
  else
    # Reversed range (e.g. 6..5) yields nothing.
    []
  end
end

# At least one negative bound: the collection must be materialized and
# counted so end-relative positions can be translated.
def slice(collection, first..last) do
  {list, count} = enumerate_and_count(collection, 0)
  corr_first = if first >= 0, do: first, else: first + count
  corr_last = if last >= 0, do: last, else: last + count
  length = corr_last - corr_first + 1

  if corr_first >= 0 and length > 0 do
    slice(list, corr_first, length)
  else
    []
  end
end
@doc """
Sorts the collection according to Elixir's term ordering.

Uses the merge sort algorithm.

## Examples

    iex> Enum.sort([3, 2, 1])
    [1, 2, 3]

"""
@spec sort(t) :: list
# Lists go straight to Erlang's merge sort.
def sort(collection) when is_list(collection), do: :lists.sort(collection)

# Everything else is sorted through sort/2 with the default `<=` ordering.
def sort(collection), do: sort(collection, &(&1 <= &2))
@doc """
Sorts the collection by the given function.

This function uses the merge sort algorithm. The given function
must return `false` if the first argument is less than right one.

## Examples

    iex> Enum.sort([1, 2, 3], &(&1 > &2))
    [3, 2, 1]

The sorting algorithm will be stable as long as the given function
returns `true` for values considered equal:

    iex> Enum.sort ["some", "kind", "of", "monster"], &(byte_size(&1) <= byte_size(&2))
    ["of", "some", "kind", "monster"]

If the function does not return `true` for equal values, the sorting is not stable and
the order of equal terms may be shuffled:

    iex> Enum.sort ["some", "kind", "of", "monster"], &(byte_size(&1) < byte_size(&2))
    ["of", "kind", "some", "monster"]

"""
@spec sort(t, (element, element -> boolean)) :: list
# Lists delegate to Erlang's comparator-driven merge sort.
def sort(collection, fun) when is_list(collection) do
  :lists.sort(fun, collection)
end

# Generic enumerables: an incremental merge sort built from the private
# sort_reducer/3 (accumulates sorted runs) and sort_terminator/2 (final
# merge), both defined elsewhere in this module.
def sort(collection, fun) do
  reduce(collection, [], &sort_reducer(&1, &2, fun)) |> sort_terminator(fun)
end
@doc """
Sorts the mapped results of the `collection` according to the `sorter` function.

This function maps each element of the collection using the `mapper`
function. The collection is then sorted by the mapped elements using the
`sorter` function, which defaults to `<=/2`

`sort_by/3` differs from `sort/2` in that it only calculates the comparison
value for each element in the collection once instead of once for each
element in each comparison. If the same function is being called on both
element, it's also more compact to use `sort_by/3`.

This technique is also known as a
[Schwartzian Transform](https://en.wikipedia.org/wiki/Schwartzian_transform),
or the Lisp decorate-sort-undecorate idiom as the `mapper` is decorating the
original `collection`, then `sorter` is sorting the decorations, and finally
the `collection` is being undecorated so only the original elements remain,
but now in sorted order.

## Examples

Using the default `sorter` of `<=/2`:

    iex> Enum.sort_by ["some", "kind", "of", "monster"], &byte_size/1
    ["of", "some", "kind", "monster"]

Using a custom `sorter` to override the order:

    iex> Enum.sort_by ["some", "kind", "of", "monster"], &byte_size/1, &>=/2
    ["monster", "some", "kind", "of"]

"""
@spec sort_by(t, (element -> mapped_element), (mapped_element, mapped_element -> boolean)) :: list when mapped_element: element
def sort_by(collection, mapper, sorter \\ &<=/2) do
  # Decorate-sort-undecorate: compute each sort key once, sort the
  # {element, key} pairs by key, then strip the keys.
  decorated = map(collection, &{&1, mapper.(&1)})
  sorted = sort(decorated, fn {_, left_key}, {_, right_key} -> sorter.(left_key, right_key) end)
  map(sorted, fn {original, _key} -> original end)
end
@doc """
Splits the enumerable into two collections, leaving `count`
elements in the first one. If `count` is a negative number,
it starts counting from the back to the beginning of the
collection.

Be aware that a negative `count` implies the collection
will be enumerated twice: once to calculate the position, and
a second time to do the actual splitting.

## Examples

    iex> Enum.split([1, 2, 3], 2)
    {[1, 2], [3]}

    iex> Enum.split([1, 2, 3], 10)
    {[1, 2, 3], []}

    iex> Enum.split([1, 2, 3], 0)
    {[], [1, 2, 3]}

    iex> Enum.split([1, 2, 3], -1)
    {[1, 2], [3]}

    iex> Enum.split([1, 2, 3], -5)
    {[], [1, 2, 3]}

"""
@spec split(t, integer) :: {list, list}
# Fast path for lists via the private do_split/3.
def split(collection, count) when is_list(collection) and count >= 0 do
  do_split(collection, count, [])
end

# Generic path: a countdown routes the first `count` elements into the
# first accumulator and everything after into the second; both are built
# reversed and restored at the end.
def split(collection, count) when count >= 0 do
  {_, list1, list2} =
    reduce(collection, {count, [], []}, fn(entry, {counter, acc1, acc2}) ->
      if counter > 0 do
        {counter - 1, [entry|acc1], acc2}
      else
        {counter, acc1, [entry|acc2]}
      end
    end)

  {:lists.reverse(list1), :lists.reverse(list2)}
end

# Negative count: reverse and split from the front by |count|; the private
# do_split_reverse/3 un-reverses both halves as it splits.
def split(collection, count) when count < 0 do
  do_split_reverse(reverse(collection), abs(count), [])
end
@doc """
Splits `collection` in two at the position of the element for which `fun` returns `false` for the
first time.

## Examples

    iex> Enum.split_while([1, 2, 3, 4], fn(x) -> x < 3 end)
    {[1, 2], [3, 4]}

"""
@spec split_while(t, (element -> as_boolean(term))) :: {list, list}
# Fast path for lists via the private do_split_while/3.
def split_while(collection, fun) when is_list(collection) do
  do_split_while(collection, fun, [])
end

# Generic path: while the second accumulator is still empty the predicate
# decides which side each element goes to; once the second accumulator is
# non-empty, every remaining element lands there (the predicate is no
# longer called). Both sides are built reversed.
def split_while(collection, fun) do
  {list1, list2} =
    reduce(collection, {[], []}, fn
      entry, {acc1, []} ->
        if(fun.(entry), do: {[entry|acc1], []}, else: {acc1, [entry]})
      entry, {acc1, acc2} ->
        {acc1, [entry|acc2]}
    end)

  {:lists.reverse(list1), :lists.reverse(list2)}
end
@doc """
Takes the first `count` items from the collection.

`count` must be an integer. If a negative `count` is given, the last `count` values will
be taken. For such, the collection is fully enumerated keeping up
to `2 * count` elements in memory. Once the end of the collection is
reached, the last `count` elements are returned.

## Examples

    iex> Enum.take([1, 2, 3], 2)
    [1, 2]

    iex> Enum.take([1, 2, 3], 10)
    [1, 2, 3]

    iex> Enum.take([1, 2, 3], 0)
    []

    iex> Enum.take([1, 2, 3], -1)
    [3]

"""
@spec take(t, integer) :: list
# Trivial cases avoid any traversal.
def take(_collection, 0), do: []
def take([], _count), do: []

# Fast path for lists via the private do_take/2.
def take(collection, n) when is_list(collection) and is_integer(n) and n > 0 do
  do_take(collection, n)
end

# Generic path: count down while collecting (reversed); halting on the
# count-1 element stops the enumeration as early as possible.
def take(collection, n) when is_integer(n) and n > 0 do
  {_, {res, _}} =
    Enumerable.reduce(collection, {:cont, {[], n}}, fn(entry, {list, count}) ->
      case count do
        0 -> {:halt, {list, count}}
        1 -> {:halt, {[entry|list], count - 1}}
        _ -> {:cont, {[entry|list], count - 1}}
      end
    end)

  :lists.reverse(res)
end

# Negative count: keep a sliding window of at most 2n elements. buf1
# collects the newest elements (reversed); every time it reaches n
# elements it becomes buf2 (the previous full window) and buf1 restarts.
# do_take_last/4 then assembles the final n elements from both buffers.
def take(collection, n) when is_integer(n) and n < 0 do
  n = abs(n)

  {_count, buf1, buf2} =
    reduce(collection, {0, [], []}, fn entry, {count, buf1, buf2} ->
      buf1 = [entry|buf1]
      count = count + 1
      if count == n do
        {0, [], buf1}
      else
        {count, buf1, buf2}
      end
    end)

  do_take_last(buf1, buf2, n, [])
end
defp do_take_last(_buf1, _buf2, 0, acc),
do: acc
defp do_take_last([], [], _, acc),
do: acc
defp do_take_last([], [h|t], n, acc),
do: do_take_last([], t, n-1, [h|acc])
defp do_take_last([h|t], buf2, n, acc),
do: do_take_last(t, buf2, n-1, [h|acc])
@doc """
Returns a collection of every `nth` item in the collection,
starting with the first element.
The second argument specifying every `nth` item must be a non-negative integer.
## Examples
iex> Enum.take_every(1..10, 2)
[1, 3, 5, 7, 9]
"""
  @spec take_every(t, non_neg_integer) :: list
  # Every 0th element is the empty list by convention.
  def take_every(_collection, 0), do: []
  def take_every([], _nth), do: []
  # Delegates the stride bookkeeping to the shared reducer in `R.take_every/1`
  # (`:first` marks that the very first element is always taken).
  def take_every(collection, nth) when is_integer(nth) and nth > 0 do
    {res, _} = reduce(collection, {[], :first}, R.take_every(nth))
    :lists.reverse(res)
  end
@doc """
Takes the items from the beginning of `collection` while `fun` returns a truthy value.
## Examples
iex> Enum.take_while([1, 2, 3], fn(x) -> x < 3 end)
[1, 2]
"""
  @spec take_while(t, (element -> as_boolean(term))) :: list
  # Fast path: plain lists are walked directly.
  def take_while(collection, fun) when is_list(collection) do
    do_take_while(collection, fun)
  end
  # Generic enumerables: halt enumeration on the first falsy result so the
  # rest of the collection is never touched.
  def take_while(collection, fun) do
    {_, res} =
      Enumerable.reduce(collection, {:cont, []}, fn(entry, acc) ->
        if fun.(entry) do
          {:cont, [entry|acc]}
        else
          {:halt, acc}
        end
      end)
    :lists.reverse(res)
  end
@doc """
Converts `collection` to a list.
## Examples
iex> Enum.to_list(1 .. 3)
[1, 2, 3]
"""
  @spec to_list(t) :: [term]
  # Lists are already lists.
  def to_list(collection) when is_list(collection) do
    collection
  end
  # Other enumerables: `reverse/1` materializes the elements (reversed), and a
  # single `:lists.reverse/1` restores the original order.
  def to_list(collection) do
    reverse(collection) |> :lists.reverse
  end
@doc """
Enumerates the collection, removing all duplicated elements.
## Examples
iex> Enum.uniq([1, 2, 3, 3, 2, 1]) |> Enum.to_list
[1, 2, 3]
"""
  @spec uniq(t) :: list
  # `uniq/1` is `uniq_by/2` with the identity function.
  def uniq(collection) do
    uniq_by(collection, fn x -> x end)
  end
  # TODO: Deprecate by 1.2
  # TODO: Remove by 2.0
  # Legacy two-argument form kept for backwards compatibility; hidden from
  # docs and slated for removal (see TODOs above).
  @doc false
  def uniq(collection, fun) do
    uniq_by(collection, fun)
  end
@doc """
Enumerates the collection, removing all duplicated elements.
## Example
iex> Enum.uniq_by([{1, :x}, {2, :y}, {1, :z}], fn {x, _} -> x end)
[{1, :x}, {2, :y}]
"""
  @spec uniq_by(t, (element -> term)) :: list
  # Fast path for plain lists; seen keys are tracked in a HashSet
  # (pre-dates the `MapSet` module).
  def uniq_by(collection, fun) when is_list(collection) do
    do_uniq(collection, HashSet.new, fun)
  end
  # Generic enumerables delegate the dedup logic to the shared `R.uniq/1`
  # reducer; the first occurrence of each key wins.
  def uniq_by(collection, fun) do
    {list, _} = reduce(collection, {[], HashSet.new}, R.uniq(fun))
    :lists.reverse(list)
  end
@doc """
Opposite of `Enum.zip/2`; takes a list of two-element tuples and returns a
tuple with two lists, each of which is formed by the first and second element
of each tuple, respectively.
This function fails unless `collection` is or can be converted into a list of
tuples with *exactly* two elements in each tuple.
## Examples
iex> Enum.unzip([{:a, 1}, {:b, 2}, {:c, 3}])
{[:a, :b, :c], [1, 2, 3]}
iex> Enum.unzip(%{a: 1, b: 2})
{[:a, :b], [1, 2]}
"""
  @spec unzip(t) :: {list(element), list(element)}
  # The reducer head pattern-matches `{el1, el2}`, so enumerating anything
  # that is not a 2-tuple raises — this is the documented "exactly two
  # elements" requirement enforced assertively.
  def unzip(collection) do
    {list1, list2} = reduce(collection, {[], []}, fn({el1, el2}, {list1, list2}) ->
      {[el1|list1], [el2|list2]}
    end)
    {:lists.reverse(list1), :lists.reverse(list2)}
  end
@doc """
Zips corresponding elements from two collections into one list
of tuples.
The zipping finishes as soon as any enumerable completes.
## Examples
iex> Enum.zip([1, 2, 3], [:a, :b, :c])
[{1, :a}, {2, :b}, {3, :c}]
iex> Enum.zip([1, 2, 3, 4, 5], [:a, :b, :c])
[{1, :a}, {2, :b}, {3, :c}]
"""
  @spec zip(t, t) :: [{any, any}]
  # Fast path: two plain lists are zipped by direct recursion.
  def zip(collection1, collection2) when is_list(collection1) and is_list(collection2) do
    do_zip(collection1, collection2)
  end
  # Generic enumerables: drive the lazy `Stream.zip/2` reducer manually with a
  # collect-into-list continuation, then unwrap and re-order the result.
  def zip(collection1, collection2) do
    Stream.zip(collection1, collection2).({:cont, []}, &{:cont, [&1|&2]}) |> elem(1) |> :lists.reverse
  end
@doc """
Returns the collection with each element wrapped in a tuple
alongside its index.
## Examples
iex> Enum.with_index [1, 2, 3]
[{1, 0}, {2, 1}, {3, 2}]
"""
  @spec with_index(t) :: list({element, non_neg_integer})
  # Implemented as a map_reduce where the accumulator is the running index,
  # starting at 0; the final index is discarded with `elem(0)`.
  def with_index(collection) do
    map_reduce(collection, 0, fn x, acc ->
      {{x, acc}, acc + 1}
    end) |> elem(0)
  end
## Helpers
  @compile {:inline, enum_to_string: 1}
  # Returns `{list, length(list) - abs(count)}` — the collection as a list
  # plus how many elements remain after removing `abs(count)` of them.
  defp enumerate_and_count(collection, count) when is_list(collection) do
    {collection, length(collection) - abs(count)}
  end
  # Non-list version: counts while materializing, starting from -abs(count)
  # so the final accumulator equals length - abs(count) in a single pass.
  defp enumerate_and_count(collection, count) do
    map_reduce(collection, -abs(count), fn(x, acc) -> {x, acc + 1} end)
  end
  # Coerces a single element to a binary for string-building helpers.
  defp enum_to_string(entry) when is_binary(entry), do: entry
  defp enum_to_string(entry), do: String.Chars.to_string(entry)
  # Uniform index in 0..n (inclusive). Uses the legacy `:random` module,
  # which was the standard PRNG when this code was written.
  defp random_index(n) do
    :random.uniform(n + 1) - 1
  end
## Implementations
## all?
defp do_all?([h|t], fun) do
if fun.(h) do
do_all?(t, fun)
else
false
end
end
defp do_all?([], _) do
true
end
## any?
defp do_any?([h|t], fun) do
if fun.(h) do
true
else
do_any?(t, fun)
end
end
defp do_any?([], _) do
false
end
## fetch
defp do_fetch([h|_], 0), do: {:ok, h}
defp do_fetch([_|t], n), do: do_fetch(t, n - 1)
defp do_fetch([], _), do: :error
## drop
  # Drops `counter` elements from the front; an empty list with a positive
  # counter falls through to the final clause and yields [].
  defp do_drop([_|t], counter) when counter > 0 do
    do_drop(t, counter - 1)
  end
  defp do_drop(list, 0) do
    list
  end
  defp do_drop([], _) do
    []
  end
  ## drop_while
  # Drops the longest truthy prefix; returns the remainder starting with the
  # first element for which `fun` is falsy.
  defp do_drop_while([h|t], fun) do
    if fun.(h) do
      do_drop_while(t, fun)
    else
      [h|t]
    end
  end
  defp do_drop_while([], _) do
    []
  end
  ## find
  # Returns the first element for which `fun` is truthy, or `ifnone` when the
  # list is exhausted.
  defp do_find([h|t], ifnone, fun) do
    if fun.(h) do
      h
    else
      do_find(t, ifnone, fun)
    end
  end
  defp do_find([], ifnone, _) do
    ifnone
  end
## find_index
  # Index (starting at `counter`) of the first element satisfying `fun`,
  # or nil when none does.
  defp do_find_index([h|t], counter, fun) do
    if fun.(h) do
      counter
    else
      do_find_index(t, counter + 1, fun)
    end
  end
  defp do_find_index([], _, _) do
    nil
  end
  ## find_value
  # Returns the first truthy `fun.(h)` itself (not the element), falling back
  # to `ifnone`; relies on `||` skipping falsy results.
  defp do_find_value([h|t], ifnone, fun) do
    fun.(h) || do_find_value(t, ifnone, fun)
  end
  defp do_find_value([], ifnone, _) do
    ifnone
  end
  ## shuffle
  # Strips the random sort keys attached by shuffle, collecting the values.
  # Note the result is accumulated in reverse of the keyed list's order.
  defp unwrap([{_, h} | collection], t) do
    unwrap(collection, [h|t])
  end
  defp unwrap([], t), do: t
## sort
  # Merge-sort reducer modelled after Erlang's :lists.sort/2: it detects
  # ascending/descending runs while elements stream in. The accumulator is
  # either a plain list (fewer than 2 elements seen), a `:split` tuple or a
  # `:pivot` tuple; `y`/`x` are the last two elements of the current run,
  # `r` the rest of the run, `rs` completed runs, and `bool` the direction
  # of the current run (result of `fun.(x, entry)` when the run started).
  defp sort_reducer(entry, {:split, y, x, r, rs, bool}, fun) do
    cond do
      # Entry continues the current run past `y`.
      fun.(y, entry) == bool ->
        {:split, entry, y, [x|r], rs, bool}
      # Entry fits between `x` and `y`.
      fun.(x, entry) == bool ->
        {:split, y, entry, [x|r], rs, bool}
      # Run too short to close: restart it with `entry` alone.
      r == [] ->
        {:split, y, x, [entry], rs, bool}
      # Entry breaks the run: remember it as the pivot of the next run.
      true ->
        {:pivot, y, x, r, rs, entry, bool}
    end
  end
  defp sort_reducer(entry, {:pivot, y, x, r, rs, s, bool}, fun) do
    cond do
      fun.(y, entry) == bool ->
        {:pivot, entry, y, [x | r], rs, s, bool}
      fun.(x, entry) == bool ->
        {:pivot, y, entry, [x | r], rs, s, bool}
      # Close the current run ([y, x | r]) and open a new one seeded with the
      # pivot `s` and `entry`, in whichever order matches the comparator.
      fun.(s, entry) == bool ->
        {:split, entry, s, [], [[y, x | r] | rs], bool}
      true ->
        {:split, s, entry, [], [[y, x | r] | rs], bool}
    end
  end
  # Second element seen: fix the run direction from the first comparison.
  defp sort_reducer(entry, [x], fun) do
    {:split, entry, x, [], [], fun.(x, entry)}
  end
  # Zero or one element so far: just collect.
  defp sort_reducer(entry, acc, _fun) do
    [entry|acc]
  end
  # Flushes the reducer state and merges all detected runs into the final
  # sorted list; small inputs (plain-list accumulator) are returned as-is.
  defp sort_terminator({:split, y, x, r, rs, bool}, fun) do
    sort_merge([[y, x | r] | rs], fun, bool)
  end
  defp sort_terminator({:pivot, y, x, r, rs, s, bool}, fun) do
    sort_merge([[s], [y, x | r] | rs], fun, bool)
  end
  defp sort_terminator(acc, _fun) do
    acc
  end
  # Pairwise merging of runs, ported from :lists.erl. The `bool` flag carries
  # the direction the runs were collected in; merging alternates between the
  # forward and reverse variants so each pass flips the orientation, and a
  # single-run remainder is reversed before joining the other pile.
  defp sort_merge(list, fun, true), do:
    reverse_sort_merge(list, [], fun, true)
  defp sort_merge(list, fun, false), do:
    sort_merge(list, [], fun, false)
  defp sort_merge([t1, [h2 | t2] | l], acc, fun, true), do:
    sort_merge(l, [sort_merge_1(t1, h2, t2, [], fun, false) | acc], fun, true)
  defp sort_merge([[h2 | t2], t1 | l], acc, fun, false), do:
    sort_merge(l, [sort_merge_1(t1, h2, t2, [], fun, false) | acc], fun, false)
  # One run left and nothing merged yet: it is the final sorted list.
  defp sort_merge([l], [], _fun, _bool), do: l
  defp sort_merge([l], acc, fun, bool), do:
    reverse_sort_merge([:lists.reverse(l, []) | acc], [], fun, bool)
  defp sort_merge([], acc, fun, bool), do:
    reverse_sort_merge(acc, [], fun, bool)
  defp reverse_sort_merge([[h2 | t2], t1 | l], acc, fun, true), do:
    reverse_sort_merge(l, [sort_merge_1(t1, h2, t2, [], fun, true) | acc], fun, true)
  defp reverse_sort_merge([t1, [h2 | t2] | l], acc, fun, false), do:
    reverse_sort_merge(l, [sort_merge_1(t1, h2, t2, [], fun, false) | acc], fun, false)
  defp reverse_sort_merge([l], acc, fun, bool), do:
    sort_merge([:lists.reverse(l, []) | acc], [], fun, bool)
  defp reverse_sort_merge([], acc, fun, bool), do:
    sort_merge(acc, [], fun, bool)
  # Core two-way merge: `sort_merge_1` advances the first run, `sort_merge_2`
  # the second; `m` accumulates merged output in reverse, and the tail of the
  # surviving run is appended with :lists.reverse/2 when the other runs out.
  defp sort_merge_1([h1 | t1], h2, t2, m, fun, bool) do
    if fun.(h1, h2) == bool do
      sort_merge_2(h1, t1, t2, [h2 | m], fun, bool)
    else
      sort_merge_1(t1, h2, t2, [h1 | m], fun, bool)
    end
  end
  defp sort_merge_1([], h2, t2, m, _fun, _bool), do:
    :lists.reverse(t2, [h2 | m])
  defp sort_merge_2(h1, t1, [h2 | t2], m, fun, bool) do
    if fun.(h1, h2) == bool do
      sort_merge_2(h1, t1, t2, [h2 | m], fun, bool)
    else
      sort_merge_1(t1, h2, t2, [h1 | m], fun, bool)
    end
  end
  defp sort_merge_2(h1, t1, [], m, _fun, _bool), do:
    :lists.reverse(t1, [h1 | m])
## reverse_slice
  # Walks `idx` down to `start` (first clause matches when they are equal),
  # accumulating the prefix reversed in `acc`; then reverses the `count`-long
  # slice and stitches prefix + reversed slice + remainder back together.
  defp reverse_slice(rest, idx, idx, count, acc) do
    {slice, rest} = head_slice(rest, count, [])
    :lists.reverse(rest, :lists.reverse(slice, acc))
  end
  defp reverse_slice([elem | rest], idx, start, count, acc) do
    reverse_slice(rest, idx - 1, start, count, [elem | acc])
  end
  # Takes the first `count` elements (reversed) and returns them with the
  # remaining tail.
  defp head_slice(rest, 0, acc), do: {acc, rest}
  defp head_slice([elem | rest], count, acc) do
    head_slice(rest, count - 1, [elem | acc])
  end
## split
  # Splits a list after `counter` elements; the prefix is accumulated in
  # reverse and fixed up on return.
  defp do_split([h|t], counter, acc) when counter > 0 do
    do_split(t, counter - 1, [h|acc])
  end
  defp do_split(list, 0, acc) do
    {:lists.reverse(acc), list}
  end
  defp do_split([], _, acc) do
    {:lists.reverse(acc), []}
  end
  # Variant used for negative split counts: operates on an already-reversed
  # list, so the accumulated half comes out in original order and the
  # remaining half needs one more reverse.
  defp do_split_reverse([h|t], counter, acc) when counter > 0 do
    do_split_reverse(t, counter - 1, [h|acc])
  end
  defp do_split_reverse(list, 0, acc) do
    {:lists.reverse(list), acc}
  end
  defp do_split_reverse([], _, acc) do
    {[], acc}
  end
  ## split_while
  # Prefix = longest run of truthy `fun` results; remainder starts at the
  # first falsy element (which stays in the second list).
  defp do_split_while([h|t], fun, acc) do
    if fun.(h) do
      do_split_while(t, fun, [h|acc])
    else
      {:lists.reverse(acc), [h|t]}
    end
  end
  defp do_split_while([], _, acc) do
    {:lists.reverse(acc), []}
  end
## take
  # First `counter` elements of a list; body-recursive so the result is built
  # in order without a final reverse. Short lists simply end early.
  defp do_take([h|t], counter) when counter > 0 do
    [h|do_take(t, counter - 1)]
  end
  defp do_take(_list, 0) do
    []
  end
  defp do_take([], _) do
    []
  end
  ## take_while
  # Longest truthy prefix; stops (and drops the rest) at the first falsy
  # result. Body-recursive for the same in-order reason as do_take/2.
  defp do_take_while([h|t], fun) do
    if fun.(h) do
      [h|do_take_while(t, fun)]
    else
      []
    end
  end
  defp do_take_while([], _) do
    []
  end
## uniq
  # Keeps the first element for each distinct `fun.(h)` key; `acc` is the
  # HashSet of keys already emitted. Body-recursive to preserve order.
  defp do_uniq([h|t], acc, fun) do
    fun_h = fun.(h)
    if HashSet.member?(acc, fun_h) do
      do_uniq(t, acc, fun)
    else
      [h|do_uniq(t, HashSet.put(acc, fun_h), fun)]
    end
  end
  defp do_uniq([], _acc, _fun) do
    []
  end
  ## zip
  # Pairs elements positionally; stops when either list runs out.
  defp do_zip([h1|next1], [h2|next2]) do
    [{h1, h2}|do_zip(next1, next2)]
  end
  defp do_zip(_, []), do: []
  defp do_zip([], _), do: []
  ## slice
  # Skips `start` elements, then takes `count`; clause order makes count == 0
  # win over a non-empty list, and an exhausted list yield [] early.
  defp do_slice([], _start, _count) do
    []
  end
  defp do_slice(_list, _start, 0) do
    []
  end
  defp do_slice([h|t], 0, count) do
    [h|do_slice(t, 0, count-1)]
  end
  defp do_slice([_|t], start, count) do
    do_slice(t, start-1, count)
  end
end
defimpl Enumerable, for: List do
  # Reference reducer implementation: honors :halt (stop now), :suspend
  # (return a continuation capturing the rest of the list) and :cont
  # (apply `fun` to the head and recurse on the tail).
  def reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
  def reduce(list, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(list, &1, fun)}
  def reduce([], {:cont, acc}, _fun), do: {:done, acc}
  def reduce([h|t], {:cont, acc}, fun), do: reduce(t, fun.(h, acc), fun)
  # No O(1) membership/count for linked lists: returning {:error, __MODULE__}
  # tells Enum to fall back to the reduce-based default algorithms.
  def member?(_list, _value),
    do: {:error, __MODULE__}
  def count(_list),
    do: {:error, __MODULE__}
end
defimpl Enumerable, for: Map do
  # Enumerates a map as its {key, value} pair list (materialized up front).
  def reduce(map, acc, fun) do
    do_reduce(:maps.to_list(map), acc, fun)
  end
  # Same halt/suspend/cont protocol as the List implementation, over the
  # materialized pair list.
  defp do_reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
  defp do_reduce(list, {:suspend, acc}, fun), do: {:suspended, acc, &do_reduce(list, &1, fun)}
  defp do_reduce([], {:cont, acc}, _fun), do: {:done, acc}
  defp do_reduce([h|t], {:cont, acc}, fun), do: do_reduce(t, fun.(h, acc), fun)
  # Membership is only meaningful for {key, value} pairs: the key must be
  # present AND map to exactly that value. Anything else is simply false.
  def member?(map, {key, value}) do
    {:ok, match?({:ok, ^value}, :maps.find(key, map))}
  end
  def member?(_map, _other) do
    {:ok, false}
  end
  # Maps know their size in O(1).
  def count(map) do
    {:ok, map_size(map)}
  end
end
defimpl Enumerable, for: Function do
  # A 2-arity function is treated as its own reducer (the representation used
  # by streams): enumerating it just invokes it with the acc and reducer fun.
  def reduce(function, acc, fun) when is_function(function, 2),
    do: function.(acc, fun)
  # No shortcut for membership or count; fall back to reduce-based defaults.
  def member?(_function, _value),
    do: {:error, __MODULE__}
  def count(_function),
    do: {:error, __MODULE__}
end
|
lib/elixir/lib/enum.ex
| 0.934924 | 0.7413 |
enum.ex
|
starcoder
|
defmodule AWS.LookoutVision do
@moduledoc """
This is the Amazon Lookout for Vision API Reference.
It provides descriptions of actions, data types, common parameters, and common
errors.
Amazon Lookout for Vision enables you to find visual defects in industrial
products, accurately and at scale. It uses computer vision to identify missing
components in an industrial product, damage to vehicles or structures,
irregularities in production lines, and even minuscule defects in silicon wafers
— or any other physical item where quality is important such as a missing
capacitor on printed circuit boards.
"""
alias AWS.Client
alias AWS.Request
  # Static service descriptor consumed by `AWS.Request` to build, sign and
  # route every call in this module (endpoint, protocol, signing name, etc.).
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: nil,
      api_version: "2020-11-20",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "lookoutvision",
      global?: false,
      protocol: "rest-json",
      service_id: "LookoutVision",
      signature_version: "v4",
      signing_name: "lookoutvision",
      target_prefix: nil
    }
  end
@doc """
Creates a new dataset in an Amazon Lookout for Vision project.
`CreateDataset` can create a training or a test dataset from a valid dataset
source (`DatasetSource`).
If you want a single dataset project, specify `train` for the value of
`DatasetType`.
To have a project with separate training and test datasets, call `CreateDataset`
twice. On the first call, specify `train` for the value of `DatasetType`. On the
second call, specify `test` for the value of `DatasetType`.
This operation requires permissions to perform the `lookoutvision:CreateDataset`
operation.
"""
  # POST /2020-11-20/projects/{project_name}/datasets
  # The "ClientToken" key is lifted out of `input` into the
  # X-Amzn-Client-Token header (idempotency token); 202 is the expected
  # success status for this asynchronous operation.
  def create_dataset(%Client{} = client, project_name, input, options \\ []) do
    url_path = "/2020-11-20/projects/#{URI.encode(project_name)}/datasets"
    {headers, input} =
      [
        {"ClientToken", "X-Amzn-Client-Token"}
      ]
      |> Request.build_params(input)
    query_params = []
    Request.request_rest(
      client,
      metadata(),
      :post,
      url_path,
      query_params,
      headers,
      input,
      options,
      202
    )
  end
@doc """
Creates a new version of a model within an an Amazon Lookout for Vision project.
`CreateModel` is an asynchronous operation in which Amazon Lookout for Vision
trains, tests, and evaluates a new version of a model.
To get the current status, check the `Status` field returned in the response
from `DescribeModel`.
If the project has a single dataset, Amazon Lookout for Vision internally splits
the dataset to create a training and a test dataset. If the project has a
training and a test dataset, Lookout for Vision uses the respective datasets to
train and test the model.
After training completes, the evaluation metrics are stored at the location
specified in `OutputConfig`.
This operation requires permissions to perform the `lookoutvision:CreateModel`
operation. If you want to tag your model, you also require permission to the
`lookoutvision:TagResource` operation.
"""
def create_model(%Client{} = client, project_name, input, options \\ []) do
url_path = "/2020-11-20/projects/#{URI.encode(project_name)}/models"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Creates an empty Amazon Lookout for Vision project.
After you create the project, add a dataset by calling `CreateDataset`.
This operation requires permissions to perform the `lookoutvision:CreateProject`
operation.
"""
def create_project(%Client{} = client, input, options \\ []) do
url_path = "/2020-11-20/projects"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes an existing Amazon Lookout for Vision `dataset`.
If your the project has a single dataset, you must create a new dataset before
you can create a model.
If you project has a training dataset and a test dataset consider the following.
* If you delete the test dataset, your project reverts to a single
dataset project. If you then train the model, Amazon Lookout for Vision
internally splits the remaining dataset into a training and test dataset.
* If you delete the training dataset, you must create a training
dataset before you can create a model.
This operation requires permissions to perform the `lookoutvision:DeleteDataset`
operation.
"""
def delete_dataset(%Client{} = client, dataset_type, project_name, input, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/datasets/#{URI.encode(dataset_type)}"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Deletes an Amazon Lookout for Vision model.
You can't delete a running model. To stop a running model, use the `StopModel`
operation.
It might take a few seconds to delete a model. To determine if a model has been
deleted, call `ListProjects` and check if the version of the model
(`ModelVersion`) is in the `Models` array.
This operation requires permissions to perform the `lookoutvision:DeleteModel`
operation.
"""
def delete_model(%Client{} = client, model_version, project_name, input, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/models/#{URI.encode(model_version)}"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Deletes an Amazon Lookout for Vision project.
To delete a project, you must first delete each version of the model associated
with the project. To delete a model use the `DeleteModel` operation.
You also have to delete the dataset(s) associated with the model. For more
information, see `DeleteDataset`. The images referenced by the training and test
datasets aren't deleted.
This operation requires permissions to perform the `lookoutvision:DeleteProject`
operation.
"""
def delete_project(%Client{} = client, project_name, input, options \\ []) do
url_path = "/2020-11-20/projects/#{URI.encode(project_name)}"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describe an Amazon Lookout for Vision dataset.
This operation requires permissions to perform the
`lookoutvision:DescribeDataset` operation.
"""
def describe_dataset(%Client{} = client, dataset_type, project_name, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/datasets/#{URI.encode(dataset_type)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Describes a version of an Amazon Lookout for Vision model.
This operation requires permissions to perform the `lookoutvision:DescribeModel`
operation.
"""
def describe_model(%Client{} = client, model_version, project_name, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/models/#{URI.encode(model_version)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Describes an Amazon Lookout for Vision project.
This operation requires permissions to perform the
`lookoutvision:DescribeProject` operation.
"""
def describe_project(%Client{} = client, project_name, options \\ []) do
url_path = "/2020-11-20/projects/#{URI.encode(project_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Detects anomalies in an image that you supply.
The response from `DetectAnomalies` includes a boolean prediction that the image
contains one or more anomalies and a confidence value for the prediction.
Before calling `DetectAnomalies`, you must first start your model with the
`StartModel` operation. You are charged for the amount of time, in minutes, that
a model runs and for the number of anomaly detection units that your model uses.
If you are not using a model, use the `StopModel` operation to stop your model.
This operation requires permissions to perform the
`lookoutvision:DetectAnomalies` operation.
"""
def detect_anomalies(%Client{} = client, model_version, project_name, input, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/models/#{URI.encode(model_version)}/detect"
{headers, input} =
[
{"ContentType", "Content-Type"}
]
|> Request.build_params(input)
query_params = []
options =
Keyword.put(
options,
:send_body_as_binary?,
true
)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Lists the JSON Lines within a dataset.
An Amazon Lookout for Vision JSON Line contains the anomaly information for a
single image, including the image location and the assigned label.
This operation requires permissions to perform the
`lookoutvision:ListDatasetEntries` operation.
"""
def list_dataset_entries(
%Client{} = client,
dataset_type,
project_name,
after_creation_date \\ nil,
anomaly_class \\ nil,
before_creation_date \\ nil,
labeled \\ nil,
max_results \\ nil,
next_token \\ nil,
source_ref_contains \\ nil,
options \\ []
) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/datasets/#{URI.encode(dataset_type)}/entries"
headers = []
query_params = []
query_params =
if !is_nil(source_ref_contains) do
[{"sourceRefContains", source_ref_contains} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(labeled) do
[{"labeled", labeled} | query_params]
else
query_params
end
query_params =
if !is_nil(before_creation_date) do
[{"createdBefore", before_creation_date} | query_params]
else
query_params
end
query_params =
if !is_nil(anomaly_class) do
[{"anomalyClass", anomaly_class} | query_params]
else
query_params
end
query_params =
if !is_nil(after_creation_date) do
[{"createdAfter", after_creation_date} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the versions of a model in an Amazon Lookout for Vision project.
This operation requires permissions to perform the `lookoutvision:ListModels`
operation.
"""
def list_models(
%Client{} = client,
project_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/2020-11-20/projects/#{URI.encode(project_name)}/models"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the Amazon Lookout for Vision projects in your AWS account.
This operation requires permissions to perform the `lookoutvision:ListProjects`
operation.
"""
def list_projects(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/2020-11-20/projects"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of tags attached to the specified Amazon Lookout for Vision
model.
This operation requires permissions to perform the
`lookoutvision:ListTagsForResource` operation.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/2020-11-20/tags/#{URI.encode(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Starts the running of the version of an Amazon Lookout for Vision model.
Starting a model takes a while to complete. To check the current state of the
model, use `DescribeModel`.
A model is ready to use when its status is `HOSTED`.
Once the model is running, you can detect custom labels in new images by calling
`DetectAnomalies`.
You are charged for the amount of time that the model is running. To stop a
running model, call `StopModel`.
This operation requires permissions to perform the `lookoutvision:StartModel`
operation.
"""
def start_model(%Client{} = client, model_version, project_name, input, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/models/#{URI.encode(model_version)}/start"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Stops the hosting of a running model.
The operation might take a while to complete. To check the current status, call
`DescribeModel`.
After the model hosting stops, the `Status` of the model is `TRAINED`.
This operation requires permissions to perform the `lookoutvision:StopModel`
operation.
"""
def stop_model(%Client{} = client, model_version, project_name, input, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/models/#{URI.encode(model_version)}/stop"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Adds one or more key-value tags to an Amazon Lookout for Vision model.
For more information, see *Tagging a model* in the *Amazon Lookout for Vision
Developer Guide*.
This operation requires permissions to perform the `lookoutvision:TagResource`
operation.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/2020-11-20/tags/#{URI.encode(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes one or more tags from an Amazon Lookout for Vision model.
For more information, see *Tagging a model* in the *Amazon Lookout for Vision
Developer Guide*.
This operation requires permissions to perform the `lookoutvision:UntagResource`
operation.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/2020-11-20/tags/#{URI.encode(resource_arn)}"
headers = []
{query_params, input} =
[
{"TagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Adds one or more JSON Line entries to a dataset.
A JSON Line includes information about an image used for training or testing an
Amazon Lookout for Vision model. The following is an example JSON Line.
Updating a dataset might take a while to complete. To check the current status,
call `DescribeDataset` and check the `Status` field in the response.
This operation requires permissions to perform the
`lookoutvision:UpdateDatasetEntries` operation.
"""
def update_dataset_entries(%Client{} = client, dataset_type, project_name, input, options \\ []) do
url_path =
"/2020-11-20/projects/#{URI.encode(project_name)}/datasets/#{URI.encode(dataset_type)}/entries"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
202
)
end
end
|
lib/aws/generated/lookout_vision.ex
| 0.888674 | 0.661868 |
lookout_vision.ex
|
starcoder
|
defmodule SampleProjects.Language.WordComplete do
  @moduledoc false
  # Demo: trains a GRUM recurrent network (project-local NeuralNet library)
  # to predict the next letter of a word, printing progress each iteration.
  def run do
    # NOTE(review): `gen_training_data` is called without parentheses —
    # deprecated style in modern Elixir; behavior is unchanged.
    {training_data, letters} = gen_training_data
    blank_vector = NeuralNet.get_blank_vector(letters)
    IO.puts "Generating neural network."
    net = GRUM.new(%{input_ids: letters, output_ids: letters, memory_size: 100})
    IO.puts "Beginning training."
    # The callback runs after each training iteration; returning true
    # (error < 0.0001) is presumably the stop condition — confirm against
    # NeuralNet.train/5's contract.
    NeuralNet.train(net, training_data, 1.5, 2, fn info ->
      IO.puts "#{info.error}, iteration ##{info.iterations}"
      {input, exp_output} = Enum.random(training_data)
      {_, acc_plain} = NeuralNet.eval(info.net, input) #Get its expected letters given the whole word.
      # Generate 10 letters by feeding each prediction back in as the next
      # input (one-hot via blank_vector + max component).
      {_, acc_feedback} = Enum.reduce 1..10, {hd(input), [%{}]}, fn _, {letter, acc} -> #generates with feedback
        {vec, acc} = NeuralNet.eval(info.net, [letter], acc)
        {Map.put(blank_vector, NeuralNet.get_max_component(vec), 1), acc}
      end
      actual = stringify [hd(input) | exp_output]
      letters = stringify [hd(input) | get_values(acc_plain)]
      feedbacked = stringify [hd(input) | get_values(acc_feedback)]
      IO.puts "#{actual} / #{letters} | #{feedbacked}"
      info.error < 0.0001
    end, 2)
  end
  # Extracts the per-timestep output value maps from an evaluation
  # accumulator, skipping the first (initial-state) frame.
  def get_values(acc) do
    Enum.map(Enum.slice(acc, 1..(length(acc) - 1)), fn time_frame ->
      time_frame.output.values
    end)
  end
  # Maps each output vector to the id of its strongest component.
  def stringify(vectors) do
    Enum.map(vectors, fn vec ->
      NeuralNet.get_max_component(vec)
    end)
  end
  # Builds {input, expected_output} pairs: for each word, the input is the
  # word minus its last letter and the target is the word shifted by one,
  # with letters one-hot encoded over a-z. Raises on any word containing a
  # character outside a-z.
  def gen_training_data do
    {_, words} = SampleProjects.Language.Parse.parse("lib/sample_projects/language/common_sense.txt")
    IO.puts "Sample data contains #{MapSet.size(words)} words."
    words = words
    |> Enum.to_list()
    |> Enum.map(&String.to_char_list/1)
    |> Enum.filter(fn word -> length(word) > 1 end)
    letters = Enum.to_list(hd('a')..hd('z'))
    blank_vector = NeuralNet.get_blank_vector(letters)
    training_data = Enum.map words, fn word ->
      word = Enum.map(word, fn letter ->
        if !Enum.member?(letters, letter), do: raise "Weird word #{word}"
        Map.put(blank_vector, letter, 1)
      end)
      last = length(word) - 1
      {Enum.slice(word, 0..(last - 1)), Enum.slice(word, 1..last)}
    end
    {training_data, letters}
  end
end
|
lib/sample_projects/language/word_complete.ex
| 0.516595 | 0.485173 |
word_complete.ex
|
starcoder
|
defmodule ExWire.Packet.Disconnect do
  @moduledoc """
  Disconnect is when a peer wants to end a connection for a reason.

  ```
  **Disconnect** `0x01` [`reason`: `P`]
  Inform the peer that a disconnection is imminent; if received, a peer should
  disconnect immediately. When sending, well-behaved hosts give their peers a
  fighting chance (read: wait 2 seconds) to disconnect before disconnecting
  themselves.
  * `reason` is an optional integer specifying one of a number of reasons for disconnect:
    * `0x00` Disconnect requested;
    * `0x01` TCP sub-system error;
    * `0x02` Breach of protocol, e.g. a malformed message, bad RLP, incorrect magic number &c.;
    * `0x03` Useless peer;
    * `0x04` Too many peers;
    * `0x05` Already connected;
    * `0x06` Incompatible P2P protocol version;
    * `0x07` Null node identity received - this is automatically invalid;
    * `0x08` Client quitting;
    * `0x09` Unexpected identity (i.e. a different identity to a previous connection/what a trusted peer told us).
    * `0x0a` Identity is the same as this node (i.e. connected to itself);
    * `0x0b` Timeout on receiving a message (i.e. nothing received since sending last ping);
    * `0x10` Some other reason specific to a subprotocol.
  ```
  """
  require Logger

  @behaviour ExWire.Packet

  # The struct stores the *atom* form of the reason (a key of `@reasons`); the
  # raw wire integer only exists inside serialize/1 and deserialize/1.
  # Fix: the spec previously said `integer()`, contradicting every clause below.
  @type t :: %__MODULE__{
          reason: atom()
        }

  defstruct [
    :reason
  ]

  # Human-readable message per reason atom.
  # NOTE: `:incompatible_p2p_protcol_version` misspells "protocol"; the atom is
  # kept as-is because it is part of this module's public vocabulary.
  @reason_msgs %{
    disconnect_request: "disconnect requested",
    tcp_sub_system_error: "TCP sub-system error",
    break_of_protocol: "breach of protocol",
    useless_peer: "useless peer",
    too_many_peers: "too many peers",
    already_connected: "already connected",
    incompatible_p2p_protcol_version: "incompatible P2P protocol version",
    null_node_identity_received: "null node identity received",
    client_quitting: "client quitting",
    unexpected_identity: "unexpected identity",
    identity_is_same_as_self: "identity is the same as this node",
    timeout_on_receiving_message: "timeout on receiving a message",
    other_reason: "some other reason specific to a subprotocol"
  }

  # Wire-level reason codes (see the table in @moduledoc).
  @reasons %{
    disconnect_request: 0x00,
    tcp_sub_system_error: 0x01,
    break_of_protocol: 0x02,
    useless_peer: 0x03,
    too_many_peers: 0x04,
    already_connected: 0x05,
    incompatible_p2p_protcol_version: 0x06,
    null_node_identity_received: 0x07,
    client_quitting: 0x08,
    unexpected_identity: 0x09,
    identity_is_same_as_self: 0x0A,
    timeout_on_receiving_message: 0x0B,
    other_reason: 0x10
  }

  # Inverse lookup (wire code -> reason atom) used by `deserialize/1`.
  @reasons_inverted Map.new(@reasons, fn {k, v} -> {v, k} end)

  @doc """
  Given a Disconnect packet, serializes for transport over Eth Wire Protocol.

  ## Examples

      iex> %ExWire.Packet.Disconnect{reason: :timeout_on_receiving_message}
      ...> |> ExWire.Packet.Disconnect.serialize
      [0x0b]
  """
  @spec serialize(t) :: ExRLP.t()
  def serialize(packet = %__MODULE__{}) do
    [
      Map.get(@reasons, packet.reason)
    ]
  end

  @doc """
  Given an RLP-encoded Disconnect packet from Eth Wire Protocol,
  decodes into a Disconnect struct.

  ## Examples

      iex> ExWire.Packet.Disconnect.deserialize([<<0x0b>>])
      %ExWire.Packet.Disconnect{reason: :timeout_on_receiving_message}
  """
  @spec deserialize(ExRLP.t()) :: t
  def deserialize(rlp) do
    [
      reason
    ] = rlp

    %__MODULE__{
      reason: @reasons_inverted[reason |> :binary.decode_unsigned()]
    }
  end

  @doc """
  Creates a new disconnect message with given reason. This
  function raises if `reason` is not a known reason.

  ## Examples

      iex> ExWire.Packet.Disconnect.new(:too_many_peers)
      %ExWire.Packet.Disconnect{reason: :too_many_peers}

      iex> ExWire.Packet.Disconnect.new(:something_else)
      ** (RuntimeError) Invalid reason
  """
  @spec new(atom()) :: t | no_return()
  def new(reason) do
    # `Map.has_key?/2` instead of comparing the looked-up value with nil, so a
    # missing key cannot be confused with a nil value.
    if not Map.has_key?(@reasons, reason), do: raise("Invalid reason")

    %__MODULE__{
      reason: reason
    }
  end

  @doc """
  Returns a string interpretation of a reason for disconnect.

  ## Examples

      iex> ExWire.Packet.Disconnect.get_reason_msg(:timeout_on_receiving_message)
      "timeout on receiving a message"
  """
  @spec get_reason_msg(atom()) :: String.t()
  def get_reason_msg(reason) do
    @reason_msgs[reason]
  end

  @doc """
  Handles a Disconnect message. We are instructed to disconnect, which
  we'll abide by.

  ## Examples

      iex> %ExWire.Packet.Disconnect{reason: :too_many_peers}
      ...> |> ExWire.Packet.Disconnect.handle()
      :peer_disconnect
  """
  @spec handle(ExWire.Packet.packet()) :: ExWire.Packet.handle_response()
  def handle(packet = %__MODULE__{}) do
    _ = Logger.info("[Packet] Peer asked to disconnect for #{get_reason_msg(packet.reason)}.")

    :peer_disconnect
  end
end
|
apps/ex_wire/lib/ex_wire/packet/disconnect.ex
| 0.893704 | 0.798344 |
disconnect.ex
|
starcoder
|
defmodule Brando.Villain.Filters do
  use Phoenix.Component
  alias Brando.Utils
  alias Liquex.Context

  @moduledoc """
  Contains all the basic filters for Liquid
  """

  # A parsed Liquid filter node: `{:filter, [name, {:arguments, args}]}`.
  @type filter_t :: {:filter, [...]}

  # Contract implemented by modules that `use` this module to add custom filters.
  @callback apply(any, filter_t, map) :: any

  # Injects an `apply/3` that delegates to `apply/4` below, passing the using
  # module so its own filter functions are tried before the built-ins here.
  defmacro __using__(_) do
    quote do
      @behaviour Brando.Villain.Filters

      @spec apply(any, Brando.Villain.Filters.filter_t(), map) :: any
      @impl Brando.Villain.Filters
      def apply(value, filter, context),
        do: Brando.Villain.Filters.apply(__MODULE__, value, filter, context)
    end
  end

  # Extracts the filter's name from a parsed filter node.
  @spec filter_name(filter_t) :: String.t()
  def filter_name({:filter, [filter_name | _]}), do: filter_name

  # Applies the named filter to `value`.
  #
  # The name is resolved with `String.to_existing_atom/1`; an unknown filter
  # raises `ArgumentError`, which the `rescue` below converts into a
  # `Liquex.Error`. A custom `mod` is preferred when it exports a matching
  # function of arity `length(args) + 2` (value + args + context); otherwise
  # the built-in filter in this module is used.
  def apply(
        mod \\ __MODULE__,
        value,
        {:filter, [function, {:arguments, arguments}]},
        context
      ) do
    func = String.to_existing_atom(function)

    function_args =
      Enum.map(
        arguments,
        &Liquex.Argument.eval(&1, context)
      )
      |> merge_keywords()

    mod =
      if mod != __MODULE__ and Kernel.function_exported?(mod, func, length(function_args) + 2) do
        mod
      else
        __MODULE__
      end

    Kernel.apply(mod, func, [value | function_args] ++ [context])
  rescue
    # credo:disable-for-next-line
    ArgumentError -> raise Liquex.Error, "Invalid filter #{function}"
  end

  # Merges the tuples at the end of the argument list into a keyword list, but with string keys
  # value, size, {"crop", direction}, {"filter", filter}
  # becomes
  # value, size, [{"crop", direction}, {"filter", filter}]
  defp merge_keywords(arguments) do
    {keywords, rest} =
      arguments
      |> Enum.reverse()
      |> Enum.split_while(&is_tuple/1)

    case keywords do
      [] -> rest
      _ -> [Enum.reverse(keywords) | rest]
    end
    |> Enum.reverse()
  end

  @doc """
  Returns the absolute value of `value`.

  ## Examples

      iex> Liquex.Filter.abs(-1, %{})
      1

      iex> Liquex.Filter.abs(1, %{})
      1

      iex> Liquex.Filter.abs("-1.1", %{})
      1.1
  """
  @spec abs(String.t() | number, any) :: number
  def abs(value, _) when is_binary(value) do
    {float, ""} = Float.parse(value)
    abs(float)
  end

  # Non-binary values fall through to `Kernel.abs/1` (different arity, so the
  # local `abs/2` does not shadow it).
  def abs(value, _), do: abs(value)

  @doc """
  Appends `text` to the end of `value`

  ## Examples

      iex> Liquex.Filter.append("myfile", ".html", %{})
      "myfile.html"
  """
  @spec append(String.t(), String.t(), map()) :: String.t()
  def append(value, text, _), do: to_string(value) <> to_string(text)

  @doc """
  Sets a minimum value

  ## Examples

      iex> Liquex.Filter.at_least(3, 5, %{})
      5

      iex> Liquex.Filter.at_least(5, 3, %{})
      5
  """
  @spec at_least(number, number, map()) :: number
  def at_least(value, min, _) when value > min, do: value
  def at_least(_value, min, _), do: min

  @doc """
  Sets a maximum value

  ## Examples

      iex> Liquex.Filter.at_most(4, 5, %{})
      4

      iex> Liquex.Filter.at_most(4, 3, %{})
      3
  """
  @spec at_most(number, number, map()) :: number
  def at_most(value, max, _) when value < max, do: value
  def at_most(_value, max, _), do: max

  @doc """
  Capitalizes a string

  ## Examples

      iex> Liquex.Filter.capitalize("title", %{})
      "Title"

      iex> Liquex.Filter.capitalize("my great title", %{})
      "My great title"
  """
  @spec capitalize(String.t(), map()) :: String.t()
  def capitalize(value, _), do: String.capitalize(to_string(value))

  @doc """
  Rounds `value` up to the nearest whole number. Liquid tries to convert the input to a number before the filter is applied.

  ## Examples

      iex> Liquex.Filter.ceil(1.2, %{})
      2

      iex> Liquex.Filter.ceil(2.0, %{})
      2

      iex> Liquex.Filter.ceil(183.357, %{})
      184

      iex> Liquex.Filter.ceil("3.5", %{})
      4
  """
  @spec ceil(number | String.t(), map()) :: number
  def ceil(value, _) when is_binary(value) do
    {num, ""} = Float.parse(value)
    Float.ceil(num) |> trunc()
  end

  def ceil(value, _), do: Float.ceil(value) |> trunc()

  @doc """
  Removes any nil values from an array.

  ## Examples

      iex> Liquex.Filter.compact([1, 2, nil, 3], %{})
      [1,2,3]

      iex> Liquex.Filter.compact([1, 2, 3], %{})
      [1,2,3]
  """
  @spec compact([any], map()) :: [any]
  def compact(value, _) when is_list(value),
    do: Enum.reject(value, &is_nil/1)

  @doc """
  Concatenates (joins together) multiple arrays. The resulting array contains all the items

  ## Examples

      iex> Liquex.Filter.concat([1,2], [3,4], %{})
      [1,2,3,4]
  """
  def concat(value, other, _) when is_list(value) and is_list(other),
    do: value ++ other

  @doc """
  Allows you to specify a fallback in case a value doesn’t exist. default will show its value
  if the left side is nil, false, or empty.

  ## Examples

      iex> Liquex.Filter.default("1.99", "2.99", %{})
      "1.99"

      iex> Liquex.Filter.default("", "2.99", %{})
      "2.99"
  """
  def default(value, def_value, _) when value in [nil, "", false, []], do: def_value
  def default(value, _, _), do: value

  @doc """
  Divides a number by another number.

  ## Examples

  The result is rounded down to the nearest integer (that is, the floor) if the divisor is an integer.

      iex> Liquex.Filter.divided_by(16, 4, %{})
      4

      iex> Liquex.Filter.divided_by(5, 3, %{})
      1

      iex> Liquex.Filter.divided_by(20, 7.0, %{})
      2.857142857142857
  """
  def divided_by(value, divisor, _) when is_integer(divisor), do: trunc(value / divisor)
  def divided_by(value, divisor, _), do: value / divisor

  @doc """
  Makes each character in a string lowercase. It has no effect on strings
  which are already all lowercase.

  ## Examples

      iex> Liquex.Filter.downcase("<NAME>", %{})
      "<NAME>"

      iex> Liquex.Filter.downcase("apple", %{})
      "apple"
  """
  def downcase(nil, _), do: nil
  def downcase(value, _), do: String.downcase(to_string(value))

  # NOTE(review): the doctest outputs below look like they should contain HTML
  # entities (&amp; etc.) since the function encodes — they may have been
  # mangled when this file was copied. Verify against the HtmlEntities docs.
  @doc """
  Escapes a string by replacing characters with escape sequences (so that the string can
  be used in a URL, for example). It doesn’t change strings that don’t have anything to
  escape.

  ## Examples

      iex> Liquex.Filter.escape("Have you read 'James & the Giant Peach'?", %{})
      "Have you read 'James & the Giant Peach'?"

      iex> Liquex.Filter.escape("Tetsuro Takara", %{})
      "Tetsuro Takara"
  """
  def escape(value, _),
    do: HtmlEntities.encode(to_string(value))

  @doc """
  Escapes a string by replacing characters with escape sequences (so that the string can
  be used in a URL, for example). It doesn’t change strings that don’t have anything to
  escape.

  ## Examples

      iex> Liquex.Filter.escape_once("1 < 2 & 3", %{})
      "1 < 2 & 3"
  """
  def escape_once(value, _),
    do: to_string(value) |> HtmlEntities.decode() |> HtmlEntities.encode()

  @doc """
  Returns the first item of an array.

  ## Examples

      iex> Liquex.Filter.first([1, 2, 3], %{})
      1

      iex> Liquex.Filter.first([], %{})
      nil
  """
  def first([], _), do: nil
  def first([f | _], _), do: f

  # NOTE(review): `Kernel.trunc/1` truncates toward zero, which is not the same
  # as flooring for negative floats (trunc(-1.2) == -1, floor would be -2).
  # Confirm whether negative input matters for this filter.
  @doc """
  Rounds the input down to the nearest whole number. Liquid tries to convert the input to a
  number before the filter is applied.

  ## Examples

      iex> Liquex.Filter.floor(1.2, %{})
      1

      iex> Liquex.Filter.floor(2.0, %{})
      2
  """
  def floor(value, _), do: Kernel.trunc(value)

  @doc """
  Combines the items in `values` into a single string using `joiner` as a separator.

  ## Examples

      iex> Liquex.Filter.join(~w(<NAME> George Ringo), " and ", %{})
      "John and Paul and George and Ringo"
  """
  def join(values, joiner, _), do: Enum.join(values, joiner)

  @doc """
  Returns the last item of `arr`.

  ## Examples

      iex> Liquex.Filter.last([1, 2, 3], %{})
      3

      iex> Liquex.Filter.first([], %{})
      nil
  """
  @spec last(list, Liquex.Context.t()) :: any
  def last(arr, context), do: arr |> Enum.reverse() |> first(context)

  @doc """
  Removes all whitespace (tabs, spaces, and newlines) from the left side of a string.
  It does not affect spaces between words.

  ## Examples

      iex> Liquex.Filter.lstrip(" So much room for activities! ", %{})
      "So much room for activities! "
  """
  @spec lstrip(String.t(), Context.t()) :: String.t()
  def lstrip(value, _), do: to_string(value) |> String.trim_leading()

  @doc """
  Creates an array (`arr`) of values by extracting the values of a named property from another object (`key`).

  ## Examples

      iex> Liquex.Filter.map([%{"a" => 1}, %{"a" => 2, "b" => 1}], "a", %{})
      [1, 2]
  """
  @spec map([any], term, Context.t()) :: [any]
  def map(arr, key, _), do: Enum.map(arr, &Liquex.Indifferent.get(&1, key, nil))

  @doc """
  Subtracts a number from another number.

  ## Examples

      iex> Liquex.Filter.minus(4, 2, %{})
      2

      iex> Liquex.Filter.minus(183.357, 12, %{})
      171.357
  """
  @spec minus(number, number, Context.t()) :: number
  def minus(left, right, _), do: left - right

  @doc """
  Returns the remainder of a division operation.

  ## Examples

      iex> Liquex.Filter.modulo(3, 2, %{})
      1

      iex> Liquex.Filter.modulo(183.357, 12, %{})
      3.357
  """
  @spec modulo(number, number, Context.t()) :: number
  # Float remainder is rounded to 5 decimal places to hide fmod's binary noise.
  def modulo(left, right, _) when is_float(left) or is_float(right),
    do: :math.fmod(left, right) |> Float.round(5)

  def modulo(left, right, _), do: rem(left, right)

  @doc """
  Replaces every newline (\n) in a string with an HTML line break (<br />).

  ## Examples

      iex> Liquex.Filter.newline_to_br("\\nHello\\nthere\\n", %{})
      "<br />\\nHello<br />\\nthere<br />\\n"
  """
  @spec newline_to_br(String.t(), Context.t()) :: String.t()
  def newline_to_br(value, _), do: String.replace(to_string(value), "\n", "<br />\n")

  @doc """
  Adds a number to another number.

  ## Examples

      iex> Liquex.Filter.plus(4, 2, %{})
      6

      iex> Liquex.Filter.plus(183.357, 12, %{})
      195.357
  """
  def plus(left, right, _), do: left + right

  @doc """
  Adds the specified string to the beginning of another string.

  ## Examples

      iex> Liquex.Filter.prepend("apples, oranges, and bananas", "Some fruit: ", %{})
      "Some fruit: apples, oranges, and bananas"

      iex> Liquex.Filter.prepend("/index.html", "example.com", %{})
      "example.com/index.html"
  """
  def prepend(value, prepender, _), do: to_string(prepender) <> to_string(value)

  @doc """
  Removes every occurrence of the specified substring from a string.

  ## Examples

      iex> Liquex.Filter.remove("I strained to see the train through the rain", "rain", %{})
      "I sted to see the t through the "
  """
  def remove(value, original, context), do: replace(value, original, "", context)

  @doc """
  Removes every occurrence of the specified substring from a string.

  ## Examples

      iex> Liquex.Filter.remove_first("I strained to see the train through the rain", "rain", %{})
      "I sted to see the train through the rain"
  """
  def remove_first(value, original, context), do: replace_first(value, original, "", context)

  @doc """
  Replaces every occurrence of the first argument in a string with the second argument.

  ## Examples

      iex> Liquex.Filter.replace("Take my protein pills and put my helmet on", "my", "your", %{})
      "Take your protein pills and put your helmet on"
  """
  def replace(value, original, replacement, _),
    do: String.replace(to_string(value), to_string(original), to_string(replacement))

  @doc """
  Replaces only the first occurrence of the first argument in a string with the second argument.

  ## Examples

      iex> Liquex.Filter.replace_first("Take my protein pills and put my helmet on", "my", "your", %{})
      "Take your protein pills and put my helmet on"
  """
  def replace_first(value, original, replacement, _),
    do:
      String.replace(to_string(value), to_string(original), to_string(replacement), global: false)

  @doc """
  Reverses the order of the items in an array. reverse cannot reverse a string.

  ## Examples

      iex> Liquex.Filter.reverse(~w(apples oranges peaches plums), %{})
      ["plums", "peaches", "oranges", "apples"]
  """
  def reverse(arr, _) when is_list(arr), do: Enum.reverse(arr)

  @doc """
  Rounds a number to the nearest integer or, if a number is passed as an argument, to that number of decimal places.

  ## Examples

      iex> Liquex.Filter.round(1, %{})
      1

      iex> Liquex.Filter.round(1.2, %{})
      1

      iex> Liquex.Filter.round(2.7, %{})
      3

      iex> Liquex.Filter.round(183.357, 2, %{})
      183.36
  """
  # Bodiless head declares the default precision for the clauses below.
  def round(value, precision \\ 0, context)
  def round(value, _, _) when is_integer(value), do: value
  def round(value, 0, _), do: value |> Float.round() |> trunc()
  def round(value, precision, _), do: Float.round(value, precision)

  @doc """
  Removes all whitespace (tabs, spaces, and newlines) from the right side of a string.
  It does not affect spaces between words.

  ## Examples

      iex> Liquex.Filter.rstrip(" So much room for activities! ", %{})
      " So much room for activities!"
  """
  def rstrip(value, _), do: to_string(value) |> String.trim_trailing()

  # NOTE: `size/2` (collection/string length) coexists with `size/3` further
  # down (image size helper) — they are unrelated despite sharing a name.
  @doc """
  Returns the number of characters in a string or the number of items in an array.

  ## Examples

      iex> Liquex.Filter.size("Ground control to Major Tom.", %{})
      28

      iex> Liquex.Filter.size(~w(apples oranges peaches plums), %{})
      4
  """
  def size(value, _) when is_list(value), do: length(value)
  def size(value, _), do: String.length(to_string(value))

  @doc """
  Returns a substring of 1 character beginning at the index specified by the
  first argument. An optional second argument specifies the length of the
  substring to be returned.

  ## Examples

      iex> Liquex.Filter.slice("Liquid", 0, %{})
      "L"

      iex> Liquex.Filter.slice("Liquid", 2, %{})
      "q"

      iex> Liquex.Filter.slice("Liquid", 2, 5, %{})
      "quid"

  If the first argument is a negative number, the indices are counted from
  the end of the string:

  ## Examples

      iex> Liquex.Filter.slice("Liquid", -3, 2, %{})
      "ui"
  """
  def slice(value, start, length \\ 1, _),
    do: String.slice(to_string(value), start, length)

  @doc """
  Sorts items in an array in case-sensitive order.

  ## Examples

      iex> Liquex.Filter.sort(["zebra", "octopus", "giraffe", "Sally Snake"], %{})
      ["Sally Snake", "giraffe", "octopus", "zebra"]
  """
  def sort(list, _), do: Liquex.Collection.sort(list)
  def sort(list, field_name, _), do: Liquex.Collection.sort(list, field_name)

  @doc """
  Sorts items in an array in case-insensitive order.

  ## Examples

      iex> Liquex.Filter.sort_natural(["zebra", "octopus", "giraffe", "Sally Snake"], %{})
      ["giraffe", "octopus", "Sally Snake", "zebra"]
  """
  def sort_natural(list, _), do: Liquex.Collection.sort_case_insensitive(list)

  def sort_natural(list, field_name, _),
    do: Liquex.Collection.sort_case_insensitive(list, field_name)

  @doc """
  Divides a string into an array using the argument as a separator. split is
  commonly used to convert comma-separated items from a string to an array.

  ## Examples

      iex> Liquex.Filter.split("John, Paul, George, Ringo", ", ", %{})
      ["John", "Paul", "George", "Ringo"]
  """
  def split(value, separator, _), do: String.split(to_string(value), to_string(separator))

  @doc """
  Removes all whitespace (tabs, spaces, and newlines) from both the left and
  right side of a string. It does not affect spaces between words.

  ## Examples

      iex> Liquex.Filter.strip(" So much room for activities! ", %{})
      "So much room for activities!"
  """
  def strip(value, _), do: String.trim(to_string(value))

  @doc """
  Removes any HTML tags from a string.

  ## Examples

      iex> Liquex.Filter.strip_html("Have <em>you</em> read <strong>Ulysses</strong>?", %{})
      "Have you read Ulysses?"
  """
  def strip_html(value, _), do: HtmlSanitizeEx.strip_tags(to_string(value))

  @doc """
  Removes any newline characters (line breaks) from a string.

  ## Examples

      iex> Liquex.Filter.strip_newlines("Hello\\nthere", %{})
      "Hellothere"
  """
  def strip_newlines(value, _) do
    to_string(value)
    |> String.replace("\r", "")
    |> String.replace("\n", "")
  end

  @doc """
  Multiplies a number by another number.

  ## Examples

      iex> Liquex.Filter.times(3, 4, %{})
      12

      iex> Liquex.Filter.times(24, 7, %{})
      168

      iex> Liquex.Filter.times(183.357, 12, %{})
      2200.284
  """
  def times(value, divisor, _), do: value * divisor

  @doc """
  Shortens a string down to the number of characters passed as an argument. If
  the specified number of characters is less than the length of the string, an
  ellipsis (…) is appended to the string and is included in the character
  count.

  ## Examples

      iex> Liquex.Filter.truncate("Ground control to Major Tom.", 20, %{})
      "Ground control to..."

      iex> Liquex.Filter.truncate("Ground control to Major Tom.", 25, ", and so on", %{})
      "Ground control, and so on"

      iex> Liquex.Filter.truncate("Ground control to Major Tom.", 20, "", %{})
      "Ground control to Ma"
  """
  def truncate(value, length, ellipsis \\ "...", _) do
    value = to_string(value)

    if String.length(value) <= length do
      value
    else
      # The ellipsis counts toward the requested length.
      String.slice(
        value,
        0,
        length - String.length(ellipsis)
      ) <> ellipsis
    end
  end

  @doc """
  Shortens a string down to the number of characters passed as an argument. If
  the specified number of characters is less than the length of the string, an
  ellipsis (…) is appended to the string and is included in the character
  count.

  ## Examples

      iex> Liquex.Filter.truncatewords("Ground control to Major Tom.", 3, %{})
      "Ground control to..."

      iex> Liquex.Filter.truncatewords("Ground control to Major Tom.", 3, "--", %{})
      "Ground control to--"

      iex> Liquex.Filter.truncatewords("Ground control to Major Tom.", 3, "", %{})
      "Ground control to"
  """
  def truncatewords(value, length, ellipsis \\ "...", _) do
    value = to_string(value)
    words = value |> String.split()

    if length(words) <= length do
      value
    else
      sentence =
        words
        |> Enum.take(length)
        |> Enum.join(" ")

      sentence <> ellipsis
    end
  end

  @doc """
  Removes any duplicate elements in an array.

  ## Examples

      iex> Liquex.Filter.uniq(~w(ants bugs bees bugs ants), %{})
      ["ants", "bugs", "bees"]
  """
  def uniq(list, _), do: Enum.uniq(list)

  @doc """
  Makes each character in a string uppercase. It has no effect on strings
  which are already all uppercase.

  ## Examples

      iex> Liquex.Filter.upcase("<NAME>", %{})
      "<NAME>"

      iex> Liquex.Filter.upcase("APPLE", %{})
      "APPLE"
  """
  def upcase(value, _), do: String.upcase(to_string(value))

  @doc """
  Decodes a string that has been encoded as a URL or by url_encode/2.

  ## Examples

      iex> Liquex.Filter.url_decode("%27Stop%21%27+said+Fred", %{})
      "'Stop!' said Fred"
  """
  def url_decode(value, _), do: URI.decode_www_form(to_string(value))

  @doc """
  Decodes a string that has been encoded as a URL or by url_encode/2.

  ## Examples

      iex> Liquex.Filter.url_encode("<EMAIL>", %{})
      "john%40liquid.com"

      iex> Liquex.Filter.url_encode("Tetsuro Takara", %{})
      "Tetsuro+Takara"
  """
  def url_encode(value, _), do: URI.encode_www_form(to_string(value))

  @doc """
  Creates an array including only the objects with a given property value, or
  any truthy value by default.

  ## Examples

      iex> Liquex.Filter.where([%{"b" => 2}, %{"b" => 1}], "b", 1, %{})
      [%{"b" => 1}]
  """
  def where(list, key, value, _), do: Liquex.Collection.where(list, key, value)

  @doc """
  Creates an array including only the objects with a given truthy property value

  ## Examples

      iex> Liquex.Filter.where([%{"b" => true, "value" => 1}, %{"b" => 1, "value" => 2}, %{"b" => false, "value" => 3}], "b", %{})
      [%{"b" => true, "value" => 1}, %{"b" => 1, "value" => 2}]
  """
  def where(list, key, _), do: Liquex.Collection.where(list, key)

  @doc """
  Converts `value` timestamp into another date `format`.

  The format for this syntax is the same as strftime. The input uses the same format as Ruby’s Time.parse.

  ## Examples

      iex> Brando.Lexer.Filter.date(~D[2000-01-01], "%m/%d/%Y", %{})
      "01/01/2000"

      iex> Brando.Lexer.Filter.date(~N[2020-07-06 15:00:00.000000], "%m/%d/%Y", %{})
      "07/06/2020"

      iex> Brando.Lexer.Filter.date(~U[2020-07-06 15:00:00.000000Z], "%m/%d/%Y", %{})
      "07/06/2020"
  """
  def date(%Date{} = value, format, _), do: Utils.Datetime.format_datetime(value, format, nil)

  # DateTimes are shifted into the app's configured timezone before formatting.
  def date(%DateTime{} = value, format, _) do
    value
    |> DateTime.shift_zone!(Brando.timezone())
    |> Utils.Datetime.format_datetime(format, nil)
  end

  def date(%NaiveDateTime{} = value, format, _),
    do: Utils.Datetime.format_datetime(value, format, nil)

  def date("now", format, context), do: date(DateTime.utc_now(), format, context)
  def date("today", format, context), do: date(Date.utc_today(), format, context)

  # NOTE(review): `DateTime.from_iso8601/1` returns `{:ok, dt, offset}` or
  # `{:error, reason}`, and that tagged tuple is piped straight into
  # `format_datetime` — this looks like a missing pattern match. Confirm.
  def date(value, format, _) when is_binary(value) do
    value
    |> DateTime.from_iso8601()
    |> Utils.Datetime.format_datetime(format, nil)
  end

  # {{ entry.inserted_at | date:"%-d. %B %Y","no" }}
  # = 4. januar 2022
  def date(%DateTime{} = value, format, locale, _) do
    Utils.Datetime.format_datetime(value, format, locale)
  end

  def date(value, format, locale, _) do
    Utils.Datetime.format_datetime(value, format, locale)
  end

  # Pretty-prints any value (debugging helper). Calls Kernel.inspect explicitly
  # since the local definition shadows the auto-imported inspect/2.
  def inspect(value, _), do: "#{Kernel.inspect(value, pretty: true)}"

  # Extracts nested rows from a table-like block structure.
  def rows(%{data: %{data: %{rows: rows}}}, _), do: rows

  def humanize(value, _), do: Brando.Utils.humanize(value)

  @doc """
  Get key from image.

  It is prefered to use |size:"thumb" instead of this, but keeping these for backwards
  compatibility

  TODO: Remove before 1.0
  """
  @deprecated "Use `|size:\"large\"` instead"
  def large(%Brando.Images.Image{} = img, _) do
    assigns = %{
      src: img,
      opts: [
        key: :large,
        prefix: Brando.Utils.media_url()
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  @deprecated "Use `|size:\"large\"` instead"
  def large(img, _) do
    assigns = %{
      src: img,
      opts: [
        key: :large
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  @deprecated "Use `|size:\"xlarge\"` instead"
  def xlarge(%Brando.Images.Image{} = img, _) do
    assigns = %{
      src: img,
      opts: [
        key: :xlarge,
        prefix: Brando.Utils.media_url()
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  @deprecated "Use `|size:\"xlarge\"` instead"
  def xlarge(img, _) do
    assigns = %{
      src: img,
      opts: [
        key: :xlarge
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  @doc """
  Get sized version of image
  """
  def size(%Brando.Images.Image{} = img, size, _) do
    assigns = %{
      src: img,
      opts: [
        key: size,
        prefix: Brando.Utils.media_url()
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  def size(img, size, _) do
    assigns = %{
      src: img,
      opts: [
        key: size
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  @doc """
  Get srcset picture of image

      {{ entry.cover|srcset:"Attivo.Team.Employee:cover" }}
      {{ entry.cover|srcset:"Attivo.Team.Employee:cover.listing_crop" }}
  """
  def srcset(%struct_type{} = img, srcset, _)
      when struct_type in [Brando.Images.Image] do
    assigns = %{
      src: img,
      opts: [
        placeholder: :svg,
        lazyload: true,
        srcset: srcset,
        prefix: Brando.Utils.media_url(),
        cache: img.updated_at
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  def srcset(img, srcset, _) do
    assigns = %{
      src: img,
      opts: [
        placeholder: :svg,
        lazyload: true,
        srcset: srcset
      ]
    }

    ~H"""
    <Brando.HTML.picture src={@src} opts={@opts} />
    """
    |> Phoenix.LiveViewTest.rendered_to_string()
  end

  # Formats a byte count as a human-readable size string.
  def filesize(size, _) do
    Brando.Utils.human_size(size)
  end

  @doc """
  Get entry publication date by publish_at OR inserted_at
  """
  def publish_date(%{publish_at: publish_at}, format, locale, _)
      when not is_nil(publish_at) do
    Utils.Datetime.format_datetime(publish_at, format, locale)
  end

  def publish_date(%{inserted_at: inserted_at}, format, locale, _) do
    Utils.Datetime.format_datetime(inserted_at, format, locale)
  end

  @doc """
  Attempt to get `entry`'s absolute URL through blueprint
  """
  def absolute_url(%{__struct__: schema} = entry, _) do
    schema.__absolute_url__(entry)
  end

  @doc """
  Prefix media url to file/image
  """
  def media_url(%Brando.Files.File{} = file, _) do
    Utils.file_url(file)
  end

  def media_url(%Brando.Images.Image{} = img, _) do
    Brando.Utils.img_url(img, :original, prefix: Brando.Utils.media_url())
  end

  # Returns the entry's schema module as a string, or nil for non-structs.
  def schema(%{__struct__: schema}, _) do
    to_string(schema)
  end

  def schema(_, _) do
    nil
  end

  # Renders nothing in the frontend; used to suppress output of child entries
  # (see the `renderless` usage documented in Brando.Content.Module).
  def renderless(_, _) do
    ""
  end

  @doc """
  Get src of image
  """
  def src(%Brando.Images.Image{} = img, size, _) do
    Brando.Utils.img_url(img, size, prefix: Brando.Utils.media_url())
  end

  def src(img, size, _) do
    Brando.Utils.img_url(img, size)
  end

  def orientation(value, _), do: Brando.Images.get_image_orientation(value)

  @doc """
  Converts from markdown

  ## Examples

      iex> Brando.Lexer.Filter.markdown("this is a **string**", %{}) |> String.trim("\\n")
      "<p>this is a <strong>string</strong></p>"
  """
  def markdown(%{value: str}, opts), do: markdown(str, opts)

  def markdown(str, _) when is_binary(str) do
    str
    |> Brando.HTML.render_markdown()
    |> Phoenix.HTML.safe_to_string()
  end

  @doc """
  Checks if `url` matches the current entry's `url`
  """
  def active(url, ctx) do
    # `if` binds the looked-up value; falsy (nil) means no current URL in ctx.
    if absolute_url = Map.get(ctx.variables, "url") do
      # Normalize to a leading-slash path before comparing.
      url = (String.starts_with?(url, "/") && url) || "/#{url}"
      (url == absolute_url && "active") || ""
    else
      ""
    end
  end

  def slugify(nil, _) do
    ""
  end

  def slugify(str, _) when is_binary(str) do
    Brando.Utils.slugify(str)
  end
end
|
lib/brando/villain/filters.ex
| 0.891439 | 0.408808 |
filters.ex
|
starcoder
|
defmodule Brando.Content.Module do
  @moduledoc """
  Ecto schema for the Villain Content Module schema

  A module can hold a setup for multiple blocks.

  ## Multi module

  A module can be setup as a multi module, meaning it can contain X other entries.
  If the entry template is not floating your boat, you can access the child entries directly
  from your main module's code:

  ```
  {% for link in entries %}
    <h2>{{ link.data.vars.header_text }}</h2>
  {% endfor %}

  {{ content | renderless }}
  ```

  We include `{{ content | renderless }}` at the bottom to show the proper UI for the
  child entries in the admin area, but since it runs through the `renderless` filter,
  it will be excluded from rendering in the frontend.
  """
  @type t :: %__MODULE__{}

  use Brando.Blueprint,
    application: "Brando",
    domain: "Content",
    schema: "Module",
    singular: "module",
    plural: "modules",
    gettext_module: Brando.Gettext

  import Brando.Gettext
  alias Brando.Content.Var

  # Template used by the admin UI to label an entry of this schema.
  identifier "{{ entry.name }}"

  # Fields included when a module is JSON-encoded.
  @derived_fields ~w(id name sequence namespace help_text wrapper class code refs vars svg deleted_at)a
  @derive {Jason.Encoder, only: @derived_fields}

  trait Brando.Trait.Sequenced
  trait Brando.Trait.SoftDelete
  trait Brando.Trait.Timestamped
  trait Brando.Trait.CastPolymorphicEmbeds

  attributes do
    attribute :name, :string, required: true
    attribute :namespace, :string, required: true
    attribute :help_text, :text, required: true
    attribute :class, :string, required: true
    attribute :code, :text, required: true
    attribute :svg, :text
    attribute :wrapper, :boolean

    # Polymorphic embeds: each var's concrete struct is selected by its
    # `:type` field; unknown types raise rather than being silently dropped.
    attribute :vars, {:array, Brando.PolymorphicEmbed},
      types: Var.types(),
      type_field: :type,
      on_type_not_found: :raise,
      on_replace: :delete
  end

  relations do
    relation :entry_template, :embeds_one, module: __MODULE__.EmbeddedModule, on_replace: :delete
    relation :refs, :embeds_many, module: __MODULE__.Ref, on_replace: :delete
  end

  listings do
    listing do
      # Admin list ordering: namespace, then sequence, newest first as tiebreak.
      listing_query %{
        order: [{:asc, :namespace}, {:asc, :sequence}, {:desc, :inserted_at}]
      }

      filters([
        [label: t("Name"), filter: "name"],
        [label: t("Namespace"), filter: "namespace"],
        [label: t("Class"), filter: "class"]
      ])

      actions([
        [label: t("Edit module"), event: "edit_entry"],
        [
          label: t("Delete module"),
          event: "delete_entry",
          confirm: t("Are you sure?")
        ],
        [label: t("Duplicate module"), event: "duplicate_entry"]
      ])

      # Listing row layout: svg preview (2 cols), namespace badge (3 cols),
      # name/help-text link (9 cols).
      template(
        """
        <div class="svg">{{ entry.svg }}</div><br>
        """,
        columns: 2
      )

      template(
        """
        <div class="badge">{{ entry.namespace }}</div><br>
        """,
        columns: 3
      )

      template(
        """
        <a
          data-phx-link="redirect"
          data-phx-link-state="push"
          href="/admin/config/content/modules/update/{{ entry.id }}"
          class="entry-link">
          {{ entry.name }}
        </a>
        <br>
        <small>{{ entry.help_text }}</small>
        """,
        columns: 9
      )
    end
  end

  translations do
    context :naming do
      translate :singular, t("module")
      translate :plural, t("modules")
    end
  end

  # Default attribute set used by test factories.
  factory %{
    class: "header middle",
    code: """
    <article data-v="text center" data-moonwalk-section>
      <div class="inner" data-moonwalk>
        <div class="text">
          {% ref refs.H2 %}
        </div>
      </div>
    </article>
    """,
    deleted_at: nil,
    help_text: "Help Text",
    name: "Heading",
    namespace: "general",
    refs: [
      %{
        "data" => %{
          "data" => %{
            "class" => nil,
            "id" => nil,
            "level" => 2,
            "text" => "Heading here"
          },
          "type" => "header"
        },
        "description" => "A heading",
        "name" => "H2"
      }
    ],
    vars: [],
    wrapper: false,
    uid: "abcdef"
  }
end
|
lib/brando/content/module.ex
| 0.796134 | 0.772402 |
module.ex
|
starcoder
|
defmodule Rps.Games.Leaderboard do
@moduledoc """
The Leaderboard can be implemented in different ways, it depends on what
problem we are trying to optimize. The common approach might be a single
`ordered_set` table (sorted table) where the key is the score and the value
is a list of users with that score. The problem with this approach is:
1) Given the case, the value might be huge, a long list of users
2) Every time we have to update a score for an user, we have to get
the list of users (the value) for that score (the key), update
that list (write operation) and then find the previous score for
that user, get the value and remove the user from that list
(another read and write operation).
3) Additionally, because we are storing several users in the same key,
at some point we can have high concurrency in a particular key and
maybe inconsistency issues. To prevent this, we have to disable
read and write concurrency on that table, and additionally perform
all previous mentioned operation in a transactional context
(to avoid inconsistency issues); transactions might be implemented
using `:global` module.
Therefore, the approach implemented here tries to provide a good trade off,
in this case we are using two tables, one `ordered_set` to store only the
scores in a sorted way; the key and value is the same store. The other table
is a `duplicated_bag` where the key is the score and the value is the user
or player, since the key is the score, it can be duplicated multiple times,
that's why we are using a `duplicated_bag`. With this approach:
1) Enable read and write concurrence, since every key is related to only
one user, hence, there is not going to be multiple different users
accessing the same key (like in the previous mentioned approach).
Even in the `ordered_set` we can also allow read and write concurrency,
in this case it doesn't matter multiple users updating the same key,
since the kay and value are the same for all of them.
2) For the `duplicated_bag` table, either reads and writes are performed
in constant complexity. In the previous approach since it is an
`ordered_set`, all operations are performed in logaritmic complexity
(it is implemented as AVL Tree).
3) Perhaps the downside is we are using an extra ETS table, but again,
it is matter of find the best trade off according to our needs.
**Considerations and/or Assumptions**
For the `update_player_score` function, there is one thing to consider
and it is how to track the previous score (it doesn't matter the approach
we take). We can assume the previous score is the given one minus one,
since a game is managed by one FSM and the score is incremented by one.
But, in order to be more flexible, we can have another table to track
that value, it might be a `set` whwre the key is the username of the
player and the value the current score, so every thime we update the
score, we can update that counter and then retrieve the previous one,
but we have to initialize the counter with the current player scores.
So, for purposes of the exercice, the function is implemented in the
simplest way (assuming the previous score as the given one minus one).
This implementation doesn't works in distributed fashion, for purposes of
the exercice it works only locally (single-node), this is a nice-to-have
but it might be more challenging to implement using only ETS tables,
specially `ordered_set` tables. So currently, the way to get that info
(about the leaderboard /ranking) in distributed way is rely on the DB,
since we have in the DB the score for every user, we can perform a query
to get that info.
"""
use GenServer
@ets_opts [
:public,
:named_table,
{:read_concurrency, true},
{:write_concurrency, true}
]
## API
@spec start_link() :: GenServer.on_start
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@spec update_player_score(String.t, integer) :: :ok | no_return()
def update_player_score(username, score) do
update_player_score(username, score, score - 1)
end
@spec update_player_score(String.t, integer, integer) :: :ok | no_return()
def update_player_score(username, score, prev_score) do
true = :ets.delete_object(:player_scores, {prev_score, username})
true = :ets.insert(:player_scores, {score, username})
true = :ets.insert(:scores, {score, score})
:ok
end
@spec ranking() :: [{score :: integer, [username :: String.t]}]
def ranking() do
:ets.foldl(fn({score, _}, acc) ->
players = for {_, p} <- :ets.lookup(:player_scores, score), do: p
[{score, players} | acc]
end, [], :scores)
end
@spec flush() :: :ok
def flush do
true = :ets.delete_all_objects(:scores)
true = :ets.delete_all_objects(:player_scores)
:ok
end
## GenServer Callbacks
@impl true
def init(_arg) do
:ok = create_tables()
{:ok, %{}}
end
## Private Functions
defp create_tables() do
:scores = :ets.new(:scores, [:ordered_set | @ets_opts])
:player_scores = :ets.new(:player_scores, [:duplicate_bag | @ets_opts])
:ok
end
end
|
lib/rps/games/leaderboard.ex
| 0.861655 | 0.852568 |
leaderboard.ex
|
starcoder
|
defmodule HLDSLogs.LogProducer do
  @moduledoc """
  A `GenStage` producer that connects to a HLDS server and sets up log forwarding to itself. You should only call this
  module directly if you want to manage the supervision of these processes yourself, otherwise the functions in
  `HLDSLogs` will create processes under a dynamic supervisor.
  When a process is started, it is provided with host:port information for the HLDS server, and self-referential host:port
  information to provide to the HLDS server. The self-referential host information must be reachable from the HLDS server.
  ## Log Forwarding
  HLDS provides a mechanism for forwarding logged messages over UDP to a designated host:port. This is set up with the
  `logaddress` console commands. This module will use `HLDSRcon` to set up an rcon connection to the HLDS server, then it
  will issue the `logaddress_add` command with the self-referential host information it was provided - this will cause
  HLDS to forward all log entries to this process.
  ## Producer
  This producer will create a single event, represented by the `HLDSLogs.LogEntry` struct, for each log entry it receives.
  It does not respond to consumer demand, and instead creates events as soon as possible, based on the log activity from
  the HLDS server.
  ## Example Consumer
  This example module is set up to simply forward log bodies from HLDS to `Logger.info/1`.
  ```
  defmodule LoggerConsumer do
    use GenStage
    require Logger
    def start_link() do
      GenStage.start_link(__MODULE__, :ok)
    end
    def init(:ok) do
      {:consumer, nil}
    end
    def handle_events(events, _from, nil) do
      events
      |> Enum.map(fn log_entry -> log_entry.body end)
      |> Enum.map(&Logger.info/1)
      {:noreply, [], nil}
    end
  end
  ```
  """
  use GenStage

  # Prefix for the :global registration name; see get_global_name/2.
  @global_name_prefix "HLDSLogs.LogProducer:"
  # HLDS console command that registers a UDP log sink.
  @logaddress_add_command "logaddress_add"

  alias HLDSRcon.ServerInfo
  alias HLDSLogs.ListenInfo

  @doc """
  Creates a producer that will connect to the server.
  Server and listener information is given in a tuple, where the first element is a `HLDSRcon.ServerInfo` struct defining
  the HLDS host:port information, and the second element in the tuple is a `HLDSLogs.ListenInfo` struct defining the host:port
  information for this process.
  The `HLDSLogs.ListenInfo` struct is used to create a UDP socket by this process, and for informing the HLDS server where
  to forward logs. If a port is not specified, a port will be selected by the OS.
  """
  @spec start_link({%HLDSRcon.ServerInfo{}, %HLDSLogs.ListenInfo{}}) :: GenServer.on_start()
  def start_link({%ServerInfo{} = server_info, %ListenInfo{} = listen_info}) do
    GenStage.start_link(
      __MODULE__,
      {server_info, listen_info},
      name: {
        :global, get_global_name(server_info, listen_info)
      }
    )
  end

  @doc false
  def init({
    %ServerInfo{} = server_info,
    %ListenInfo{
      port: listen_port
    } = listen_info
  }) do
    # Open the UDP socket first so we know the real port before asking HLDS
    # to forward logs to it.
    {:ok, socket} = :gen_udp.open(listen_port, [:binary, active: true])
    # Could be OS assigned if 0, not necessarily the same as listen_port
    {:ok, assigned_port} = :inet.port(socket)
    :ok = establish_logaddress(server_info, listen_info, assigned_port)
    {
      :producer,
      %{
        server_info: server_info,
        listen_info: listen_info,
        assigned_port: assigned_port,
        socket: socket
      }
    }
  end

  @doc """
  Get the UDP port used by the processes socket to receive log messages. Useful to determine port when OS assigned.
  """
  def handle_call(:get_port, _from, state) do
    {:reply, state.assigned_port, [], state}
  end

  @doc false
  def handle_info({:udp, _socket, _address, _port, data}, state) do
    # A single datagram may contain several log lines; split on printable
    # chunks, drop invalid/empty pieces, and parse each into a LogEntry.
    {
      :noreply,
      data
      |> String.chunk(:printable)
      |> Enum.filter(&String.valid?/1)
      |> Enum.filter(fn s -> s != "" end)
      |> Enum.map(&HLDSLogs.LogEntry.from/1)
      |> Enum.filter(fn s -> s != nil end),
      state
    }
  end

  @doc false
  def handle_demand(_demand, state) do
    # Push-based producer: events are emitted as logs arrive, not on demand.
    {:noreply, [], state}
  end

  # Registers this process as a log sink with the HLDS server via rcon.
  # (Note: @doc is not allowed on defp - it was previously set here and
  # silently discarded with a compiler warning.)
  defp establish_logaddress(%ServerInfo{} = server_info, %ListenInfo{
    host: listen_host
  }, assigned_port) do
    {:ok, _pid} = HLDSRcon.connect(server_info)
    {:ok, _resp} = HLDSRcon.command(
      server_info.host,
      server_info.port,
      @logaddress_add_command <> " " <> listen_host <> " " <> Integer.to_string(assigned_port)
    )
    :ok
  end

  # Builds a :global name unique per {server, listener} pair so the same
  # forwarding route is never registered twice.
  defp get_global_name(%ServerInfo{} = server_info, %ListenInfo{} = listen_info) do
    @global_name_prefix <> "from:" <> server_info.host <> ":" <> Integer.to_string(server_info.port) <> ":to:" <> listen_info.host <> ":" <> Integer.to_string(listen_info.port)
  end
end
|
lib/hlds_logs/log_producer.ex
| 0.827759 | 0.738009 |
log_producer.ex
|
starcoder
|
defmodule HtmlWriter do
  @moduledoc """
  Provide helper macros to write html programmatically into a chardata.
  All macros in this module take a chardata as first argument and put more data in it
  as the return value.
  """

  @doc ~S"""
  helper macro to bind a value to a name, in order to maintain flow of the pipe operator.
  For example:
  ```elixir
  v
  |> do_someting()
  |> do_something_else()
  |> bind_to(v)
  ```
  This feels better than doing the assignment
  """
  defmacro bind_to(value, name) do
    quote do
      unquote(name) = unquote(value)
    end
  end

  @doc ~S"""
  This is Enum.reduce with first 2 arguments switched order.
  For example:
  ```elixir
  h
  |> roll_in(list, fn item, h ->
    h
    |> do_something_with(item)
  end)
  ```
  Using this one can maintain flow of the pipe chain.
  """
  defmacro roll_in(s, enum, function) do
    quote do
      Enum.reduce(unquote(enum), unquote(s), unquote(function))
    end
  end

  @doc ~S"""
  Invoke the func with s. This is used to keep the pipe flowing
  """
  defmacro invoke(s, func) do
    quote do
      unquote(func).(unquote(s))
    end
  end

  @doc ~S"""
  start with minimum boilerplate
  """
  defmacro new_html() do
    quote do
      ["<!DOCTYPE html>\n"]
    end
  end

  @doc ~S"""
  Just add some text. Please note no text is escaped
  """
  defmacro text(s, text) do
    quote do
      [unquote(text) | unquote(s)]
    end
  end

  @doc ~S"""
  export the data.
  Since the data is a list, so all it does for now is Enum.reverse()
  """
  defmacro export(s) do
    quote do
      Enum.reverse(unquote(s))
    end
  end

  @doc ~S"""
  build a html fragment with a builder function
  """
  def fragment(f), do: [] |> f.() |> export()

  @doc ~S"""
  build a html fragment with a predefined header string or io_list
  """
  def fragment(header, f) when is_binary(header), do: [header] |> f.() |> export()

  def fragment(header, f) when is_list(header) do
    # we need to reverse the header first because the builder will prepend
    # and export will reverse
    header
    |> Enum.reverse()
    |> f.()
    |> export()
  end

  @doc ~S"""
  escape the string to be HTML safe
  """
  def escape(str) when is_binary(str) do
    IO.iodata_to_binary(to_iodata(str, 0, str, []))
  end

  # following code lifted from Plug.HTML
  # NOTE: the replacement strings are HTML entities; the previous version of
  # this table had been corrupted into literal characters (`?<` -> "<" etc.),
  # which made escaping a no-op and broke compilation on the `?"` entry.
  escapes = [
    {?<, "&lt;"},
    {?>, "&gt;"},
    {?&, "&amp;"},
    {?", "&quot;"},
    {?', "&#39;"}
  ]

  # Fast path: scan the binary tracking a skip offset; unescaped runs are
  # copied out of the original binary in one binary_part/3 per run.
  for {match, insert} <- escapes do
    defp to_iodata(<<unquote(match), rest::bits>>, skip, original, acc) do
      to_iodata(rest, skip + 1, original, [acc | unquote(insert)])
    end
  end

  defp to_iodata(<<_char, rest::bits>>, skip, original, acc) do
    to_iodata(rest, skip, original, acc, 1)
  end

  defp to_iodata(<<>>, _skip, _original, acc) do
    acc
  end

  for {match, insert} <- escapes do
    defp to_iodata(<<unquote(match), rest::bits>>, skip, original, acc, len) do
      part = binary_part(original, skip, len)
      to_iodata(rest, skip + len + 1, original, [acc, part | unquote(insert)])
    end
  end

  defp to_iodata(<<_char, rest::bits>>, skip, original, acc, len) do
    to_iodata(rest, skip, original, acc, len + 1)
  end

  defp to_iodata(<<>>, 0, original, _acc, _len) do
    # Nothing needed escaping; return the original binary untouched.
    original
  end

  defp to_iodata(<<>>, skip, original, acc, len) do
    [acc | binary_part(original, skip, len)]
  end

  @doc ~S"""
  build a void-element, which is an element that should not have inner text. It may have attributes
  though. Don't call this unless you are making a custom element; use the element specific functions
  instead.
  tag is the tag name.
  attrs are a keyword list of attributes, each value can be a string, a list of strings, or nil
  """
  def tag(s, tag, attrs \\ []) do
    ["<#{tag}#{attr_string(attrs)}>\n" | s]
  end

  @doc ~S"""
  build a non-void element, which is an element that may have inner text. It may also have
  attributes. Don't call this unless you are making a custom element; use the element specific
  functions instead.
  tag is the tag name.
  inner can be nil, a string or a function with arity of 1 that builds inner text
  attrs are a keyword list of attributes, each value can be a string, a list of strings, or nil
  """
  def element(s, tag, content, attrs \\ [])

  def element(s, tag, nil, attrs) do
    ["<#{tag}#{attr_string(attrs)}></#{tag}>\n" | s]
  end

  def element(s, tag, text, attrs) when is_binary(text) do
    start_tag = "<#{tag}#{attr_string(attrs)}>"
    end_tag = "</#{tag}>\n"
    [end_tag | text([start_tag | s], text)]
  end

  def element(s, tag, func, attrs) when is_function(func, 1) do
    start_tag = "<#{tag}#{attr_string(attrs)}>\n"
    end_tag = "</#{tag}>\n"
    inner = [] |> func.() |> Enum.reverse()
    [end_tag, inner, start_tag] ++ s
  end

  defp attr_string(attrs) do
    attrs |> Enum.map(&one_attr_string/1) |> Enum.join()
  end

  # nil value -> bare attribute; list value -> space-joined attribute value.
  defp one_attr_string({key, value}) do
    case value do
      nil -> " #{key}"
      list when is_list(list) -> " #{key}=\"#{Enum.join(list, " ")}\""
      v -> " #{key}=\"#{v}\""
    end
  end

  defp one_attr_string({key}), do: " #{key}"

  [:meta, :link, :hr, :br, :img]
  |> Enum.each(fn k ->
    @doc ~s"""
    build void element #{to_string(k)}
    """
    defmacro unquote(k)(s, attrs \\ []) do
      str = to_string(unquote(k))
      quote do
        tag(unquote(s), unquote(str), unquote(attrs))
      end
    end
  end)

  [
    :html,
    :head,
    :body,
    :section,
    :article,
    :title,
    :style,
    :script,
    :h1,
    :h2,
    :h3,
    :h4,
    :h5,
    :h6,
    :p,
    :a,
    :nav,
    :div,
    :span,
    :em,
    :b,
    :i,
    :u,
    :blockquote,
    :del,
    :code,
    :strong,
    :ul,
    :ol,
    :li,
    :table,
    :tbody,
    :thead,
    :tr,
    :th,
    :td,
    :form,
    :input,
    :select,
    :option,
    :label,
    :textarea,
    :pre,
    :button
  ]
  |> Enum.each(fn k ->
    @doc ~s"""
    build non-void element #{to_string(k)}
    """
    defmacro unquote(k)(s, inner, attrs \\ []) do
      str = to_string(unquote(k))
      quote do
        element(unquote(s), unquote(str), unquote(inner), unquote(attrs))
      end
    end
  end)
end
|
lib/html_writer.ex
| 0.700075 | 0.815416 |
html_writer.ex
|
starcoder
|
defmodule Scenic.Primitive.Line do
  @moduledoc """
  Draw a line on the screen.
  ## Data
  `{point_a, point_b}`
  The data for a line is a tuple containing two points.
  * `point_a` - position to start drawing from
  * `point_b` - position to draw to
  ## Styles
  This primitive recognizes the following styles
  * [`hidden`](Scenic.Primitive.Style.Hidden.html) - show or hide the primitive
  * [`cap`](Scenic.Primitive.Style.Cap.html) - says how to draw the ends of the line.
  * [`stroke`](Scenic.Primitive.Style.Stroke.html) - stroke the outline of the primitive. In this case, only the curvy part.
  ## Usage
  You should add/modify primitives via the helper functions in
  [`Scenic.Primitives`](Scenic.Primitives.html#line/3)
  ```elixir
  graph
  |> line( {{0, 0}, {20, 40}}, stroke: {1, :yellow} )
  ```
  """
  use Scenic.Primitive
  alias Scenic.Script
  alias Scenic.Primitive
  alias Scenic.Primitive.Style

  @type t :: {{x0 :: number, y0 :: number}, {x1 :: number, y1 :: number}}
  @type styles_t :: [:hidden | :scissor | :stroke_width | :stroke_fill | :cap]

  @styles [:hidden, :scissor, :stroke_width, :stroke_fill, :cap]

  # --------------------------------------------------------
  @doc """
  Validates the data for a line: two `{x, y}` points with numeric coordinates.
  """
  @impl Primitive
  @spec validate(t()) ::
          {:ok, {{x0 :: number, y0 :: number}, {x1 :: number, y1 :: number}}}
          | {:error, String.t()}
  def validate({{x0, y0}, {x1, y1}} = data)
      when is_number(x0) and is_number(y0) and is_number(x1) and is_number(y1) do
    {:ok, data}
  end

  def validate(data) do
    # Bug fix: the error text previously referred to a "Rectangle".
    {
      :error,
      """
      #{IO.ANSI.red()}Invalid Line specification
      Received: #{inspect(data)}
      #{IO.ANSI.yellow()}
      The data for a Line is {{x0,y0}, {x1,y1}}#{IO.ANSI.default_color()}
      """
    }
  end

  # --------------------------------------------------------
  @doc """
  Returns a list of styles recognized by this primitive.
  """
  @impl Primitive
  @spec valid_styles() :: styles_t()
  def valid_styles(), do: @styles

  # --------------------------------------------------------
  @doc """
  Compile the data for this primitive into a mini script. This can be combined with others to
  generate a larger script and is called when a graph is compiled.
  """
  @impl Primitive
  @spec compile(primitive :: Primitive.t(), styles :: Style.t()) :: Script.t()
  def compile(%Primitive{module: __MODULE__, data: {{x0, y0}, {x1, y1}}}, %{stroke_fill: _}) do
    Script.draw_line([], x0, y0, x1, y1, :stroke)
  end

  # A line with no stroke draws nothing, so it compiles to an empty script.
  def compile(%Primitive{module: __MODULE__}, _styles), do: []

  # ============================================================================
  # --------------------------------------------------------
  def default_pin(data), do: centroid(data)

  # --------------------------------------------------------
  @doc """
  Returns the midpoint of the line. This is used as the default pin when applying
  rotate or scale transforms.
  """
  def centroid(data)

  def centroid({{x0, y0}, {x1, y1}}) do
    {
      (x0 + x1) / 2,
      (y0 + y1) / 2
    }
  end

  # -----------------------------------------
  # Bounding box of the line after projecting both endpoints through the
  # 4x4 transform matrix (a 64-byte binary).
  def bounds(data, mx, styles)

  def bounds({p0, p1}, <<_::binary-size(64)>> = mx, _styles) do
    [p0, p1]
    |> Scenic.Math.Vector2.project(mx)
    |> Scenic.Math.Vector2.bounds()
  end

  # --------------------------------------------------------
  @doc false
  def default_pin({{x0, y0}, {x1, y1}}, _styles) do
    {
      (x0 + x1) / 2,
      (y0 + y1) / 2
    }
  end
end
|
lib/scenic/primitive/line.ex
| 0.925525 | 0.897111 |
line.ex
|
starcoder
|
defmodule ABI.TypeEncoder do
  @moduledoc """
  `ABI.TypeEncoder` is responsible for encoding types to the format
  expected by Solidity. We generally take a function selector and an
  array of data and encode that array according to the specification.
  """
  @doc """
  Encodes the given data based on the function selector.
  ## Examples
      iex> [69, true]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: "baz",
      ...>        types: [
      ...>          {:uint, 32},
      ...>          :bool
      ...>        ],
      ...>        returns: :bool
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "cdcd77c000000000000000000000000000000000000000000000000000000000000000450000000000000000000000000000000000000000000000000000000000000001"
      iex> ["hello world"]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: nil,
      ...>        types: [
      ...>          :string,
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "000000000000000000000000000000000000000000000000000000000000000b68656c6c6f20776f726c64000000000000000000000000000000000000000000"
      iex> [{"awesome", true}]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: nil,
      ...>        types: [
      ...>          {:tuple, [:string, :bool]}
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000007617765736f6d6500000000000000000000000000000000000000000000000000"
      iex> [{17, true, <<32, 64>>}]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: nil,
      ...>        types: [
      ...>          {:tuple, [{:uint, 32}, :bool, {:bytes, 2}]}
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "000000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000012040000000000000000000000000000000000000000000000000000000000000"
      iex> [[17, 1]]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: "baz",
      ...>        types: [
      ...>          {:array, {:uint, 32}, 2}
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "3d0ec53300000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000001"
      iex> [<<1>>]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: "foo",
      ...>        types: [
      ...>          :bytes
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "30c8d1da000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000"
      iex> [[17, 1], true]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: nil,
      ...>        types: [
      ...>          {:array, {:uint, 32}, 2},
      ...>          :bool
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "000000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001"
      iex> [[17, 1]]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: nil,
      ...>        types: [
      ...>          {:array, {:uint, 32}}
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000001"
      iex> [[]]
      ...> |> ABI.TypeEncoder.encode(
      ...>      %ABI.FunctionSelector{
      ...>        function: nil,
      ...>        types: [
      ...>          {:array, {:uint, 32}}
      ...>        ]
      ...>      }
      ...>    )
      ...> |> Base.encode16(case: :lower)
      "0000000000000000000000000000000000000000000000000000000000000000"
  """
  @spec encode(list(), ABI.FunctionSelector.t()) :: binary()
  def encode(data, function_selector) do
    encode_method_id(function_selector) <> encode_head_and_data(data, function_selector.types)
  end
  @doc """
  Similar to `ABI.TypeEncoder.encode/2` except we accept
  an array of types instead of a function selector. We also
  do not pre-pend the method id.
  ## Examples
      iex> [{"awesome", true}]
      ...> |> ABI.TypeEncoder.encode_raw([{:tuple, [:string, :bool]}])
      ...> |> Base.encode16(case: :lower)
      "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000007617765736f6d6500000000000000000000000000000000000000000000000000"
  """
  def encode_raw(data, types) do
    do_encode(types, data, [])
  end
  # Implements the Solidity ABI head/tail encoding: static types go directly
  # into the head; dynamic types put a byte offset in the head and their
  # payload in the body (tail), which starts after `count(types) * 32` bytes.
  defp encode_head_and_data(data, types) do
    body_start = Enum.count(types) * 32
    {head, body, [], _} =
      Enum.reduce(
        types,
        {<<>>, <<>>, data, body_start},
        fn type, {head, body, data, body_position} ->
          {encoded, rest} = encode_type(type, data)
          if ABI.FunctionSelector.is_dynamic?(type) do
            # If we're a dynamic type, just add encoded length to the head and encoded value to the body
            {head <> encode_uint(body_position, 256), body <> encoded, rest,
             body_position + byte_size(encoded)}
          else
            # If we're a static type, simply add encoded value to the head
            {head <> encoded, body, rest, body_position}
          end
        end
      )
    head <> body
  end
  @spec encode_method_id(%ABI.FunctionSelector{}) :: binary()
  defp encode_method_id(%ABI.FunctionSelector{function: nil}), do: ""
  defp encode_method_id(function_selector) do
    # Encode selector e.g. "baz(uint32,bool)" and take keccak
    kec =
      function_selector
      |> ABI.FunctionSelector.encode()
      |> ExthCrypto.Hash.Keccak.kec()
    # Take first four bytes
    <<init::binary-size(4), _rest::binary>> = kec
    # That's our method id
    init
  end
  # Encodes each type in turn, threading the remaining data list through and
  # accumulating encoded chunks in reverse for a single final flatten.
  @spec do_encode([ABI.FunctionSelector.type()], [any()], [binary()]) :: binary()
  defp do_encode([], _, acc), do: :erlang.iolist_to_binary(Enum.reverse(acc))
  defp do_encode([type | remaining_types], data, acc) do
    {encoded, remaining_data} = encode_type(type, data)
    do_encode(remaining_types, remaining_data, [encoded | acc])
  end
  # Each clause consumes one value from the data list and returns
  # {encoded_binary, rest_of_data}.
  @spec encode_type(ABI.FunctionSelector.type(), [any()]) :: {binary(), [any()]}
  defp encode_type({:uint, size}, [data | rest]) do
    {encode_uint(data, size), rest}
  end
  defp encode_type(:address, data), do: encode_type({:uint, 160}, data)
  defp encode_type(:bool, [data | rest]) do
    value =
      case data do
        true -> encode_uint(1, 8)
        false -> encode_uint(0, 8)
        _ -> raise "Invalid data for bool: #{data}"
      end
    {value, rest}
  end
  defp encode_type(:string, [data | rest]) do
    # Dynamic types are encoded as length word followed by right-padded bytes.
    {encode_uint(byte_size(data), 256) <> encode_bytes(data), rest}
  end
  defp encode_type(:bytes, [data | rest]) do
    {encode_uint(byte_size(data), 256) <> encode_bytes(data), rest}
  end
  defp encode_type({:bytes, size}, [data | rest])
       when is_binary(data) and byte_size(data) <= size do
    {encode_bytes(data), rest}
  end
  defp encode_type({:bytes, size}, [data | _]) when is_binary(data) do
    raise "size mismatch for bytes#{size}: #{inspect(data)}"
  end
  defp encode_type({:bytes, size}, [data | _]) do
    raise "wrong datatype for bytes#{size}: #{inspect(data)}"
  end
  defp encode_type({:tuple, types}, [data | rest]) do
    # Tuples get their own nested head/tail layout.
    encoded = encode_head_and_data(Tuple.to_list(data), types)
    {encoded, rest}
  end
  defp encode_type({:array, _type, 0}, [data | rest]) do
    encode_type({:tuple, []}, [data |> List.to_tuple() | rest])
  end
  defp encode_type({:array, type, element_count}, [data | rest]) do
    # A fixed-size array is encoded like a tuple of `element_count` copies
    # of the element type.
    repeated_type = Enum.map(1..element_count, fn _ -> type end)
    encode_type({:tuple, repeated_type}, [data |> List.to_tuple() | rest])
  end
  defp encode_type({:array, type}, [data | _rest] = all_data) do
    # A dynamic array is its element count followed by a fixed-size encoding.
    element_count = Enum.count(data)
    encoded_uint = encode_uint(element_count, 256)
    {encoded_array, rest} = encode_type({:array, type, element_count}, all_data)
    {encoded_uint <> encoded_array, rest}
  end
  defp encode_type(els, _) do
    raise "Unsupported encoding type: #{inspect(els)}"
  end
  # Right-pads raw bytes to a multiple of 32 bytes.
  def encode_bytes(bytes) do
    bytes |> pad(byte_size(bytes), :right)
  end
  # Note, we'll accept a binary or an integer here, so long as the
  # binary is not longer than our allowed data size
  defp encode_uint(data, size_in_bits) when rem(size_in_bits, 8) == 0 do
    size_in_bytes = (size_in_bits / 8) |> round
    bin = maybe_encode_unsigned(data)
    if byte_size(bin) > size_in_bytes,
      do:
        raise(
          "Data overflow encoding uint, data `#{data}` cannot fit in #{size_in_bytes * 8} bits"
        )
    bin |> pad(size_in_bytes, :left)
  end
  # Pads `bin` with zero bytes on the given side up to the next 32-byte slot.
  defp pad(bin, size_in_bytes, direction) do
    # TODO: Create `left_pad` repo, err, add to `ExthCrypto.Math`
    total_size =
      size_in_bytes + ExthCrypto.Math.mod(ExthCrypto.Math.mod(32 - size_in_bytes, 32), 32)
    padding_size_bits = (total_size - byte_size(bin)) * 8
    padding = <<0::size(padding_size_bits)>>
    case direction do
      :left -> padding <> bin
      :right -> bin <> padding
    end
  end
  @spec maybe_encode_unsigned(binary() | integer()) :: binary()
  defp maybe_encode_unsigned(bin) when is_binary(bin), do: bin
  defp maybe_encode_unsigned(int) when is_integer(int), do: :binary.encode_unsigned(int)
end
|
lib/abi/type_encoder.ex
| 0.871666 | 0.484807 |
type_encoder.ex
|
starcoder
|
defmodule Ecto.Query.Builder.OrderBy do
  @moduledoc false
  alias Ecto.Query.Builder
  @doc """
  Escapes an order by query.
  The query is escaped to a list of `{direction, expression}`
  pairs at runtime. Escaping also validates direction is one of
  `:asc` or `:desc`.
  ## Examples
      iex> escape(quote do [x.x, foo()] end, [x: 0])
      {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]},
        asc: {:{}, [], [:foo, [], []]}],
       %{}}
  """
  @spec escape(Macro.t, Keyword.t) :: Macro.t
  def escape(expr, vars) do
    # List.wrap/1 lets callers pass a single expression or a list of them;
    # the map accumulator collects external (interpolated) values.
    List.wrap(expr)
    |> Enum.map_reduce(%{}, &do_escape(&1, &2, vars))
  end
  # {dir, expr} form: validate the explicit direction before escaping.
  defp do_escape({dir, expr}, external, vars) do
    check_dir(dir)
    {ast, external} = Builder.escape(expr, external, vars)
    {{dir, ast}, external}
  end
  # Bare expression form: direction defaults to :asc.
  defp do_escape(expr, external, vars) do
    {ast, external} = Builder.escape(expr, external, vars)
    {{:asc, ast}, external}
  end
  defp check_dir(dir) when dir in [:asc, :desc], do: :ok
  defp check_dir(dir) do
    reason = "non-allowed direction `#{dir}`, only `asc` and `desc` allowed"
    raise Ecto.QueryError, reason: reason
  end
  @doc """
  Builds a quoted expression.
  The quoted expression should evaluate to a query at runtime.
  If possible, it does all calculations at compile time to avoid
  runtime work.
  """
  @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
  def build(query, binding, expr, env) do
    binding = Builder.escape_binding(binding)
    {expr, external} = escape(expr, binding)
    external = Builder.escape_external(external)
    # Build the QueryExpr AST, capturing file/line for error reporting.
    order_by = quote do: %Ecto.Query.QueryExpr{
                           expr: unquote(expr),
                           external: unquote(external),
                           file: unquote(env.file),
                           line: unquote(env.line)}
    Builder.apply_query(query, __MODULE__, [order_by], env)
  end
  @doc """
  The callback applied by `build/4` to build the query.
  """
  @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t
  def apply(query, expr) do
    query = Ecto.Queryable.to_query(query)
    # Appending preserves the order in which order_by clauses were declared.
    %{query | order_bys: query.order_bys ++ [expr]}
  end
end
|
lib/ecto/query/builder/order_by.ex
| 0.862757 | 0.503174 |
order_by.ex
|
starcoder
|
defmodule Aoc2020Day11 do
  @moduledoc """
  Advent of Code 2020, day 11: seat-occupancy cellular automaton.
  Part 1 uses adjacent neighbors; part 2 uses the first seat visible in each
  of the eight directions.
  """
  import Enum
  # Parses the puzzle input into a map of {x, y} => cell, where cell is
  # "L" (empty seat), "#" (occupied seat), or "." (floor).
  def str_to_map(str) do
    str
    |> String.trim()
    |> String.split("\n", trim: true)
    |> with_index
    |> map(fn {cs, y} ->
      String.split(cs, "", trim: true) |> with_index |> map(fn {v, x} -> {{x, y}, v} end)
    end)
    |> concat
    |> Map.new()
  end
  # Part 1: iterate with adjacency rule (occupied empties at >= 4 neighbors)
  # until stable, then count occupied seats.
  def solve1(input) do
    lines =
      input
      |> String.split("\n", trim: true)
    world = str_to_map(input)
    ymax = lines |> length
    [line | _rest] = lines
    xmax = String.length(line)
    # draw(world, xmax, ymax)
    change_til_stable(world, xmax, ymax, 4, &neighbors/4)
    |> count_occupied
  end
  # Debug helper: prints the grid ("X" for out-of-range positions).
  def draw(world, xmax, ymax) do
    0..ymax
    |> map(fn y ->
      0..xmax
      |> map(fn x -> Map.get(world, {x, y}) || "X" end)
      |> join("")
      |> IO.inspect(label: y)
    end)
  end
  def count_occupied(seats) do
    seats |> Enum.filter(fn {{_x, _y}, v} -> v == "#" end) |> length
  end
  # Applies one full step; recurses until the grid stops changing
  # (fixed point), then returns the stable grid.
  def change_til_stable(seats, xmax, ymax, min_occupied_to_empty, neighbors_func) do
    new =
      seats
      |> map(fn i -> next(seats, i, xmax, ymax, min_occupied_to_empty, neighbors_func) end)
      |> Map.new()
    # draw(new, xmax, ymax)
    if new == seats do
      new
    else
      change_til_stable(new, xmax, ymax, min_occupied_to_empty, neighbors_func)
    end
  end
  # Floor never changes.
  def next(_m, {{x, y}, "."}, _xmax, _ymax, _min_occupied_to_empty, _neighbors_func) do
    {{x, y}, "."}
  end
  # Empty seat becomes occupied when no relevant neighbor is occupied.
  def next(m, {{x, y}, "L"}, xmax, ymax, _min_occupied_to_empty, neighbors_func) do
    ns =
      neighbors_func.(m, {x, y}, xmax, ymax)
      |> map(fn i -> Map.get(m, i) end)
      |> filter(fn i -> i != nil end)
    if any?(ns, fn x -> x == "#" end) do
      {{x, y}, "L"}
    else
      {{x, y}, "#"}
    end
  end
  # Occupied seat empties when at least min_occupied_to_empty neighbors
  # are occupied (4 for part 1, 5 for part 2).
  def next(m, {{x, y}, "#"}, xmax, ymax, min_occupied_to_empty, neighbors_func) do
    ns =
      neighbors_func.(m, {x, y}, xmax, ymax)
      |> map(fn i -> Map.get(m, i) end)
      |> filter(fn i -> i != nil end)
    occupied = ns |> filter(fn n -> n == "#" end) |> length
    if occupied >= min_occupied_to_empty do
      {{x, y}, "L"}
    else
      {{x, y}, "#"}
    end
  end
  # Part 1 rule: the eight directly adjacent cells. Only negative coordinates
  # are filtered here; positions past xmax/ymax simply miss the map lookup.
  def neighbors(_m, {x, y}, _, _) do
    [
      {x - 1, y - 1},
      {x, y - 1},
      {x + 1, y - 1},
      {x - 1, y},
      {x + 1, y},
      {x - 1, y + 1},
      {x, y + 1},
      {x + 1, y + 1}
    ]
    |> filter(fn {x, y} -> x >= 0 && y >= 0 end)
  end
  # First position along the given ray that holds a seat (not floor, not
  # off-map); nil when the ray hits the edge first.
  def firstsee(m, xs) do
    find(xs, fn p -> Map.get(m, p) != nil && Map.get(m, p) != "." end)
  end
  # Part 2 rule: the first visible seat in each of the eight directions.
  # NOTE: ranges like (x - 1)..0 rely on default decreasing-step ranges.
  def cansee(m, {x, y}, xmax, ymax) do
    [
      firstsee(m, (x - 1)..0 |> map(&{&1, y})),
      firstsee(m, (x + 1)..(xmax - 1) |> map(&{&1, y})),
      firstsee(m, (y - 1)..0 |> map(&{x, &1})),
      firstsee(m, (y + 1)..(ymax - 1) |> map(&{x, &1})),
      firstsee(m, diag1({x, y}, [], xmax, ymax)),
      firstsee(m, diag3({x, y}, [], xmax, ymax)),
      firstsee(m, diag2({x, y}, [], xmax, ymax)),
      firstsee(m, diag4({x, y}, [], xmax, ymax))
    ]
    |> uniq
    |> filter(&(&1 != nil))
    |> filter(fn {a, b} -> 0 <= a && a < xmax && 0 <= b && b < ymax end)
    |> filter(fn {x1, y1} -> {x1, y1} != {x, y} end)
  end
  @doc """
  Top (up-left) part of the "\\" diagonal, excluding the start position.
  """
  def diag1({x, y}, acc, _xmax, _ymax) when x < 0 or y < 0 do
    acc |> reverse |> drop(1)
  end
  def diag1({x, y}, acc, xmax, ymax) do
    diag1({x - 1, y - 1}, [{x, y} | acc], xmax, ymax)
  end
  @doc """
  Bottom (down-right) part of the "\\" diagonal, excluding the start position.
  """
  def diag3({x, y}, acc, xmax, ymax) when x >= xmax or y >= ymax do
    acc |> reverse |> drop(1)
  end
  def diag3({x, y}, acc, xmax, ymax) do
    diag3({x + 1, y + 1}, [{x, y} | acc], xmax, ymax)
  end
  @doc """
  Top (up-right) part of the "/" diagonal, excluding the start position.
  """
  def diag2({x, y}, acc, xmax, _ymax) when x >= xmax or y < 0 do
    acc |> reverse |> drop(1)
  end
  def diag2({x, y}, acc, xmax, ymax) do
    diag2({x + 1, y - 1}, [{x, y} | acc], xmax, ymax)
  end
  @doc """
  Bottom (down-left) part of the "/" diagonal, excluding the start position.
  """
  def diag4({x, y}, acc, _xmax, ymax) when x < 0 or y >= ymax do
    acc |> reverse |> drop(1)
  end
  def diag4({x, y}, acc, xmax, ymax) do
    diag4({x - 1, y + 1}, [{x, y} | acc], xmax, ymax)
  end
  # Part 2: visibility rule, tolerance of 5 occupied visible seats.
  def solve2(input) do
    lines =
      input
      |> String.split("\n", trim: true)
    world = str_to_map(input)
    ymax = lines |> length
    [line | _rest] = lines
    xmax = String.length(line)
    # draw(world, xmax, ymax)
    change_til_stable(world, xmax, ymax, 5, &cansee/4)
    |> count_occupied
  end
end
|
lib/2020/aoc2020_day11.ex
| 0.655005 | 0.606149 |
aoc2020_day11.ex
|
starcoder
|
defmodule Advent.D6 do
  @moduledoc """
  Advent of Code, day 6: Manhattan-distance areas around input coordinates.

  Part 1 returns the size of the largest finite region closest to exactly
  one coordinate; part 2 counts board cells whose summed distance to all
  coordinates is below 10_000.
  """

  @doc "Size of the biggest finite (non-edge-touching) area."
  def part1() do
    coordinates = get_coordinates()
    {max_column, max_row} = tuple_max_column_and_row(coordinates)

    coordinates
    |> mark_new_board(max_column, max_row)
    |> remove_coordinates_from_the_edge(max_column, max_row)
    |> biggest_area()
  end

  @doc "Number of cells whose total distance to every coordinate is < 10_000."
  def part2() do
    coordinates = get_coordinates()
    {max_column, max_row} = tuple_max_column_and_row(coordinates)

    coordinates
    |> new_board_with_distance(max_column, max_row)
    |> Enum.filter(&(&1 < 10000))
    |> length
  end

  @doc """
  Reads `inputs/d6.txt` into `[{{column, row}, index}]` with 1-based indices.
  """
  def get_coordinates() do
    "inputs/d6.txt"
    |> File.read!()
    # trim: true guards against a trailing newline yielding an empty line,
    # which previously crashed in the {column, row} destructuring below.
    |> String.split("\n", trim: true)
    |> Enum.map(&(String.split(&1, ", ", trim: true) |> List.to_tuple()))
    |> Enum.map(fn {column, row} -> {String.to_integer(column), String.to_integer(row)} end)
    |> Enum.with_index(1)
  end

  @doc "Maximum column and maximum row over all coordinates, as `{max_column, max_row}`."
  def tuple_max_column_and_row(coordinates) do
    column = coordinates |> Enum.map(fn {{column, _}, _} -> column end) |> Enum.max()
    row = coordinates |> Enum.map(fn {{_, row}, _} -> row end) |> Enum.max()
    {column, row}
  end

  @doc "For every board cell, the total distance to all coordinates."
  def new_board_with_distance(coordinates, max_column, max_row) do
    for board_column <- 0..max_column,
        board_row <- 0..max_row do
      calculate_distance({board_column, board_row}, coordinates)
    end
  end

  @doc "Summed Manhattan distance from `{column, row}` to every coordinate."
  def calculate_distance({column, row}, coordinates) do
    # Enum.sum/1 replaces a reduce whose parameter names were swapped
    # (the element was bound as `acc` and vice versa); result is identical.
    coordinates
    |> Enum.map(&taxicab({column, row}, &1))
    |> Enum.sum()
  end

  @doc "Manhattan (taxicab) distance between a cell and an indexed coordinate."
  def taxicab({first_column, first_row}, {{second_column, second_row}, _index}) do
    abs(first_column - second_column) + abs(first_row - second_row)
  end

  @doc ~S(Labels every board cell with its closest coordinate, or "." on ties.)
  def mark_new_board(coordinates, max_column, max_row) do
    for board_column <- 0..max_column,
        board_row <- 0..max_row,
        do: taxicab_distance({board_column, board_row}, coordinates)
  end

  @doc "Size of the largest group of cells sharing one closest coordinate."
  def biggest_area(board) do
    board
    |> Enum.group_by(fn {_, _, index} -> index end)
    |> Enum.map(fn {index, matches} -> {length(matches), index} end)
    # Tuples compare element-wise, so this picks the highest count
    # (ties broken by the larger index, as before).
    |> Enum.max()
    |> elem(0)
  end

  @doc """
  Returns `{column, row, index}` of the closest coordinate for a cell, or
  `{column, row, "."}` when two or more coordinates tie for the shortest
  distance.
  """
  def taxicab_distance({board_column, board_row} = _board_coordinate, coordinates) do
    distances =
      coordinates
      |> Enum.map(fn {{column, row}, index} ->
        {abs(column - board_column) + abs(row - board_row), index}
      end)

    {shortest_distance, index} =
      distances
      |> Enum.min_by(fn {distance, _index} -> distance end)

    if more_than_one_short_distance?(distances, shortest_distance) do
      {board_column, board_row, "."}
    else
      {board_column, board_row, index}
    end
  end

  @doc "True when more than one coordinate sits at the given shortest distance."
  def more_than_one_short_distance?(distances, shortest) do
    how_many = Enum.filter(distances, fn {distance, _index} -> distance == shortest end)
    length(how_many) > 1
  end

  @doc "Drops areas touching the board edge (infinite) as well as tie cells."
  def remove_coordinates_from_the_edge(board, max_column, max_row) do
    infinite_points =
      board
      |> Enum.filter(fn {column, row, index} ->
        column == 0 or row == 0 or column == max_column or row == max_row or index == "."
      end)
      |> Enum.map(&elem(&1, 2))
      |> Enum.uniq()

    board |> Enum.filter(fn {_, _, index} -> not (index in infinite_points) end)
  end
end
|
lib/advent/d6.ex
| 0.661267 | 0.577108 |
d6.ex
|
starcoder
|
defmodule UeberauthToken do
  @moduledoc """
  A package for authenticating with an oauth2 token and building an ueberauth struct.

  Features:

  - Cache the ueberauth struct response using the excellent `whitfin/cachex` library.
  - Perform asynchronous validity checks for each token key in the cache.

  A full description of the config options can be found in the `UeberauthToken.Config` @moduledoc.

  ## Defining a provider module

  A provider module must be provided in order for UeberauthToken to function correctly. The provider
  implements the callbacks specified in the module `UeberauthToken.Strategy`. Read more about the
  requirements for the provider in `UeberauthToken.Strategy`.

  Read more on basic usage in the `UeberauthToken.Strategy` module.
  """
  alias Ueberauth.{Auth, Failure, Strategy}
  alias Ueberauth.Failure.Error
  alias UeberauthToken.Config
  alias Plug.Conn

  @token_strategy UeberauthToken.Strategy

  @doc """
  Execute token validation for an oauth2 bearer token against a
  given oauthorization server (provider).

  This function may be useful when a token needs to be validated by a resource server
  and the validation is taking place outside a `Plug` pipeline. For example, in
  a web socket connection.

  ## Options

  * `:validate_provider` - boolean

    Defaults to `true`. Validates that the provider has already been configured
    in the application configuration. It is recommended to set this
    value to `[validate_provider: false]` once it is known that the application
    is correctly configured to reduce the runtime burden of checking the
    configuration on each token validation event.

  ## Example usage

      @provider UeberauthToken.TestProvider

      def connect(%{"authorization" => token} = params, socket) do
        case UeberauthToken.token_auth(token, @provider) do
          {:ok, %Ueberauth.Auth{} = auth} ->
            {:ok, assign(socket, :user_id, auth.uid)}

          {:error, %Ueberauth.Failure{} = failure} ->
            {:error, failure}
        end
      end
  """
  @spec token_auth(token :: String.t(), provider :: module(), opts :: list()) ::
          {:ok, Auth.t()} | {:error, Failure.t()}
  def token_auth(token, provider, opts \\ [validate_provider: true])

  def token_auth(<<"Bearer ", token::binary>>, provider, opts)
      when is_atom(provider) do
    # An absent :validate_provider key now falls back to `true` (the documented
    # default). The previous `with`-based version raised a WithClauseError when
    # callers passed an opts list without the key (Keyword.get returned nil,
    # which matched neither `false` nor `{:error, :invalid}`).
    if Keyword.get(opts, :validate_provider, true) do
      case Config.validate_provider(provider) do
        {:ok, :valid} -> validate_token(token, provider)
        {:error, :invalid} -> invalid_provider_error(provider)
      end
    else
      validate_token(token, provider)
    end
  end

  def token_auth("", provider, _opts) when is_atom(provider) do
    empty_token_error(provider)
  end

  def token_auth(nil, provider, _opts) when is_atom(provider) do
    empty_token_error(provider)
  end

  # Bare token without the "Bearer " prefix: normalize and re-dispatch.
  def token_auth(token, provider, opts) when is_binary(token) and is_atom(provider) do
    token_auth("Bearer #{token}", provider, opts)
  end

  # private

  # Unwraps the struct the strategy left on the conn into an ok/error tuple.
  defp parse_ueberauth_struct(%Conn{assigns: %{ueberauth_failure: %Failure{} = auth}}) do
    {:error, auth}
  end

  defp parse_ueberauth_struct(%Conn{assigns: %{ueberauth_auth: %Auth{} = auth}}) do
    {:ok, auth}
  end

  defp invalid_provider_error(provider) do
    {:error,
     %Failure{
       errors: [
         %Error{
           # Typo fixed: "ins configured" -> "is configured".
           message: "Invalid provider - #{provider}, ensure the provider is configured",
           message_key: "error"
         }
       ],
       provider: provider,
       strategy: @token_strategy
     }}
  end

  defp empty_token_error(provider) do
    {:error,
     %Failure{
       errors: [
         %Error{
           message: "Empty string or null found for token",
           message_key: "error"
         }
       ],
       provider: provider,
       strategy: @token_strategy
     }}
  end

  # Runs the token through the strategy callbacks on a bare %Conn{}; any
  # exception raised by the provider is converted into a Failure tuple so
  # callers outside a Plug pipeline never crash.
  defp validate_token(token, provider) do
    private_fields = %{
      provider: provider,
      token: %{"authorization" => "Bearer #{token}"}
    }

    try do
      %Conn{}
      |> Conn.put_private(:ueberauth_token, private_fields)
      |> Strategy.run_callback(@token_strategy)
      # ^ leads to invocation of `@token_strategy.handle_callback!/1` and `@token_strategy.auth/1`
      |> parse_ueberauth_struct()
    rescue
      e ->
        {:error,
         %Failure{
           errors: [
             %Error{
               message: "Failed attempt to verify token due to error: #{inspect(e)}",
               message_key: "error"
             }
           ],
           provider: provider,
           strategy: @token_strategy
         }}
    end
  end
end
|
lib/ueberauth_token.ex
| 0.869922 | 0.605595 |
ueberauth_token.ex
|
starcoder
|
defmodule DataTree.TreePath do
  @moduledoc """
  A canonical path implementation for tree structures.

  The functions in this module handle `TreePath` structs, which encapsulate
  path `segments` in a list in reverse order. There is no distinction between
  absolute and relative paths. Printable representations of such a struct use
  the dot `.` character as separator between segments.

  Developers should avoid creating the `TreePath` struct directly and instead
  rely on the functions provided by this module, including the provided sigil
  macros.
  """

  @separator "."
  # "." encodes to "2e", so literal dots inside a segment print as "_2e".
  @separator_replacement "_" <> Base.encode16(@separator, case: :lower)

  @type t :: %__MODULE__{
          segments: list(String.t())
        }

  defstruct segments: []

  @doc ~S"""
  Creates a new struct from a singular `segment` or a list of `segments`.

  An empty singular `segment` results in a struct with zero segments.
  Lists of `segments` are filtered for empty elements. An empty list,
  also as a consequence after filtering, results in a struct with zero
  segments. Whitespace on each `segment` is preserved.

  ## Examples

  By passing a singular segment:

      iex> DataTree.TreePath.new("data")
      %DataTree.TreePath{segments: ["data"]}

      iex> DataTree.TreePath.new(" da ta ")
      %DataTree.TreePath{segments: [" da ta "]}

      iex> DataTree.TreePath.new(" ")
      %DataTree.TreePath{segments: [" "]}

      iex> DataTree.TreePath.new("")
      %DataTree.TreePath{segments: []}

  By passing a list of segments:

      iex> DataTree.TreePath.new([])
      %DataTree.TreePath{segments: []}

      iex> DataTree.TreePath.new(["data"])
      %DataTree.TreePath{segments: ["data"]}

      iex> DataTree.TreePath.new([" data ", "lo re", "b4"])
      %DataTree.TreePath{segments: ["b4", "lo re", " data "]}
  """
  @spec new(segment) :: t when segment: String.t()
  def new(segment) when is_binary(segment) do
    case segment do
      "" -> %__MODULE__{segments: []}
      _ -> %__MODULE__{segments: [segment]}
    end
  end

  @spec new(segments) :: t when segments: list()
  def new(segments) when is_list(segments) do
    # `&1 != ""` is an O(1) emptiness check; the previous
    # `String.length(&1) > 0` walked every grapheme (O(n)).
    filtered_segments =
      segments
      |> Enum.filter(&(&1 != ""))
      |> Enum.reverse()

    %__MODULE__{segments: filtered_segments}
  end

  @doc ~S"""
  Creates a new struct by wrapping the provided list of `segments`.

  The given list is taken as-is, i.e. without any filtering and by
  expecting it to be already in reversed order.

  ## Examples

  Here wrapping a path named "data.lore.b4":

      iex> DataTree.TreePath.wrap(["b4", "lore", "data"])
      %DataTree.TreePath{segments: ["b4", "lore", "data"]}
  """
  @spec wrap(segments) :: t when segments: list()
  def wrap(segments) when is_list(segments) do
    %__MODULE__{segments: segments}
  end

  @doc ~S"""
  Handles the sigil `~p` for tree paths.

  ## Examples

      iex> ~p""
      %DataTree.TreePath{segments: []}

      iex> ~p"data.lore.b4"
      %DataTree.TreePath{segments: ["b4", "lore", "data"]}

      iex> x = "or"
      iex> ~p"da#{:t}a.l#{x}e.b4"
      %DataTree.TreePath{segments: ["b4", "lore", "data"]}
  """
  defmacro sigil_p({:<<>>, _line, [term]}, []) when is_binary(term) do
    # Static string: segments can be computed entirely at compile time.
    reversed = transpose_segments(term)

    quote do
      DataTree.TreePath.wrap(unquote(reversed))
    end
  end

  defmacro sigil_p({:<<>>, _line, terms}, []) when is_list(terms) do
    # Interpolated string: mix of binaries and AST fragments.
    reversed = transpose_segments(terms)

    quote do
      DataTree.TreePath.wrap(unquote(reversed))
    end
  end

  @doc false
  def transpose_segments(term) when is_binary(term) do
    term
    |> Macro.unescape_string()
    |> String.split(@separator)
    |> Stream.filter(&(&1 != ""))
    |> Enum.reverse()
  end

  def transpose_segments(terms) when is_list(terms) do
    # Strips the `::binary` type annotation from interpolation AST nodes.
    extractor = fn
      {:"::", _, [expr, _]} -> expr
      binary when is_binary(binary) -> Macro.unescape_string(binary)
    end

    # Splices adjacent literal/interpolated fragments into whole segments,
    # accumulating in reverse order; interpolations become `<>` AST nodes.
    transposer = fn term, acc ->
      acc_head = List.first(acc)

      cond do
        is_binary(term) ->
          segments = String.split(term, @separator)

          if is_tuple(acc_head) do
            [segm_head | segm_tail] = segments
            acc = List.update_at(acc, 0, fn i -> quote do: unquote(i) <> unquote(segm_head) end)
            Enum.reverse(segm_tail) ++ acc
          else
            Enum.reverse(segments) ++ acc
          end

        is_tuple(term) ->
          if acc_head do
            List.update_at(acc, 0, fn i -> quote do: unquote(i) <> unquote(term) end)
          else
            [term | acc]
          end

        true ->
          acc
      end
    end

    terms
    |> Stream.map(&extractor.(&1))
    |> Enum.reduce([], transposer)
    |> Enum.filter(&(&1 != ""))
  end

  @doc ~S"""
  Returns the path separator character as a `BitString`.
  """
  @spec separator() :: String.t()
  def separator(), do: @separator

  @doc ~S"""
  Returns the path separator character replacement as a `BitString`.

  The replacement is built using the underscore character `_` followed by
  the Base64 encoded form of the separator character.
  """
  @spec separator_replacement() :: String.t()
  def separator_replacement(), do: @separator_replacement

  @doc ~S"""
  Returns the level of the path.

  The level corresponds to the number of path segments.
  """
  @spec level(t) :: integer()
  def level(%__MODULE__{segments: segments}) do
    length(segments)
  end

  @doc ~S"""
  Returns a new struct which wraps the root segment of the given path.

  ## Examples

      iex> DataTree.TreePath.root(~p"")
      %DataTree.TreePath{segments: []}

      iex> DataTree.TreePath.root(~p"data")
      %DataTree.TreePath{segments: ["data"]}

      iex> DataTree.TreePath.root(~p"data.lore.b4")
      %DataTree.TreePath{segments: ["data"]}
  """
  @spec root(t) :: t
  def root(%__MODULE__{} = path) do
    path |> rootname |> new
  end

  @doc ~S"""
  Returns the root segment name of the given path as a `BitString`.

  ## Examples

      iex> DataTree.TreePath.rootname(~p"")
      ""

      iex> DataTree.TreePath.rootname(~p"data")
      "data"

      iex> DataTree.TreePath.rootname(~p"data.lore.b4")
      "data"
  """
  @spec rootname(t) :: String.t()
  def rootname(%__MODULE__{segments: segments}) do
    # Segments are stored reversed, so the root is the last element.
    case List.last(segments) do
      nil -> ""
      x -> x
    end
  end

  @doc ~S"""
  Returns a new struct which wraps the parent segment of the given path.

  ## Examples

      iex> DataTree.TreePath.parent(~p"")
      %DataTree.TreePath{segments: []}

      iex> DataTree.TreePath.parent(~p"data")
      %DataTree.TreePath{segments: []}

      iex> DataTree.TreePath.parent(~p"data.lore.b4")
      %DataTree.TreePath{segments: ["lore", "data"]}
  """
  @spec parent(t) :: t
  def parent(%__MODULE__{segments: segments} = path) do
    case segments do
      [_ | tail] -> tail |> wrap
      _ -> path
    end
  end

  @doc ~S"""
  Returns a new struct which wraps the base segment of the given path.

  ## Examples

      iex> DataTree.TreePath.base(~p"")
      %DataTree.TreePath{segments: []}

      iex> DataTree.TreePath.base(~p"data")
      %DataTree.TreePath{segments: ["data"]}

      iex> DataTree.TreePath.base(~p"data.lore.b4")
      %DataTree.TreePath{segments: ["b4"]}
  """
  @spec base(t) :: t
  def base(%__MODULE__{segments: segments} = path) do
    case segments do
      [head | _] -> head |> new
      _ -> path
    end
  end

  @doc ~S"""
  Returns the base segment name of the given path as a `BitString`.

  ## Examples

      iex> DataTree.TreePath.basename(~p"")
      ""

      iex> DataTree.TreePath.basename(~p"data")
      "data"

      iex> DataTree.TreePath.basename(~p"data.lore.b4")
      "b4"
  """
  @spec basename(t) :: String.t()
  def basename(%__MODULE__{segments: segments}) do
    case segments do
      [head | _] -> head
      _ -> ""
    end
  end

  @doc ~S"""
  Returns a new struct which wraps a sibling of the given path.

  # Examples

      iex> DataTree.TreePath.sibling(~p"data.lore", "b4")
      %DataTree.TreePath{segments: ["b4", "data"]}

      iex> DataTree.TreePath.sibling(~p"", "b4")
      %DataTree.TreePath{segments: ["b4"]}
  """
  @spec sibling(t, String.t()) :: t
  def sibling(%__MODULE__{} = path, segment) when is_binary(segment) do
    case segment do
      "" -> path
      _ -> [segment | parent(path).segments] |> wrap
    end
  end

  @doc ~S"""
  Returns a new struct with the `segment` being appended on the given path.

  ## Examples

      iex> DataTree.TreePath.append(~p"", "data")
      %DataTree.TreePath{segments: ["data"]}

      iex> DataTree.TreePath.append(~p"data.lore", "b4")
      %DataTree.TreePath{segments: ["b4", "lore", "data"]}

      iex> DataTree.TreePath.append(~p"data.lore", ~p"b4.soong")
      %DataTree.TreePath{segments: ["soong", "b4", "lore", "data"]}
  """
  @spec append(t, String.t()) :: t
  def append(%__MODULE__{segments: segments} = path, segment) when is_binary(segment) do
    case segment do
      "" -> path
      _ -> [segment | segments] |> wrap
    end
  end

  @spec append(t, t) :: t
  def append(%__MODULE__{segments: segments}, %__MODULE__{segments: more}) do
    (more ++ segments) |> wrap
  end

  @doc ~S"""
  Checks if a path starts with the given `prefix`.

  ## Examples

      iex> DataTree.TreePath.starts_with?(~p"data.lore.b4", "data")
      true

      iex> DataTree.TreePath.starts_with?(~p"data.lore.b4", "lore")
      false

      iex> DataTree.TreePath.starts_with?(~p"data.lore.b4", ~p"data.lore")
      true
  """
  @spec starts_with?(t, String.t() | t) :: boolean()
  def starts_with?(%__MODULE__{segments: segments}, prefix) do
    # Segments are stored reversed, so both sides are re-reversed first.
    fun = &(segments |> Enum.reverse() |> List.starts_with?(&1))

    cond do
      is_binary(prefix) -> fun.([prefix])
      is_struct(prefix, __MODULE__) -> fun.(prefix.segments |> Enum.reverse())
    end
  end

  @doc ~S"""
  Checks if a path ends with the given `suffix`.

  ## Examples

      iex> DataTree.TreePath.ends_with?(~p"data.lore.b4", "b4")
      true

      iex> DataTree.TreePath.ends_with?(~p"data.lore.b4", "lore")
      false

      iex> DataTree.TreePath.ends_with?(~p"data.lore.b4", ~p"lore.b4")
      true
  """
  @spec ends_with?(t, String.t() | t) :: boolean()
  def ends_with?(%__MODULE__{segments: segments}, suffix) do
    # Reversed storage means an "ends with" is a prefix check on the raw list.
    fun = &List.starts_with?(segments, &1)

    cond do
      is_binary(suffix) -> fun.([suffix])
      is_struct(suffix, __MODULE__) -> fun.(suffix.segments)
    end
  end

  defimpl String.Chars, for: DataTree.TreePath do
    def to_string(path) do
      sep = DataTree.TreePath.separator()
      repl = DataTree.TreePath.separator_replacement()

      path.segments
      |> Enum.reverse()
      |> Enum.map(&String.replace(&1, sep, repl))
      # Reuse the hoisted `sep` instead of calling separator() again.
      |> Enum.join(sep)
    end
  end
end
|
lib/data_tree/tree_path.ex
| 0.917769 | 0.762114 |
tree_path.ex
|
starcoder
|
defmodule Changex.Formatter.Elixir do
  use Changex.Formatter

  @moduledoc """
  Renders a changelog to markdown in the style used by the
  elixir-lang changelog.
  """

  @doc """
  Renders a grouped commit map as a markdown changelog section.

  Expects input shaped like:

      %{
        fix: %{scope1: [commit1, commit2], scope2: [commit5, commit6]},
        feat: %{scope1: [commit3, commit4], scope2: [commit7, commit8]}
      }

  and produces a `## <version>` heading followed by one bullet list per
  known commit type, e.g.:

      ## v0.0.1

      * Enhancements
        * [Scope 1] commit 1
        * [Scope 2] commit 5

      * Bug fixes
        * [Scope 1] commit 3
  """
  def format(commits, options \\ []) do
    version = Keyword.get(options, :version)
    heading(version) <> types(commits)
  end

  # Version heading; current_version/0 comes from `use Changex.Formatter`.
  defp heading(version) do
    "## #{version || current_version()}\n\n"
  end

  # One section per known type present in the commit map, newline-separated.
  defp types(commits) do
    valid_types()
    |> Enum.map(&{&1, Map.get(commits, &1)})
    |> Enum.filter(fn {_type, scoped} -> scoped end)
    |> Enum.map_join("\n", fn {type, scoped} -> build_type(type, scoped) end)
  end

  # Section header plus its commit bullets; non-map values render as nothing.
  defp build_type(type, commits) when is_map(commits) do
    "* #{lookup_type(type)}" <> build_commits(commits)
  end

  defp build_type(_type, _), do: nil

  defp build_commits(commits) do
    Enum.map_join(commits, "", &build_commit_scope/1)
  end

  defp build_commit_scope({scope, commits}) do
    Enum.reduce(commits, "", fn commit, acc -> build_commit(commit, scope, acc) end)
  end

  # Appends one "[scope] description" bullet; continuation lines of a
  # multi-line description are indented under the bullet.
  defp build_commit(commit, scope, acc) do
    description =
      commit
      |> Keyword.get(:description)
      |> String.split("\n")
      |> Enum.join("\n ")

    acc <> "\n * [#{scope}] #{description}"
  end

  defp valid_types, do: [:feat, :fix, :break]

  defp lookup_type(:fix), do: "Bug fixes"
  defp lookup_type(:feat), do: "Enhancements"
  defp lookup_type(:break), do: "Breaking changes"
end
|
lib/changex/formatter/elixir.ex
| 0.641871 | 0.420748 |
elixir.ex
|
starcoder
|
defmodule BSV.Transaction do
  @moduledoc """
  Module for the construction, parsing and serialization of Bitcoin transactions.
  """
  alias BSV.Crypto.{Hash, ECDSA}
  alias BSV.Address
  alias BSV.KeyPair
  alias BSV.Extended.PrivateKey
  alias BSV.Script.PublicKeyHash
  alias BSV.Transaction.{Input, Output, Signature}
  alias BSV.Util
  alias BSV.Util.VarBin

  defstruct version: 1,
            lock_time: 0,
            inputs: [],
            outputs: [],
            change_script: nil,
            change_index: nil,
            fee: nil

  @typedoc "Bitcoin Transaction"
  @type t :: %__MODULE__{
          version: integer,
          lock_time: integer,
          inputs: list,
          outputs: list,
          change_script: nil,
          change_index: nil,
          fee: nil
        }

  # Outputs below this satoshi value are considered dust and not created.
  @dust_limit 546
  # Fee rate used by get_fee/1 when no explicit fee has been set.
  @fee_per_kb 500

  @doc """
  Parse the given binary into a transaction. Returns a tuple containing the
  transaction input and the remaining binary data.

  ## Options

  The accepted options are:

  * `:encoding` - Optionally decode the binary with either the `:base64` or `:hex` encoding scheme.

  ## Examples

      BSV.Transaction.parse(data)
      {%BSV.Trasaction{}, ""}
  """
  @spec parse(binary, keyword) :: {__MODULE__.t, binary}
  def parse(data, options \\ []) do
    encoding = Keyword.get(options, :encoding)

    <<version::little-32, data::binary>> = data |> Util.decode(encoding)
    {inputs, data} = data |> VarBin.parse_items(&Input.parse/1)
    {outputs, data} = data |> VarBin.parse_items(&Output.parse/1)
    <<lock_time::little-32, data::binary>> = data

    {struct(__MODULE__, [
      version: version,
      inputs: inputs,
      outputs: outputs,
      lock_time: lock_time
    ]), data}
  end

  @doc """
  Serialises the given transaction into a binary.

  ## Options

  The accepted options are:

  * `:encoding` - Optionally encode the returned binary with either the `:base64` or `:hex` encoding scheme.

  ## Examples

      BSV.Transaction.Input.serialize(input)
      <<binary>>
  """
  @spec serialize(__MODULE__.t, keyword) :: binary
  def serialize(%__MODULE__{} = tx, options \\ []) do
    # Doc fix: the option read here is `:encoding` (the doc previously said `:encode`).
    encoding = Keyword.get(options, :encoding)
    inputs = tx.inputs |> VarBin.serialize_items(&Input.serialize/1)
    outputs = tx.outputs |> VarBin.serialize_items(&Output.serialize/1)

    <<
      tx.version::little-32,
      inputs::binary,
      outputs::binary,
      tx.lock_time::little-32
    >>
    |> Util.encode(encoding)
  end

  @doc """
  Returns the given transaction's txid, which is a double SHA-256 hash of the
  serialized transaction, reversed.

  ## Examples

      iex> %BSV.Transaction{}
      ...> |> BSV.Transaction.spend_to("1B8j21Ym6QbJQ6kRvT1N7pvdBN2qPDhqij", 72000)
      ...> |> BSV.Transaction.get_txid
      "c8e8f4951eb08f9e6e12b92da30b0b9a0849202dcbb5ac35e13acc91b8c4de6d"
  """
  @spec get_txid(__MODULE__.t) :: String.t
  def get_txid(%__MODULE__{} = tx) do
    serialize(tx)
    |> Hash.sha256_sha256
    |> Util.reverse_bin
    |> Util.encode(:hex)
  end

  @doc """
  Returns the size of the given transaction. Where any inputs are without a
  signed script, it's size is estimated assuming a P2PKH input.

  ## Examples

      iex> %BSV.Transaction{}
      ...> |> BSV.Transaction.spend_from(%BSV.Transaction.Input{utxo: %BSV.Transaction.Output{satoshis: 100000}})
      ...> |> BSV.Transaction.spend_to("1B8j21Ym6QbJQ6kRvT1N7pvdBN2qPDhqij", 75000)
      ...> |> BSV.Transaction.get_size
      192
  """
  @spec get_size(__MODULE__.t) :: integer
  def get_size(%__MODULE__{} = tx) do
    # Input/output sizes plus the varint prefixes that encode their counts.
    inputs = tx.inputs
    |> Enum.map(&Input.get_size/1)
    |> Enum.sum
    |> Kernel.+(tx.inputs |> length |> VarBin.serialize_int |> byte_size)

    outputs = tx.outputs
    |> Enum.map(&Output.get_size/1)
    |> Enum.sum
    |> Kernel.+(tx.outputs |> length |> VarBin.serialize_int |> byte_size)

    # 8 = 4-byte version + 4-byte lock_time.
    8 + inputs + outputs
  end

  @doc """
  Returns the fee for the given transaction. If the fee has already been set
  using `BSV.Transaction.set_fee/2`, then that figure is returned. Otherwise
  a fee is calculated based on the result of `BSV.Transaction.get_size/1`.

  ## Examples

      iex> %BSV.Transaction{}
      ...> |> BSV.Transaction.set_fee(500)
      ...> |> BSV.Transaction.get_fee
      500

      iex> %BSV.Transaction{}
      ...> |> BSV.Transaction.spend_from(%BSV.Transaction.Input{utxo: %BSV.Transaction.Output{satoshis: 100000}})
      ...> |> BSV.Transaction.spend_to("1B8j21Ym6QbJQ6kRvT1N7pvdBN2qPDhqij", 75000)
      ...> |> BSV.Transaction.get_fee
      96
  """
  @spec get_fee(__MODULE__.t) :: integer
  def get_fee(%__MODULE__{fee: fee}) when is_integer(fee),
    do: fee

  def get_fee(%__MODULE__{fee: fee} = tx) when is_nil(fee),
    do: get_size(tx) * @fee_per_kb / 1000 |> round

  @doc """
  Sets the fee for the given transaction. Resets the signatures for all inputs.
  """
  @spec set_fee(__MODULE__.t, integer) :: __MODULE__.t
  def set_fee(%__MODULE__{} = tx, fee) when is_integer(fee) do
    Map.put(tx, :fee, fee)
    |> update_change_output
  end

  @doc """
  Returns the change output of the given transaction.
  """
  @spec get_change_output(__MODULE__.t) :: Output.t
  def get_change_output(%__MODULE__{change_index: index})
    when is_nil(index),
    do: nil

  def get_change_output(%__MODULE__{} = tx) do
    Enum.at(tx.outputs, tx.change_index)
  end

  @doc """
  Returns the sum from all inputs of the given transaction.

  ## Examples

      iex> inputs = [
      ...>   %BSV.Transaction.Input{utxo: %BSV.Transaction.Output{satoshis: 1575}},
      ...>   %BSV.Transaction.Input{utxo: %BSV.Transaction.Output{satoshis: 3000}}
      ...> ]
      ...>
      iex> BSV.Transaction.spend_from(%BSV.Transaction{}, inputs)
      ...> |> BSV.Transaction.get_input_sum
      4575
  """
  @spec get_input_sum(__MODULE__.t) :: integer
  def get_input_sum(%__MODULE__{} = tx),
    do: tx.inputs |> Enum.reduce(0, &(&2 + &1.utxo.satoshis))

  @doc """
  Returns the sum from all outputs of the given transaction.

  ## Examples

      iex> %BSV.Transaction{}
      ...> |> BSV.Transaction.spend_to("15KgnG69mTbtkx73vNDNUdrWuDhnmfCxsf", 5000)
      ...> |> BSV.Transaction.spend_to("15KgnG69mTbtkx73vNDNUdrWuDhnmfCxsf", 1325)
      ...> |> BSV.Transaction.get_output_sum
      6325
  """
  @spec get_output_sum(__MODULE__.t) :: integer
  def get_output_sum(%__MODULE__{} = tx),
    do: tx.outputs |> Enum.reduce(0, &(&2 + &1.satoshis))

  @doc """
  Adds the given input to the transaction. Resets the signatures for all inputs.

  ## Examples

      iex> tx = %BSV.Transaction{}
      ...> |> BSV.Transaction.add_input(%BSV.Transaction.Input{})
      iex> length(tx.inputs) == 1
      true
  """
  @spec add_input(__MODULE__.t, Input.t) :: __MODULE__.t
  def add_input(%__MODULE__{} = tx, %Input{} = input) do
    inputs = Enum.concat(tx.inputs, [input])
    Map.put(tx, :inputs, inputs)
    |> update_change_output
  end

  @doc """
  Adds the given output to the transaction. Resets the signatures for all inputs.

  ## Examples

      iex> tx = %BSV.Transaction{}
      ...> |> BSV.Transaction.add_output(%BSV.Transaction.Output{})
      iex> length(tx.outputs) == 1
      true
  """
  @spec add_output(__MODULE__.t, Output.t) :: __MODULE__.t
  def add_output(%__MODULE__{} = tx, %Output{} = output) do
    outputs = Enum.concat(tx.outputs, [output])
    Map.put(tx, :outputs, outputs)
    |> update_change_output
  end

  @doc """
  Adds the given input or list of inputs to the transaction. Each input must be
  complete with a spendable UTXO or the function will raise an error. Resets the
  signatures for all inputs.
  """
  @spec spend_from(__MODULE__.t, Input.t | list) :: __MODULE__.t
  def spend_from(%__MODULE__{}, %Input{utxo: utxo})
    when is_nil(utxo),
    do: raise "Invalid input. Must have spendable UTXO."

  def spend_from(%__MODULE__{} = tx, %Input{} = input) do
    # Idempotent: adding the same input twice is a no-op.
    case Enum.member?(tx.inputs, input) do
      true -> tx
      false -> add_input(tx, input)
    end
  end

  def spend_from(%__MODULE__{} = tx, inputs) when is_list(inputs),
    do: inputs |> Enum.reduce(tx, &(spend_from(&2, &1)))

  @doc """
  Creates a P2PKH output using the given address and spend amount, and adds the
  output to the transaction. Resets the signatures for all inputs.

  ## Examples

      iex> tx = %BSV.Transaction{}
      ...> |> BSV.Transaction.spend_to("15KgnG69mTbtkx73vNDNUdrWuDhnmfCxsf", 1000)
      iex> length(tx.outputs) == 1
      true
  """
  @spec spend_to(__MODULE__.t, Address.t | binary, integer) :: __MODULE__.t
  def spend_to(%__MODULE__{} = tx, address, satoshis) do
    output = struct(Output, [
      satoshis: satoshis,
      script: PublicKeyHash.build_output_script(address)
    ])
    add_output(tx, output)
  end

  @doc """
  Specifies the change address for the given transaction. Resets the signatures
  for all inputs.

  ## Examples

      iex> %BSV.Transaction{}
      ...> |> BSV.Transaction.spend_from(%BSV.Transaction.Input{utxo: %BSV.Transaction.Output{satoshis: 100000}})
      ...> |> BSV.Transaction.spend_to("1B8j21Ym6QbJQ6kRvT1N7pvdBN2qPDhqij", 75000)
      ...> |> BSV.Transaction.change_to("1G26ZnsXQpL9cdqCKE6vViMdW9QwRQTcTJ")
      ...> |> BSV.Transaction.get_change_output
      %BSV.Transaction.Output{
        satoshis: 24887,
        script: %BSV.Script{
          chunks: [
            :OP_DUP,
            :OP_HASH160,
            <<164, 190, 242, 205, 108, 224, 228, 253, 144, 102, 35, 209, 230, 33, 135, 143, 211, 21, 79, 82>>,
            :OP_EQUALVERIFY,
            :OP_CHECKSIG
          ]
        }
      }
  """
  @spec change_to(__MODULE__.t, Address.t | binary) :: __MODULE__.t
  def change_to(%__MODULE__{} = tx, address) do
    script = PublicKeyHash.build_output_script(address)
    Map.put(tx, :change_script, script)
    |> update_change_output
  end

  @doc """
  Signs the transaction using the given private key or list of keys. Each input
  is iterrated over verifying that the key can sign the input.
  """
  @spec sign(
    __MODULE__.t,
    KeyPair.t | PrivateKey.t | {binary, binary} | binary | list
  ) :: __MODULE__.t
  def sign(%__MODULE__{} = tx, %KeyPair{} = key),
    do: sign(tx, {key.public_key, key.private_key})

  def sign(%__MODULE__{} = tx, %PrivateKey{} = private_key) do
    public_key = PrivateKey.get_public_key(private_key)
    sign(tx, {public_key.key, private_key.key})
  end

  def sign(%__MODULE__{} = tx, private_key) when is_binary(private_key) do
    keypair = private_key
    |> ECDSA.generate_key_pair
    |> KeyPair.from_ecdsa_key
    sign(tx, keypair)
  end

  def sign(%__MODULE__{} = tx, {public_key, private_key}) do
    pubkey_hash = Address.from_public_key(public_key)
    |> Map.get(:hash)

    # Only sign inputs whose UTXO script is locked to this key's hash.
    inputs = tx.inputs
    |> Enum.with_index()
    |> Enum.map(fn {input, vin} ->
      case pubkey_hash == PublicKeyHash.get_hash(input.utxo.script) do
        false -> input
        true ->
          script = tx
          |> Signature.sign_input(vin, private_key)
          |> PublicKeyHash.build_input_script(public_key)
          Map.put(input, :script, script)
      end
    end)

    Map.put(tx, :inputs, inputs)
  end

  def sign(%__MODULE__{} = tx, keys) when is_list(keys),
    do: keys |> Enum.reduce(tx, &(sign(&2, &1)))

  @spec is_coinbase(__MODULE__.t()) :: boolean
  # An empty input list is not a coinbase transaction. Previously this case
  # raised a FunctionClauseError instead of returning false.
  def is_coinbase(%__MODULE__{inputs: []}), do: false

  def is_coinbase(%__MODULE__{inputs: [first_input | _] = inputs}), do:
    length(inputs) == 1 and Input.is_null(first_input)

  # Needs to be called every time a change is made to inputs or outputs
  defp update_change_output(%__MODULE__{change_script: script} = tx)
    when is_nil(script),
    do: tx

  defp update_change_output(%__MODULE__{} = tx) do
    # Add a zero-value change output first so the fee estimate accounts for
    # its size, then re-add it with the real amount (or drop it as dust).
    tx = tx
    |> remove_change_output
    |> clear_signatures
    |> add_change_output

    change_amount = get_input_sum(tx) - get_output_sum(tx) - get_fee(tx)

    case change_amount > @dust_limit do
      false -> remove_change_output(tx)
      true ->
        tx
        |> remove_change_output
        |> add_change_output(change_amount)
    end
  end

  defp remove_change_output(%__MODULE__{change_index: index} = tx)
    when is_nil(index),
    do: tx

  defp remove_change_output(%__MODULE__{change_index: index} = tx) do
    outputs = List.delete_at(tx.outputs, index)

    tx
    |> Map.put(:outputs, outputs)
    |> Map.put(:change_index, nil)
  end

  defp add_change_output(%__MODULE__{} = tx, satoshis \\ 0) do
    index = length(tx.outputs)
    output = struct(Output, script: tx.change_script, satoshis: satoshis)
    outputs = tx.outputs ++ [output]

    tx
    |> Map.put(:outputs, outputs)
    |> Map.put(:change_index, index)
  end

  defp clear_signatures(%__MODULE__{} = tx) do
    inputs = tx.inputs
    |> Enum.map(&(Map.put(&1, :script, nil)))
    Map.put(tx, :inputs, inputs)
  end
end
|
lib/bsv/transaction.ex
| 0.888656 | 0.495239 |
transaction.ex
|
starcoder
|
defmodule AshGraphql.Resource.Mutation do
  @moduledoc "Represents a configured mutation on a resource"

  # :name/:action/:type are common to all mutation kinds; :identity and
  # :read_action apply to update/destroy, :upsert? to create.
  defstruct [:name, :action, :type, :identity, :read_action, :upsert?]

  # NOTE(review): the default name `:get` appears on all three schemas and
  # looks copied from a query schema — confirm it is intended for mutations.
  @create_schema [
    name: [
      type: :atom,
      doc: "The name to use for the mutation.",
      default: :get
    ],
    action: [
      type: :atom,
      doc: "The action to use for the mutation.",
      required: true
    ],
    upsert?: [
      type: :boolean,
      default: false,
      doc: "Whether or not to use the `upsert?: true` option when calling `YourApi.create/2`."
    ]
  ]

  @update_schema [
    name: [
      type: :atom,
      doc: "The name to use for the mutation.",
      default: :get
    ],
    action: [
      type: :atom,
      doc: "The action to use for the mutation.",
      required: true
    ],
    identity: [
      type: :atom,
      doc: """
      The identity to use to fetch the record to be updated.

      If no identity is required (e.g for a read action that already knows how to fetch the item to be updated), use `false`.
      """
    ],
    read_action: [
      type: :atom,
      doc:
        "The read action to use to fetch the record to be updated. Defaults to the primary read action."
    ]
  ]

  @destroy_schema [
    name: [
      type: :atom,
      doc: "The name to use for the mutation.",
      default: :get
    ],
    action: [
      type: :atom,
      doc: "The action to use for the mutation.",
      required: true
    ],
    read_action: [
      type: :atom,
      doc:
        "The read action to use to fetch the record to be destroyed. Defaults to the primary read action."
    ],
    identity: [
      type: :atom,
      doc: """
      The identity to use to fetch the record to be destroyed.

      If no identity is required (e.g for a read action that already knows how to fetch the item to be updated), use `false`.
      """
    ]
  ]

  @doc "Returns the option schema for `create` mutations."
  def create_schema, do: @create_schema

  @doc "Returns the option schema for `update` mutations."
  def update_schema, do: @update_schema

  @doc "Returns the option schema for `destroy` mutations."
  def destroy_schema, do: @destroy_schema
end
|
lib/resource/mutation.ex
| 0.85269 | 0.51623 |
mutation.ex
|
starcoder
|
defmodule Gim.Query do
@moduledoc """
Defines queries on schemas.
"""
defstruct type: nil,
filter: {:and, []},
expand: []
import Gim.Queryable
alias Gim.Index
# Normalizes any queryable into a %Gim.Query{}: an existing query passes
# through, a node or node list becomes a query filtered to those nodes' ids,
# and anything else is delegated to to_query/1 (from Gim.Queryable).
def query(%__MODULE__{} = query) do
  query
end

def query(node) when is_map(node) do
  query([node])
end

def query([%type{} | _rest] = nodes) do
  # All list elements are assumed to share the first element's struct type.
  type
  |> to_query()
  |> __query__(nodes)
end

def query(queryable) do
  to_query(queryable)
end
# Builds a query over the nodes reachable from `node`/`nodes` via `edge`.
def query(node, edge) when is_map(node) do
  query([node], edge)
end

def query([%type{} | _rest] = nodes, edge) do
  # Resolve the association on the node type; its target type becomes the
  # root of the new query. Unknown edges raise.
  case type.__schema__(:association, edge) do
    {_, _, type, _} ->
      __query__(to_query(type), edge, nodes)

    _ ->
      raise Gim.QueryError, "No edge #{inspect(edge)} in #{type}"
  end
end
# Folds a node list into the query as :or filters on __id__.
def __query__(query, []) do
  query
end

def __query__(%__MODULE__{type: type} = query, [%type{__id__: id} | nodes])
    when not is_nil(id) do
  # Each node must match the query's type and already carry an id.
  query
  |> filter(:or, __id__: id)
  |> __query__(nodes)
end
# Collects, for every node, the targets of `edge` — either raw integer ids
# (unresolved edges) or loaded node structs — into the query as :or filters.
def __query__(query, _edge, []) do
  query
end

def __query__(%__MODULE__{} = query, edge, [%{} = node | nodes]) do
  edge = Map.fetch!(node, edge)

  # List.wrap/1 unifies singular and plural (has_one/has_many) edge values.
  edge
  |> List.wrap()
  |> Enum.reduce(query, fn node_or_id, query ->
    if is_integer(node_or_id) do
      # Unresolved edge: plain id reference.
      filter(query, :or, __id__: node_or_id)
    else
      # Resolved edge: a node struct.
      __query__(query, node_or_id)
    end
  end)
  |> __query__(nodes)
end
@doc """
Adds a new filter to the query.
"""
def filter(queryable, op \\ nil, filter)
def filter(%__MODULE__{} = query, nil, {op, _} = filter) when op in [:and, :or] do
%__MODULE__{query | filter: __join_filter__(query.filter, filter)}
end
def filter(%__MODULE__{} = query, op, {op, _} = filter) when op in [:and, :or] do
%__MODULE__{query | filter: __join_filter__(query.filter, filter)}
end
def filter(%__MODULE__{} = query, opx, {op, _} = filter) when op in [:and, :or] do
%__MODULE__{query | filter: __join_filter__(query.filter, {opx, [filter]})}
end
def filter(%__MODULE__{} = query, op, filter) when is_list(filter) do
%__MODULE__{query | filter: __join_filter__(query.filter, {op || :and, filter})}
end
@doc false
def __join_filter__({_, []}, filter) do
filter
end
def __join_filter__(filter, {_, []}) do
filter
end
def __join_filter__({op, filter_left}, {op, filter_right}) do
{op, filter_left ++ filter_right}
end
def __join_filter__(left_filter, {op, filter_right}) do
{op, [left_filter | filter_right]}
end
def expand(queryable, edge_or_path)
def expand(%__MODULE__{type: type, expand: expand} = query, path) do
%__MODULE__{query | expand: __join_expand__(type, expand, path)}
end
@doc false
def __join_expand__(type, expand, edge) when not is_list(edge) do
__join_expand__(type, expand, [edge])
end
def __join_expand__(type, expand, [{edge, nested} | path]) do
case type.__schema__(:association, edge) do
{_name, _cardinality, nested_type, _} ->
nested_expand = Keyword.get(expand, edge, [])
expand = Keyword.put(expand, edge, __join_expand__(nested_type, nested_expand, nested))
__join_expand__(type, expand, path)
nil ->
raise Gim.QueryError, "No edge #{inspect(edge)} in #{type}"
end
end
def __join_expand__(type, expand, [edge | path]) do
__join_expand__(type, expand, [{edge, []} | path])
end
def __join_expand__(_type, expand, []) do
expand
end
@doc """
Returns the target nodes following the edges of given label for the given node.
"""
def edges([%{__repo__: repo} | _] = nodes, assoc) do
nodes
|> query(assoc)
|> repo.resolve!()
end
def edges(node, assoc) when is_map(node) do
edges([node], assoc)
end
@doc """
Returns wether the given node has any outgoing edges.
"""
def has_edges?(%type{} = node) do
assocs = type.__schema__(:associations)
Enum.any?(assocs, &has_edge?(node, &1))
end
@doc """
Returns wether the given node has any outgoing edges for the given label.
"""
def has_edge?(%type{} = node, assoc) do
edge = Map.get(node, assoc)
case type.__schema__(:association, assoc) do
{_, :one, _, _} ->
!is_nil(edge)
{_, :many, _, _} ->
length(edge) > 0
_ ->
raise Gim.UnknownEdgeError, "No edge #{inspect(assoc)} in #{inspect(type)}"
end
end
def add_edge(%struct{} = node, assoc, targets) when is_list(targets) do
assoc = struct.__schema__(:association, assoc)
Enum.reduce(targets, node, &__add_edge__(&2, assoc, &1))
end
def add_edge(%struct{} = node, assoc, target) do
assoc = struct.__schema__(:association, assoc)
__add_edge__(node, assoc, target)
end
def __add_edge__(node, {assoc, :one, type, _}, %type{__id__: id}) do
%{node | assoc => id}
end
def __add_edge__(node, {assoc, :many, type, _}, %type{__id__: id}) do
ids = Index.add(Map.fetch!(node, assoc), id)
%{node | assoc => ids}
end
def delete_edge(%struct{} = node, assoc, targets) when is_list(targets) do
assoc = struct.__schema__(:association, assoc)
Enum.reduce(targets, node, &__delete_edge__(&2, assoc, &1))
end
def delete_edge(%struct{} = node, assoc, target) do
assoc = struct.__schema__(:association, assoc)
__delete_edge__(node, assoc, target)
end
@doc false
def __delete_edge__(node, {assoc, :one, type, _}, %type{}) do
%{node | assoc => nil}
end
def __delete_edge__(node, {assoc, :many, type, _}, %type{__id__: id}) do
ids = Index.remove(Map.fetch!(node, assoc), id)
%{node | assoc => ids}
end
def clear_edges(%struct{} = node) do
assocs = struct.__schema__(:associations)
Enum.reduce(assocs, node, fn assoc, node ->
clear_edge(node, assoc)
end)
end
def clear_edge(%struct{} = node, assoc) do
case struct.__schema__(:association, assoc) do
{_, :one, _, _} ->
Map.put(node, assoc, nil)
{_, :many, _, _} ->
Map.put(node, assoc, [])
_ ->
node
end
end
# Node set operations
def intersection(nodes1, nodes2) when is_list(nodes1) and is_list(nodes2) do
# TODO: check node type
Enum.filter(nodes1, fn %{__id__: a} ->
Enum.any?(nodes2, fn %{__id__: b} ->
a == b
end)
end)
end
def reachable(nodes, edge, target) when is_list(nodes) do
Enum.filter(nodes, fn node ->
reachable(node, edge, target)
end)
end
def reachable(node, edge, target) do
# TODO: check node type
edges = Map.fetch!(node, edge)
reachable(edges, target)
end
defp reachable(edges, target) when is_list(edges) do
Enum.any?(edges, fn e ->
reachable(e, target)
end)
end
defp reachable(edge, %{__id__: id}) do
edge == id
end
# Repo operations
@doc """
Computes all isolated nodes from a repo.
"""
def isolated(repo) do
all_nodes = repo.dump()
lonely =
all_nodes
|> Enum.reject(&has_edges?/1)
|> Enum.map(fn %{__struct__: struct, __id__: id} -> {struct, id} end)
|> Enum.into(MapSet.new())
Enum.reduce(all_nodes, lonely, fn %{__struct__: struct} = node, lonely ->
assocs = struct.__schema__(:associations)
Enum.reduce(assocs, lonely, fn assoc, lonely ->
type = struct.__schema__(:type, assoc)
edges = Map.fetch!(node, assoc)
set_delete(lonely, type, edges)
end)
end)
all_nodes
|> Enum.filter(fn %{__struct__: struct, __id__: id} ->
MapSet.member?(lonely, {struct, id})
end)
end
defp set_delete(set, type, edges) when is_list(edges) do
Enum.reduce(edges, set, fn edge, set ->
set_delete(set, type, edge)
end)
end
defp set_delete(set, type, edge) do
MapSet.delete(set, {type, edge})
end
end
|
lib/gim/query.ex
| 0.836555 | 0.682362 |
query.ex
|
starcoder
|
defmodule Manic.TX do
  @moduledoc """
  Send transactions directly to miners and query the status of any transaction.

  By giving a transaction directly to a miner (instead of broadcasting it to the
  Bitcoin peer network), you are pushing the transaction directly to the centre
  of the network. As the miner will have already provided the correct fees to
  ensure the transaction is relayed and mined, you can confidently accept the
  transaction on a "zero confirmation" basis.

  This module allows developers to push transactions directly to miners and
  query the status of any transaction. As each payload from the Merchant API
  includes and is signed by the miner's [Miner ID](https://github.com/bitcoin-sv/minerid-reference),
  the response can be treated as a legally binding signed message backed by the
  miner's own proof of work.
  """
  alias Manic.{JSONEnvelope, Miner, Multi}

  @typedoc "Hex-encoded transaction ID."
  @type txid :: String.t

  @doc """
  Sends the given [`transaction`](`t:BSV.Tx.t/0`) directly to a [`miner`](`t:Manic.miner/0`).

  Returns the result in an `:ok` / `:error` tuple pair.

  The transaction can be passed as either a `t:BSV.Tx.t/0` or as a hex
  encoded binary.

  ## Options

  The `:as` option can be used to specify how to receive the result. The accepted
  values are:

  * `:payload` - The decoded JSON [`payload`](`t:Manic.JSONEnvelope.payload/0`) **(Default)**
  * `:envelope` - The raw [`JSON envelope`](`t:Manic.JSONEnvelope.t/0`)

  ## Examples

  To push a transaction to the miner.

      iex> Manic.TX.push(miner, tx)
      {:ok, %{
        "api_version" => "0.1.0",
        "current_highest_block_hash" => "00000000000000000397a5a37c1f9b409b4b58e76fd6bcac06db1a3004cccb38",
        "current_highest_block_height" => 631603,
        "miner_id" => "<KEY>",
        "result_description" => "",
        "return_result" => "success",
        "timestamp" => "2020-04-21T14:04:39.563Z",
        "tx_second_mempool_expiry" => 0,
        "txid" => "9c8c5cf37f4ad1a82891ff647b13ec968f3ccb44af2d9deaa205b03ab70a81fa",
        "verified" => true
      }}

  Using the `:as` option to return the [`JSON envelope`](`t:Manic.JSONEnvelope.t/0`).

      iex> Manic.TX.push(miner, tx, as: :envelope)
      {:ok, %Manic.JSONEnvelope{
        encoding: "UTF-8",
        mimetype: "application/json",
        payload: "{\\"apiVersion\\":\\"0.1.0\\",\\"timestamp\\":\\"2020-04-21T14:04:39.563Z\\",\\"txid\\":\\"\\"9c8c5cf37f4ad1a82891ff647b13ec968f3ccb44af2d9deaa205b03ab70a81fa\\"\\",\\"returnResult\\":\\"success\\",\\"resultDescription\\":\\"\\",\\"minerId\\":\\"03e92d3e5c3f7bd945dfbf48e7a99393b1bfb3f11f380ae30d286e7ff2aec5a270\\",\\"currentHighestBlockHash\\":\\"00000000000000000397a5a37c1f9b409b4b58e76fd6bcac06db1a3004cccb38\\",\\"currentHighestBlockHeight\\":631603,\\"txSecondMempoolExpiry\\":0}",
        public_key: "<KEY>",
        signature: "3045022100a490e469426f34fcf62d0f095c10039cf5a1d535c042172786c364d41de65b3a0220654273ca42b5e955179d617ea8252e64ddf74657aa0caebda7372b40a0f07a53",
        verified: true
      }}
  """
  @spec push(Manic.miner | Manic.multi_miner, BSV.Tx.t | String.t, keyword) ::
    {:ok, JSONEnvelope.payload | JSONEnvelope.t} |
    {:error, Exception.t} |
    Multi.result
  def push(miner, tx, options \\ [])

  # Struct form: hex-encode and re-enter the binary clause.
  def push(%Miner{} = miner, %BSV.Tx{} = tx, options),
    do: push(miner, BSV.Tx.to_binary(tx, encoding: :hex), options)

  def push(%Miner{} = miner, tx, options) when is_binary(tx) do
    format = Keyword.get(options, :as, :payload)

    # Validate locally before POSTing; verify the miner's signed envelope on
    # the way back. Non-2xx responses fall through to the `else` as {:ok, res}.
    with {:ok, _tx} <- validate_tx(tx),
         {:ok, %{body: body, status: status}} when status in 200..202 <-
           Tesla.post(miner.client, "/mapi/tx", %{"rawtx" => tx}),
         {:ok, body} <- JSONEnvelope.verify(body),
         {:ok, payload} <- JSONEnvelope.parse_payload(body) do
      res =
        case format do
          :envelope -> body
          _ -> payload
        end

      {:ok, res}
    else
      {:ok, res} ->
        {:error, "HTTP Error: #{res.status}"}

      {:error, err} ->
        {:error, err}
    end
  end

  # Fan the push out to every miner in the multi set.
  def push(%Multi{} = multi, tx, options) do
    multi
    |> Multi.async(__MODULE__, :push, [tx, options])
    |> Multi.yield
  end

  @doc """
  As `push/3` but returns the result or raises an exception if it fails.
  """
  @spec push!(Manic.miner | Manic.multi_miner, BSV.Tx.t | String.t, keyword) ::
    JSONEnvelope.payload | JSONEnvelope.t
  def push!(miner, tx, options \\ []) do
    case push(miner, tx, options) do
      {:ok, res} -> res
      {:error, error} -> raise error
    end
  end

  @doc """
  Query the status of a transaction by its [`txid`](`t:txid/0`), from the given
  [`miner`](`t:Manic.miner/0`).

  Returns the result in an `:ok` / `:error` tuple pair.

  ## Options

  The `:as` option can be used to specify how to receive the result. The accepted
  values are:

  * `:payload` - The decoded JSON [`payload`](`t:Manic.JSONEnvelope.payload/0`) **(Default)**
  * `:envelope` - The raw [`JSON envelope`](`t:Manic.JSONEnvelope.t/0`)

  ## Examples

  To get the status of a transaction:

      iex> Manic.TX.status(miner, "e4763d71925c2ac11a4de0b971164b099dbdb67221f03756fc79708d53b8800e")
      {:ok, %{
        "api_version" => "0.1.0",
        "block_hash" => "000000000000000000983dee680071d63939f4690a8a797c022eddadc88f925e",
        "block_height" => 630712,
        "confirmations" => 765,
        "miner_id" => "<KEY>",
        "result_description" => "",
        "return_result" => "success",
        "timestamp" => "2020-04-20T21:45:38.808Z",
        "tx_second_mempool_expiry" => 0,
        "verified" => true
      }}

  Using the `:as` option to return the [`JSON envelope`](`t:Manic.JSONEnvelope.t/0`).

      iex> Manic.TX.status(miner, txid, as: :envelope)
      {:ok, %Manic.JSONEnvelope{
        encoding: "UTF-8",
        mimetype: "application/json",
        payload: "{\\"apiVersion\\":\\"0.1.0\\",\\"timestamp\\":\\"2020-04-20T21:45:38.808Z\\",\\"returnResult\\":\\"success\\",\\"resultDescription\\":\\"\\",\\"blockHash\\":\\"000000000000000000983dee680071d63939f4690a8a797c022eddadc88f925e\\",\\"blockHeight\\":630712,\\"confirmations\\":765,\\"minerId\\":\\"<KEY>\\",\\"txSecondMempoolExpiry\\":0}",
        public_key: "03e92d3e5c3f7bd945dfbf48e7a99393b1bfb3f11f380ae30d286e7ff2aec5a270",
        signature: "304502210092b822497cfe065136522b33b0fbec790c77f62818bd252583a615efd35697af022059c4ca7e97c90960860ed9d7b0ff4a1601cfe207b638c672c60a44027aed1f2d",
        verified: true
      }}
  """
  # Fix: the spec referenced `TX.txid` which resolves to the nonexistent
  # `Elixir.TX` module from inside this module; the local type is `txid()`.
  @spec status(Manic.miner | Manic.multi_miner, txid, keyword) ::
    {:ok, JSONEnvelope.payload | JSONEnvelope.t} |
    {:error, Exception.t} |
    Multi.result
  def status(miner, txid, options \\ [])

  def status(%Miner{} = miner, txid, options) when is_binary(txid) do
    format = Keyword.get(options, :as, :payload)

    with {:ok, txid} <- validate_txid(txid),
         {:ok, %{body: body, status: status}} when status in 200..202 <-
           Tesla.get(miner.client, "/mapi/tx/" <> txid),
         {:ok, body} <- JSONEnvelope.verify(body),
         {:ok, payload} <- JSONEnvelope.parse_payload(body) do
      res =
        case format do
          :envelope -> body
          _ -> payload
        end

      {:ok, res}
    else
      {:ok, res} ->
        {:error, "HTTP Error: #{res.status}"}

      {:error, err} ->
        {:error, err}
    end
  end

  # Fan the status query out to every miner in the multi set.
  def status(%Multi{} = multi, txid, options) do
    multi
    |> Multi.async(__MODULE__, :status, [txid, options])
    |> Multi.yield
  end

  @doc """
  As `status/3` but returns the result or raises an exception if it fails.
  """
  @spec status!(Manic.miner | Manic.multi_miner, String.t, keyword) ::
    JSONEnvelope.payload | JSONEnvelope.t
  def status!(miner, txid, options \\ []) do
    case status(miner, txid, options) do
      {:ok, res} -> res
      {:error, error} -> raise error
    end
  end

  # Validates the given transaction binary by attempting to parse it.
  defp validate_tx(tx) when is_binary(tx) do
    try do
      {:ok, BSV.Tx.from_binary!(tx, encoding: :hex)}
    rescue
      _err -> {:error, "Not valid transaction"}
    end
  end

  # Validates the given txid binary by regex matching it.
  defp validate_txid(txid) do
    case String.match?(txid, ~r/^[a-f0-9]{64}$/i) do
      true -> {:ok, txid}
      false -> {:error, "Not valid TXID"}
    end
  end
end
|
lib/manic/tx.ex
| 0.880071 | 0.701767 |
tx.ex
|
starcoder
|
defmodule ExInsights.Decoration.Attributes do
  @moduledoc """
  Injects decorator functions into parent module to streamline telemetry logging in aspect-oriented style
  """

  # Registers three decorators via the Decorator library: the arities are the
  # number of *decorator* arguments (track_dependency takes a `type`).
  use Decorator.Define,
    track_event: 0,
    track_dependency: 1,
    track_exception: 0

  # Decorator: emits a custom event named after the decorated function, then
  # runs the original body unchanged (the body's value is the result).
  def track_event(body, %{name: name}) do
    quote do
      unquote(name)
      |> to_string()
      |> ExInsights.track_event()
      unquote(body)
    end
  end

  # Decorator: times the decorated function and records it as a dependency of
  # the given `type`. Records success/failure and re-raises / re-exits so the
  # caller still observes the original outcome.
  def track_dependency(type, body, %{module: module, name: name, args: args}) do
    quote do
      module = unquote(module)
      name = unquote(name)
      args = unquote(args)
      type = unquote(type)
      start = :os.timestamp()

      try do
        result = unquote(body)
        finish = :os.timestamp()
        # success = true
        # A result of {:error, _} counts as a failed dependency call.
        success = ExInsights.Decoration.Attributes.success?(result)

        ExInsights.Decoration.Attributes.do_track_dependency(
          start,
          finish,
          module,
          name,
          args,
          type,
          success
        )

        result
      rescue
        e ->
          finish = :os.timestamp()
          # NOTE(review): System.stacktrace/0 is deprecated; inside rescue it
          # returns the current exception's stacktrace (same as __STACKTRACE__
          # on Elixir >= 1.7) — confirm project's minimum Elixir version
          # before migrating.
          trace = System.stacktrace()

          ExInsights.Decoration.Attributes.do_track_dependency(
            start,
            finish,
            module,
            name,
            args,
            type,
            false
          )

          reraise(e, trace)
      catch
        :exit, reason ->
          finish = :os.timestamp()

          ExInsights.Decoration.Attributes.do_track_dependency(
            start,
            finish,
            module,
            name,
            args,
            type,
            false
          )

          # Propagate the exit to self so the process still terminates.
          :erlang.exit(self(), reason)
      end
    end
  end

  # A tagged error tuple is a failure; every other value is treated as success.
  def success?({:error, _}), do: false
  def success?(_), do: true

  # Records one dependency call: "Module.fun" with inspected args, duration in
  # milliseconds, success flag and dependency type.
  def do_track_dependency(start, finish, module, name, args, type, success) do
    diff = ExInsights.Utils.diff_timestamp_millis(start, finish)

    "#{inspect(module)}.#{name}"
    |> ExInsights.track_dependency(inspect(args), diff, success, type)
  end

  # Decorator: reports raised exceptions and exits to ExInsights, then
  # re-raises / re-exits so behavior is unchanged for callers.
  def track_exception(body, _context) do
    quote do
      try do
        unquote(body)
      rescue
        e ->
          # NOTE(review): deprecated System.stacktrace/0 — see note above in
          # track_dependency/3.
          trace = System.stacktrace()
          ExInsights.track_exception(e, trace)
          reraise(e, trace)
      catch
        # see format_exit https://github.com/elixir-lang/elixir/blob/master/lib/elixir/lib/exception.ex#L364
        :exit, {{%{} = exception, maybe_stacktrace}, {m, f, a}} = reason ->
          msg = "#{Exception.message(exception)} @ #{Exception.format_mfa(m, f, a)}}"

          # The second element is only a stacktrace sometimes; fall back to [].
          trace =
            case ExInsights.Utils.stacktrace?(maybe_stacktrace) do
              true -> maybe_stacktrace
              false -> []
            end

          ExInsights.track_exception(msg, trace)
          :erlang.exit(self(), reason)

        :exit, reason ->
          msg = inspect(reason)
          ExInsights.track_exception(msg, [])
          :erlang.exit(self(), reason)
      end
    end
  end
end
|
lib/decoration/attributes.ex
| 0.502441 | 0.407982 |
attributes.ex
|
starcoder
|
defmodule RePG2.Worker do
  @moduledoc false

  use GenServer

  require Logger

  alias RePG2.Impl

  def start_link, do: GenServer.start_link(__MODULE__, [], name: __MODULE__)

  @doc """
  Make a globally locked multi call to all `RePG2.Worker`s in the cluster.

  This function acquires a cluster-wide lock on the group `name`, ensuring
  that only one node can update the group at a time. Then, a
  `GenServer.multi_call` is made to all `RePG2.Worker`s with the given
  `message`.
  """
  def globally_locked_multi_call(name, message) do
    :global.trans({{__MODULE__, name}, self()}, fn ->
      all_nodes = Node.list([:visible, :this])
      GenServer.multi_call(all_nodes, RePG2.Worker, message)
    end)
  end

  # Fix: callbacks were missing @impl annotations, so the compiler could not
  # check them against the GenServer behaviour.
  @impl true
  def init([]) do
    nodes = Node.list()

    # Subscribe to :nodeup/:nodedown notifications (delivered to handle_info/2).
    :ok = :net_kernel.monitor_nodes(true)

    for new_node <- nodes do
      # Announce ourselves to each existing node's worker, and synthesize a
      # :nodeup for ourselves so we exchange memberships with all of them.
      send(worker_for(new_node), {:new_repg2, Node.self()})
      send(self(), {:nodeup, new_node})
    end

    :ok = Impl.init()

    {:ok, %{}}
  end

  @impl true
  def handle_call({:create, name}, _from, state) do
    Impl.assure_group(name)

    {:reply, :ok, state}
  end

  def handle_call({:join, name, pid}, _from, state) do
    # Only join if the group exists; reply :ok either way.
    if Impl.group_exists?(name), do: Impl.join_group(name, pid)

    {:reply, :ok, state}
  end

  def handle_call({:leave, name, pid}, _from, state) do
    if Impl.group_exists?(name), do: Impl.leave_group(name, pid)

    {:reply, :ok, state}
  end

  def handle_call({:delete, name}, _from, state) do
    Impl.delete_group(name)

    {:reply, :ok, state}
  end

  def handle_call(message, from, state) do
    # Unexpected call: log and do not reply (the caller will time out).
    _ =
      Logger.warn("""
      The RePG2 server received an unexpected message:
      handle_call(#{inspect(message)}, #{inspect(from)}, #{inspect(state)})
      """)

    {:noreply, state}
  end

  @impl true
  def handle_cast({:exchange, _node, all_memberships}, state) do
    # Merge the remote node's memberships into ours, joining only members we
    # don't already have.
    for {name, members} <- all_memberships,
        Impl.assure_group(name),
        member <- members -- Impl.group_members(name),
        do: Impl.join_group(name, member)

    {:noreply, state}
  end

  def handle_cast(_, state), do: {:noreply, state}

  @impl true
  def handle_info({:DOWN, _ref, :process, pid, _info}, state) do
    # A monitored member died: remove every one of its memberships.
    for name <- Impl.member_groups(pid),
        membership <- Impl.memberships_in_group(pid, name),
        do: Impl.leave_group(name, membership)

    {:noreply, state}
  end

  def handle_info({:nodeup, new_node}, state) do
    exchange_all_memberships(new_node)

    {:noreply, state}
  end

  def handle_info({:new_repg2, new_node}, state) do
    exchange_all_memberships(new_node)

    {:noreply, state}
  end

  def handle_info(_, state), do: {:noreply, state}

  # Sends our full group membership table to the worker on `node_name`.
  defp exchange_all_memberships(node_name) do
    all_memberships = for group <- Impl.all_groups(), do: {group, Impl.group_members(group)}

    node_name
    |> worker_for()
    |> GenServer.cast({:exchange, Node.self(), all_memberships})
  end

  # A worker is addressed by its registered name on the remote node.
  defp worker_for(node_name), do: {__MODULE__, node_name}
end
|
lib/repg2/worker.ex
| 0.697506 | 0.406685 |
worker.ex
|
starcoder
|
defmodule Exq.Stats.Server do
  @moduledoc """
  Stats process is responsible for recording all stats into Redis.

  The stats format is compatible with the Sidekiq stats format, so that
  the Sidekiq UI can be also used to view Exq status as well, and Exq
  can run side by side with Sidekiq without breaking any of its UI.

  This includes job success/failure as well as in-progress jobs.

  Stat messages are enqueued via casts and flushed to Redis in batches on a
  timer (see `:stats_flush_interval`), so recording a stat never blocks the
  worker.
  """
  use GenServer

  alias Exq.Redis.JobStat
  alias Exq.Support.Config
  alias Exq.Support.Process
  alias Exq.Support.Time
  alias Exq.Redis.Connection

  require Logger

  defmodule State do
    # redis: Redis connection reference passed in at start
    # queue: an Erlang :queue of pending stat messages awaiting flush
    defstruct redis: nil, queue: :queue.new
  end

  @doc """
  Add in progress worker process
  """
  def add_process(stats, namespace, worker, host, job_serialized) do
    process_info = %Process{pid: worker,
      host: host,
      job: Exq.Support.Config.serializer.decode_job(job_serialized),
      started_at: Time.unix_seconds}
    # Encode once here so the GenServer doesn't redo the work per message.
    serialized = Exq.Support.Process.encode(process_info)
    GenServer.cast(stats, {:add_process, namespace, process_info, serialized})
    {:ok, process_info}
  end

  @doc """
  Remove in progress worker process
  """
  def process_terminated(stats, namespace, process_info) do
    serialized = Exq.Support.Process.encode(process_info)
    GenServer.cast(stats, {:process_terminated, namespace, process_info, serialized})
    :ok
  end

  @doc """
  Record job as successfully processes
  """
  def record_processed(stats, namespace, job) do
    GenServer.cast(stats, {:record_processed, namespace, job})
    :ok
  end

  @doc """
  Record job as failed
  """
  def record_failure(stats, namespace, error, job) do
    GenServer.cast(stats, {:record_failure, namespace, error, job})
    :ok
  end

  @doc """
  Cleanup stats on boot. This includes cleaning up busy workers.
  """
  def cleanup_host_stats(stats, namespace, host) do
    # Synchronous call: boot must not proceed until stale stats are removed.
    GenServer.call(stats, {:cleanup_host_stats, namespace, host})
    :ok
  end

  # Derives the registered name of the stats server from the Exq instance name.
  def server_name(name) do
    name = name || Exq.Support.Config.get(:name)
    "#{name}.Stats" |> String.to_atom
  end

  # Synchronously flushes all queued stats to Redis (used by tests/shutdown).
  def force_flush(stats) do
    GenServer.call(stats, :force_flush)
  end

  ##===========================================================
  ## gen server callbacks
  ##===========================================================

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
  end

  def init(opts) do
    # Trap exits so terminate/2 runs and pending stats get flushed on shutdown.
    Elixir.Process.flag(:trap_exit, true)
    Elixir.Process.send_after(self(), :flush, Config.get(:stats_flush_interval))
    {:ok, %State{redis: opts[:redis]}}
  end

  # Every stat message is simply enqueued; actual Redis writes happen on flush.
  def handle_cast(msg, state) do
    state = %{state | queue: :queue.in(msg, state.queue)}
    {:noreply, state}
  end

  def handle_call(:force_flush, _from, state) do
    queue = process_queue(state.queue, state, [])
    state = %{state | queue: queue}
    {:reply, :ok, state}
  end

  def handle_call({:cleanup_host_stats, namespace, host}, _from, state) do
    # Best-effort: cleanup failure is logged but must not crash the server.
    try do
      JobStat.cleanup_processes(state.redis, namespace, host)
    rescue
      e -> Logger.error("Error cleaning up processes - #{Kernel.inspect e}")
    end
    {:reply, :ok, state}
  end

  # Periodic flush tick: drain the queue to Redis, then re-arm the timer.
  def handle_info(:flush, state) do
    queue = process_queue(state.queue, state, [])
    state = %{state | queue: queue}
    Elixir.Process.send_after(self(), :flush, Config.get(:stats_flush_interval))
    {:noreply, state}
  end

  def terminate(_reason, state) do
    # flush any pending stats
    process_queue(state.queue, state, [])
    :ok
  end

  ##===========================================================
  ## Methods
  ##===========================================================

  # Drains the message queue, translating each message into Redis commands and
  # pipelining them in batches of :stats_batch_size. Returns the (now empty)
  # queue so the caller can store it back in state.
  def process_queue(queue, state, redis_batch, size \\ 0) do
    case :queue.out(queue) do
      {:empty, q} ->
        # Flush whatever is left in the current partial batch.
        if size > 0 do
          Connection.qp!(state.redis, redis_batch)
        end
        q
      {{:value, msg}, q} ->
        if size < Config.get(:stats_batch_size) do
          redis_batch = redis_batch ++ generate_instructions(msg)
          process_queue(q, state, redis_batch, size + 1)
        else
          # Batch is full: send it, then start a new batch with this message.
          Connection.qp!(state.redis, redis_batch)
          redis_batch = [] ++ generate_instructions(msg)
          process_queue(q, state, redis_batch, 1)
        end
    end
  end

  # Maps one queued stat message to its list of Redis commands.
  def generate_instructions({:add_process, namespace, process_info, serialized}) do
    JobStat.add_process_commands(namespace, process_info, serialized)
  end

  def generate_instructions({:record_processed, namespace, job}) do
    JobStat.record_processed_commands(namespace, job)
  end

  def generate_instructions({:record_failure, namespace, error, job}) do
    JobStat.record_failure_commands(namespace, error, job)
  end

  def generate_instructions({:process_terminated, namespace, process, serialized}) do
    JobStat.remove_process_commands(namespace, process, serialized)
  end
end
|
lib/exq/stats/server.ex
| 0.662469 | 0.408129 |
server.ex
|
starcoder
|
defmodule Grizzly.CommandClass.ScheduleEntryLock.YearDaySet do
  @moduledoc """
  Command for working with SCHEDULE_ENTRY_LOCK command class YEAR_DAY_SET command

  Command Options:

    * `:action` - The action either to erase or to modify the slot
    * `:slot_id` - The schedule slot id
    * `:user_id` - The schedule user id for the EntryLock
    * `:start_year` - A value from 0 to 99 that represents the 2 year in the century
    * `:start_month` - A value from 1 to 12 that represents the month in a year
    * `:start_day` - A value from 1 to 31 that represents the date of the month
    * `:start_hour` - A value from 0 to 23 representing the starting hour of the time fence
    * `:start_minute` - A value from 0 to 59 representing the starting minute of the time fence
    * `:stop_year` - A value from 0 to 99 that represents the 2 year in the century
    * `:stop_month` - A value from 1 to 12 that represents the month in a year
    * `:stop_day` - A value from 1 to 31 that represents the date of the month
    * `:stop_hour` - A value from 0 to 23 representing the starting hour of the time fence
    * `:stop_minute` - A value from 0 to 59 representing the starting minute of the time fence
    * `:seq_number` - The sequence number used in the Z/IP packet
    * `:retries` - The number of attempts to send the command (default 2)
  """
  @behaviour Grizzly.Command

  alias Grizzly.Packet
  alias Grizzly.Command.{EncodeError, Encoding}
  alias Grizzly.CommandClass.ScheduleEntryLock

  # Fix: the original type listed `start_hour` and `stop_hour` twice each;
  # the duplicates are removed.
  @type t :: %__MODULE__{
          seq_number: Grizzly.seq_number(),
          retries: non_neg_integer(),
          action: ScheduleEntryLock.enable_action(),
          user_id: non_neg_integer,
          slot_id: non_neg_integer,
          start_year: non_neg_integer,
          start_month: non_neg_integer,
          start_day: non_neg_integer,
          start_hour: non_neg_integer,
          start_minute: non_neg_integer,
          stop_year: non_neg_integer,
          stop_month: non_neg_integer,
          stop_day: non_neg_integer,
          stop_hour: non_neg_integer,
          stop_minute: non_neg_integer
        }

  # Fix: `:start_day` and `:stop_day` were missing from the option union even
  # though the struct and encode/1 validation both use them.
  @type opt ::
          {:seq_number, Grizzly.seq_number()}
          | {:retries, non_neg_integer()}
          | {:user_id, non_neg_integer()}
          | {:slot_id, non_neg_integer()}
          | {:action, ScheduleEntryLock.enable_action()}
          | {:start_year, non_neg_integer()}
          | {:start_month, non_neg_integer()}
          | {:start_day, non_neg_integer()}
          | {:start_hour, non_neg_integer()}
          | {:start_minute, non_neg_integer()}
          | {:stop_year, non_neg_integer()}
          | {:stop_month, non_neg_integer()}
          | {:stop_day, non_neg_integer()}
          | {:stop_hour, non_neg_integer()}
          | {:stop_minute, non_neg_integer()}

  defstruct seq_number: nil,
            retries: 2,
            user_id: nil,
            slot_id: nil,
            action: nil,
            start_year: nil,
            start_month: nil,
            start_day: nil,
            start_hour: nil,
            start_minute: nil,
            stop_year: nil,
            stop_month: nil,
            stop_day: nil,
            stop_hour: nil,
            stop_minute: nil

  @doc "Builds the command struct from the given options."
  @spec init([opt]) :: {:ok, t}
  def init(opts) do
    {:ok, struct(__MODULE__, opts)}
  end

  @doc """
  Encodes the command into a Z/IP packet binary, validating each field's
  range first. Returns `{:error, %EncodeError{}}` on invalid options.
  """
  @spec encode(t) :: {:ok, binary} | {:error, EncodeError.t()}
  def encode(
        %__MODULE__{
          user_id: user_id,
          slot_id: slot_id,
          action: _action,
          start_year: _start_year,
          start_month: start_month,
          start_day: start_day,
          start_hour: start_hour,
          start_minute: start_minute,
          stop_year: _stop_year,
          stop_month: stop_month,
          stop_day: stop_day,
          stop_hour: stop_hour,
          stop_minute: stop_minute,
          seq_number: seq_number
        } = command
      ) do
    with {:ok, encoded} <-
           Encoding.encode_and_validate_args(command, %{
             user_id: :byte,
             slot_id: :byte,
             action: {:encode_with, ScheduleEntryLock, :encode_enable_action},
             start_year: {:encode_with, ScheduleEntryLock, :encode_year},
             start_month: {:range, 1, 12},
             start_day: {:range, 1, 31},
             start_hour: {:range, 0, 23},
             start_minute: {:range, 0, 59},
             stop_year: {:encode_with, ScheduleEntryLock, :encode_year},
             stop_month: {:range, 1, 12},
             stop_day: {:range, 1, 31},
             stop_hour: {:range, 0, 23},
             stop_minute: {:range, 0, 59}
           }) do
      # NOTE(review): 0x4E/0x06 are presumably the SCHEDULE_ENTRY_LOCK
      # command class / YEAR_DAY_SET command bytes — confirm against the
      # Z-Wave spec tables used elsewhere in Grizzly.
      binary =
        Packet.header(seq_number) <>
          <<
            0x4E,
            0x06,
            encoded.action::size(8),
            user_id,
            slot_id,
            encoded.start_year,
            start_month,
            start_day,
            start_hour,
            start_minute,
            encoded.stop_year,
            stop_month,
            stop_day,
            stop_hour,
            stop_minute
          >>

      {:ok, binary}
    end
  end

  @doc """
  Handles a Z/IP packet response for this command.

  ACK completes the command; NACK retries until `retries` is exhausted; a
  year-day report completes with the report; a nack-waiting packet either
  queues or continues depending on the sleeping delay.
  """
  # Fix: the spec omitted the `{:queued, t}` and `{:done, {:ok, report}}`
  # returns that the clauses below actually produce.
  @spec handle_response(t, Packet.t()) ::
          {:continue, t}
          | {:done, {:error, :nack_response}}
          | {:done, :ok}
          | {:done, {:ok, any()}}
          | {:queued, t}
          | {:retry, t}
  def handle_response(
        %__MODULE__{seq_number: seq_number} = _command,
        %Packet{
          seq_number: seq_number,
          types: [:ack_response]
        }
      ) do
    {:done, :ok}
  end

  def handle_response(
        %__MODULE__{seq_number: seq_number, retries: 0},
        %Packet{
          seq_number: seq_number,
          types: [:nack_response]
        }
      ) do
    {:done, {:error, :nack_response}}
  end

  def handle_response(
        %__MODULE__{seq_number: seq_number, retries: n} = command,
        %Packet{
          seq_number: seq_number,
          types: [:nack_response]
        }
      ) do
    {:retry, %{command | retries: n - 1}}
  end

  def handle_response(
        _,
        %Packet{
          body: %{
            command_class: :schedule_entry_lock,
            command: :year_day_report,
            value: report
          }
        }
      ) do
    {:done, {:ok, report}}
  end

  def handle_response(
        %__MODULE__{seq_number: seq_number} = command,
        %Packet{
          seq_number: seq_number,
          types: [:nack_response, :nack_waiting]
        } = packet
      ) do
    if Packet.sleeping_delay?(packet) do
      {:queued, command}
    else
      {:continue, command}
    end
  end

  # Any other packet is not for us; keep waiting.
  def handle_response(command, _), do: {:continue, command}
end
|
lib/grizzly/command_class/schedule_entry_lock/year_day_set.ex
| 0.857037 | 0.457561 |
year_day_set.ex
|
starcoder
|
defmodule Zaryn.TransactionChain.Transaction.ValidationStamp do
@moduledoc """
Represents a validation stamp created by a coordinator on a pending transaction
"""
alias Zaryn.Crypto
alias __MODULE__.LedgerOperations
defstruct [
:timestamp,
:signature,
:proof_of_work,
:proof_of_integrity,
:proof_of_election,
ledger_operations: %LedgerOperations{},
recipients: [],
errors: []
]
@type error :: :contract_validation | :oracle_validation
@typedoc """
Validation performed by a coordinator:
- Timestamp: DateTime instance representing the timestamp of the transaction validation
- Proof of work: Origin public key matching the origin signature
- Proof of integrity: Integrity proof from the entire transaction chain
- Proof of election: Digest which define the election's order of validation nodes
- Ledger Operations: Set of ledger operations taken by the network such as fee, node movements, transaction movements and unspent outputs
- Recipients: List of the last smart contract chain resolved addresses
- Contract validation: Determine if the transaction coming from a contract is valid according to the constraints
- Signature: generated from the coordinator private key to avoid non-repudiation of the stamp
- Errors: list of errors returned by the pending transaction validation or after mining context
"""
@type t :: %__MODULE__{
timestamp: DateTime.t(),
signature: nil | binary(),
proof_of_work: Crypto.key(),
proof_of_integrity: Crypto.versioned_hash(),
proof_of_election: binary(),
ledger_operations: LedgerOperations.t(),
recipients: list(Crypto.versioned_hash()),
errors: list(atom())
}
# Signs the stamp: the signature covers the serialized stamp with its
# signature field excluded, signed with the last node key.
@spec sign(__MODULE__.t()) :: __MODULE__.t()
def sign(stamp = %__MODULE__{}) do
  payload = serialize(extract_for_signature(stamp))
  %{stamp | signature: Crypto.sign_with_last_node_key(payload)}
end
@doc """
Extract fields to prepare serialization for the signature
"""
@spec extract_for_signature(__MODULE__.t()) :: __MODULE__.t()
def extract_for_signature(stamp = %__MODULE__{}) do
  # Equivalent to rebuilding the struct field-by-field without the
  # signature: every other field is carried over, signature is reset.
  %__MODULE__{stamp | signature: nil}
end
@doc """
Serialize a validation stamp info binary format

## Examples

    iex> %ValidationStamp{
    ...>   timestamp: ~U[2021-05-07 13:11:19Z],
    ...>   proof_of_work: <<0, 0, 34, 248, 200, 166, 69, 102, 246, 46, 84, 7, 6, 84, 66, 27, 8, 78, 103, 37,
    ...>     155, 114, 208, 205, 40, 44, 6, 159, 178, 5, 186, 168, 237, 206>>,
    ...>   proof_of_integrity: <<0, 49, 174, 251, 208, 41, 135, 147, 199, 114, 232, 140, 254, 103, 186, 138, 175,
    ...>     28, 156, 201, 30, 100, 75, 172, 95, 135, 167, 180, 242, 16, 74, 87, 170>>,
    ...>   proof_of_election: <<195, 51, 61, 55, 140, 12, 138, 246, 249, 106, 198, 175, 145, 9, 255, 133, 67,
    ...>     240, 175, 53, 236, 65, 151, 191, 128, 11, 58, 103, 82, 6, 218, 31, 220, 114,
    ...>     65, 3, 151, 209, 9, 84, 209, 105, 191, 180, 156, 157, 95, 25, 202, 2, 169,
    ...>     112, 109, 54, 99, 40, 47, 96, 93, 33, 82, 40, 100, 13>>,
    ...>   ledger_operations: %LedgerOperations{
    ...>      fee: 0.1,
    ...>      transaction_movements: [],
    ...>      node_movements: [],
    ...>      unspent_outputs: []
    ...>   },
    ...>   signature: <<67, 12, 4, 246, 155, 34, 32, 108, 195, 54, 139, 8, 77, 152, 5, 55, 233, 217,
    ...>     126, 181, 204, 195, 215, 239, 124, 186, 99, 187, 251, 243, 201, 6, 122, 65,
    ...>     238, 221, 14, 89, 120, 225, 39, 33, 95, 95, 225, 113, 143, 200, 47, 96, 239,
    ...>     66, 182, 168, 35, 129, 240, 35, 183, 47, 69, 154, 37, 172>>
    ...> }
    ...> |> ValidationStamp.serialize()
    <<
    # Timestamp
    96, 149, 60, 119,
    # Proof of work
    0, 0, 34, 248, 200, 166, 69, 102, 246, 46, 84, 7, 6, 84, 66, 27, 8, 78, 103, 37,
    155, 114, 208, 205, 40, 44, 6, 159, 178, 5, 186, 168, 237, 206,
    # Proof of integrity
    0, 49, 174, 251, 208, 41, 135, 147, 199, 114, 232, 140, 254, 103, 186, 138, 175,
    28, 156, 201, 30, 100, 75, 172, 95, 135, 167, 180, 242, 16, 74, 87, 170,
    # Proof of election
    195, 51, 61, 55, 140, 12, 138, 246, 249, 106, 198, 175, 145, 9, 255, 133, 67,
    240, 175, 53, 236, 65, 151, 191, 128, 11, 58, 103, 82, 6, 218, 31, 220, 114,
    65, 3, 151, 209, 9, 84, 209, 105, 191, 180, 156, 157, 95, 25, 202, 2, 169,
    112, 109, 54, 99, 40, 47, 96, 93, 33, 82, 40, 100, 13,
    # Fee
    63, 185, 153, 153, 153, 153, 153, 154,
    # Nb of transaction movements
    0,
    # Nb of node movements
    0,
    # Nb of unspent outputs
    0,
    # Nb of resolved recipients addresses
    0,
    # Nb errors reported
    0,
    # Signature size
    64,
    # Signature
    67, 12, 4, 246, 155, 34, 32, 108, 195, 54, 139, 8, 77, 152, 5, 55, 233, 217,
    126, 181, 204, 195, 215, 239, 124, 186, 99, 187, 251, 243, 201, 6, 122, 65,
    238, 221, 14, 89, 120, 225, 39, 33, 95, 95, 225, 113, 143, 200, 47, 96, 239,
    66, 182, 168, 35, 129, 240, 35, 183, 47, 69, 154, 37, 172
    >>
"""
@spec serialize(__MODULE__.t()) :: bitstring()
# Unsigned stamp: serialize everything except the signature.
def serialize(%__MODULE__{
      timestamp: timestamp,
      proof_of_work: pow,
      proof_of_integrity: poi,
      proof_of_election: poe,
      ledger_operations: ledger_operations,
      recipients: recipients,
      errors: errors,
      signature: nil
    }) do
  pow =
    if pow == "" do
      # Use an all-zero key when no public key matched the origin signature.
      # FIX(review): the original literal here was corrupted by an
      # anonymization pass (it contained IPv6-looking tokens). Reconstructed
      # as curve-id byte, origin-id byte and a 256-bit zero key — confirm
      # against the upstream repository.
      <<0::8, 0::8, 0::256>>
    else
      pow
    end

  <<DateTime.to_unix(timestamp)::32, pow::binary, poi::binary, poe::binary,
    LedgerOperations.serialize(ledger_operations)::binary, length(recipients)::8,
    :erlang.list_to_binary(recipients)::binary, length(errors)::8,
    serialize_errors(errors)::bitstring>>
end

# Signed stamp: same layout, followed by signature size and signature bytes.
def serialize(%__MODULE__{
      timestamp: timestamp,
      proof_of_work: pow,
      proof_of_integrity: poi,
      proof_of_election: poe,
      ledger_operations: ledger_operations,
      recipients: recipients,
      errors: errors,
      signature: signature
    }) do
  pow =
    if pow == "" do
      # Same reconstructed all-zero key as in the unsigned clause — see the
      # FIX(review) note above.
      <<0::8, 0::8, 0::256>>
    else
      pow
    end

  <<DateTime.to_unix(timestamp)::32, pow::binary, poi::binary, poe::binary,
    LedgerOperations.serialize(ledger_operations)::binary, length(recipients)::8,
    :erlang.list_to_binary(recipients)::binary, length(errors)::8,
    serialize_errors(errors)::bitstring, byte_size(signature)::8, signature::binary>>
end
@doc """
Deserialize an encoded validation stamp
## Examples
iex> <<96, 149, 60, 119, 0, 0, 34, 248, 200, 166, 69, 102, 246, 46, 84, 7, 6, 84, 66, 27, 8, 78, 103, 37,
...> 155, 114, 208, 205, 40, 44, 6, 159, 178, 5, 186, 168, 237, 206,
...> 0, 49, 174, 251, 208, 41, 135, 147, 199, 114, 232, 140, 254, 103, 186, 138, 175,
...> 28, 156, 201, 30, 100, 75, 172, 95, 135, 167, 180, 242, 16, 74, 87, 170,
...> 195, 51, 61, 55, 140, 12, 138, 246, 249, 106, 198, 175, 145, 9, 255, 133, 67,
...> 240, 175, 53, 236, 65, 151, 191, 128, 11, 58, 103, 82, 6, 218, 31, 220, 114,
...> 65, 3, 151, 209, 9, 84, 209, 105, 191, 180, 156, 157, 95, 25, 202, 2, 169,
...> 112, 109, 54, 99, 40, 47, 96, 93, 33, 82, 40, 100, 13,
...> 63, 185, 153, 153, 153, 153, 153, 154, 0, 0, 0, 0, 0, 64,
...> 67, 12, 4, 246, 155, 34, 32, 108, 195, 54, 139, 8, 77, 152, 5, 55, 233, 217,
...> 126, 181, 204, 195, 215, 239, 124, 186, 99, 187, 251, 243, 201, 6, 122, 65,
...> 238, 221, 14, 89, 120, 225, 39, 33, 95, 95, 225, 113, 143, 200, 47, 96, 239,
...> 66, 182, 168, 35, 129, 240, 35, 183, 47, 69, 154, 37, 172>>
...> |> ValidationStamp.deserialize()
{
%ValidationStamp{
timestamp: ~U[2021-05-07 13:11:19Z],
proof_of_work: <<0, 0, 34, 248, 200, 166, 69, 102, 246, 46, 84, 7, 6, 84, 66, 27, 8, 78, 103, 37,
155, 114, 208, 205, 40, 44, 6, 159, 178, 5, 186, 168, 237, 206,>>,
proof_of_integrity: << 0, 49, 174, 251, 208, 41, 135, 147, 199, 114, 232, 140, 254, 103, 186, 138, 175,
28, 156, 201, 30, 100, 75, 172, 95, 135, 167, 180, 242, 16, 74, 87, 170>>,
proof_of_election: <<195, 51, 61, 55, 140, 12, 138, 246, 249, 106, 198, 175, 145, 9, 255, 133, 67,
240, 175, 53, 236, 65, 151, 191, 128, 11, 58, 103, 82, 6, 218, 31, 220, 114,
65, 3, 151, 209, 9, 84, 209, 105, 191, 180, 156, 157, 95, 25, 202, 2, 169,
112, 109, 54, 99, 40, 47, 96, 93, 33, 82, 40, 100, 13>>,
ledger_operations: %ValidationStamp.LedgerOperations{
fee: 0.1,
transaction_movements: [],
node_movements: [],
unspent_outputs: []
},
recipients: [],
errors: [],
signature: <<67, 12, 4, 246, 155, 34, 32, 108, 195, 54, 139, 8, 77, 152, 5, 55, 233, 217,
126, 181, 204, 195, 215, 239, 124, 186, 99, 187, 251, 243, 201, 6, 122, 65,
238, 221, 14, 89, 120, 225, 39, 33, 95, 95, 225, 113, 143, 200, 47, 96, 239,
66, 182, 168, 35, 129, 240, 35, 183, 47, 69, 154, 37, 172>>
},
""
}
"""
  # Parses the wire format produced by `serialize/2`, consuming fields in the
  # exact order they were written and returning the remaining (unconsumed)
  # bitstring alongside the struct.
  def deserialize(<<timestamp::32, rest::bitstring>>) do
    # Proof of work: curve id + origin id bytes determine the key length.
    <<pow_curve_id::8, pow_origin_id::8, rest::bitstring>> = rest
    pow_key_size = Crypto.key_size(pow_curve_id)
    <<pow_key::binary-size(pow_key_size), rest::bitstring>> = rest
    pow = <<pow_curve_id::8, pow_origin_id::8, pow_key::binary>>
    # Proof of integrity: hash algorithm id byte determines the digest length.
    <<poi_hash_id::8, rest::bitstring>> = rest
    poi_hash_size = Crypto.hash_size(poi_hash_id)
    # Proof of election is a fixed 64-byte blob.
    <<poi_hash::binary-size(poi_hash_size), poe::binary-size(64), rest::bitstring>> = rest
    # Ledger operations are delegated; the byte right after them is the
    # recipients count, then the error count follows the recipient list.
    {ledger_ops, <<recipients_length::8, rest::bitstring>>} = LedgerOperations.deserialize(rest)
    {recipients, <<nb_errors::8, rest::bitstring>>} =
      deserialize_list_of_recipients_addresses(rest, recipients_length, [])
    {errors, rest} = deserialize_errors(rest, nb_errors)
    # Signature is length-prefixed (1 byte of size).
    <<signature_size::8, signature::binary-size(signature_size), rest::bitstring>> = rest
    {
      %__MODULE__{
        timestamp: DateTime.from_unix!(timestamp),
        proof_of_work: pow,
        proof_of_integrity: <<poi_hash_id::8, poi_hash::binary>>,
        proof_of_election: poe,
        ledger_operations: ledger_ops,
        recipients: recipients,
        errors: errors,
        signature: signature
      },
      rest
    }
  end
@spec from_map(map()) :: __MODULE__.t()
def from_map(stamp = %{}) do
%__MODULE__{
timestamp: Map.get(stamp, :timestamp),
proof_of_work: Map.get(stamp, :proof_of_work),
proof_of_integrity: Map.get(stamp, :proof_of_integrity),
proof_of_election: Map.get(stamp, :proof_of_election),
ledger_operations:
Map.get(stamp, :ledger_operations, %LedgerOperations{}) |> LedgerOperations.from_map(),
recipients: Map.get(stamp, :recipients, []),
signature: Map.get(stamp, :signature),
errors: Map.get(stamp, :errors, [])
}
end
def from_map(nil), do: nil
@spec to_map(__MODULE__.t()) :: map()
def to_map(%__MODULE__{
timestamp: timestamp,
proof_of_work: pow,
proof_of_integrity: poi,
proof_of_election: poe,
ledger_operations: ledger_operations,
recipients: recipients,
signature: signature,
errors: errors
}) do
%{
timestamp: timestamp,
proof_of_work: pow,
proof_of_integrity: poi,
proof_of_election: poe,
ledger_operations: LedgerOperations.to_map(ledger_operations),
recipients: recipients,
signature: signature,
errors: errors
}
end
def to_map(nil), do: nil
@doc """
Determine if the validation stamp signature is valid
"""
@spec valid_signature?(__MODULE__.t(), Crypto.key()) :: boolean()
def valid_signature?(%__MODULE__{signature: nil}, _public_key), do: false
def valid_signature?(stamp = %__MODULE__{signature: signature}, public_key)
when is_binary(signature) do
raw_stamp =
stamp
|> extract_for_signature
|> serialize
Crypto.verify?(signature, raw_stamp, public_key)
end
  # Reads `nb_recipients` length-prefixed hash addresses from the bitstring.
  # Each address is 1 hash-algorithm-id byte followed by a digest whose size
  # depends on that id. Returns {addresses_in_read_order, remaining_bits}.
  # Shortcut: zero recipients consume nothing.
  defp deserialize_list_of_recipients_addresses(rest, 0, _acc), do: {[], rest}
  # Termination: stop once the accumulator holds the expected count.
  defp deserialize_list_of_recipients_addresses(rest, nb_recipients, acc)
       when length(acc) == nb_recipients do
    {Enum.reverse(acc), rest}
  end
  defp deserialize_list_of_recipients_addresses(
         <<hash_id::8, rest::bitstring>>,
         nb_recipients,
         acc
       ) do
    # The hash id determines how many digest bytes to read next.
    hash_size = Crypto.hash_size(hash_id)
    <<hash::binary-size(hash_size), rest::bitstring>> = rest
    # Prepend and reverse at the end (O(1) per element).
    deserialize_list_of_recipients_addresses(rest, nb_recipients, [
      <<hash_id::8, hash::binary>> | acc
    ])
  end
defp serialize_errors(errors, acc \\ [])
defp serialize_errors([], acc), do: :erlang.list_to_bitstring(acc)
defp serialize_errors([error | rest], acc) do
serialize_errors(rest, [serialize_error(error) | acc])
end
  # Reads `nb_errors` one-byte error codes from the bitstring and maps them
  # back to atoms. Returns {errors_in_read_order, remaining_bits}.
  defp deserialize_errors(bitstring, nb_errors, acc \\ [])
  # Termination: stop once the accumulator holds the expected count
  # (also covers nb_errors == 0, which consumes nothing).
  defp deserialize_errors(rest, nb_errors, acc) when length(acc) == nb_errors do
    {Enum.reverse(acc), rest}
  end
  defp deserialize_errors(<<error::8, rest::bitstring>>, nb_errors, acc) do
    deserialize_errors(rest, nb_errors, [deserialize_error(error) | acc])
  end
  # One-byte wire codes for the known validation errors; `serialize_error/1`
  # and `deserialize_error/1` must stay exact inverses of each other.
  defp serialize_error(:pending_transaction), do: 0
  defp serialize_error(:contract_validation), do: 1
  defp serialize_error(:oracle_validation), do: 2
  defp deserialize_error(0), do: :pending_transaction
  defp deserialize_error(1), do: :contract_validation
  defp deserialize_error(2), do: :oracle_validation
end
|
lib/zaryn/transaction_chain/transaction/validation_stamp.ex
| 0.900075 | 0.435061 |
validation_stamp.ex
|
starcoder
|
defmodule LearnKit.Preprocessing do
  @moduledoc """
  Module for data preprocessing
  """
  alias LearnKit.{Preprocessing, Math}
  use Preprocessing.Normalize
  @type row :: [number]
  @type matrix :: [row]
  @doc """
  Normalize data set with minimax normalization
  ## Parameters
    - features: list of features for normalization
  ## Examples
      iex> LearnKit.Preprocessing.normalize([[1, 2], [3, 4], [5, 6]])
      [
        [0.0, 0.0],
        [0.5, 0.5],
        [1.0, 1.0]
      ]
  """
  @spec normalize(matrix) :: matrix
  def normalize(features) when is_list(features), do: normalize(features, type: "minimax")
  @doc """
  Normalize data set
  ## Parameters
    - features: list of features for normalization
    - options: keyword list with options
  ## Options
    - type: minimax/z_normalization, default is minimax, optional
  ## Examples
      iex> LearnKit.Preprocessing.normalize([[1, 2], [3, 4], [5, 6]], [type: "z_normalization"])
      [
        [-1.224744871391589, -1.224744871391589],
        [0.0, 0.0],
        [1.224744871391589, 1.224744871391589]
      ]
  """
  @spec normalize(matrix, list) :: matrix
  def normalize(features, options) when is_list(features) and is_list(options) do
    # Anything other than an explicit "z_normalization" falls back to minimax.
    type =
      case Keyword.get(options, :type, "minimax") do
        "z_normalization" -> "z_normalization"
        _ -> "minimax"
      end

    normalization(features, type)
  end
  @doc """
  Prepare coefficients for normalization
  ## Parameters
    - features: features grouped by index
    - type: minimax/z_normalization
  ## Examples
      iex> LearnKit.Preprocessing.coefficients([[1, 2], [3, 4], [5, 6]], "minimax")
      [{1, 5}, {2, 6}]
      iex> LearnKit.Preprocessing.coefficients([[1, 2], [3, 4], [5, 6]], "z_normalization")
      [{3.0, 1.632993161855452}, {4.0, 1.632993161855452}]
  """
  @spec coefficients(matrix, String.t()) :: matrix
  def coefficients(features, type) when is_list(features) and is_binary(type) do
    # Transpose so each list holds the values of a single feature column.
    for column <- Math.transpose(features), do: return_params(column, type)
  end
  @doc """
  Normalize 1 feature with predefined coefficients
  ## Parameters
    - feature: feature for normalization
    - coefficients: predefined coefficients
    - type: minimax/z_normalization
  ## Examples
      iex> LearnKit.Preprocessing.normalize_feature([1, 2], [{1, 5}, {2, 6}], "minimax")
      [0.0, 0.0]
  """
  @spec normalize_feature(list, list(tuple), String.t()) :: list
  def normalize_feature(feature, coefficients, type) when is_list(feature) and is_list(coefficients) and is_binary(type) do
    feature
    |> Enum.zip(coefficients)
    |> Enum.map(fn {point, params} ->
      # A zero divider (constant feature) leaves the point untouched to
      # avoid a division by zero.
      case define_divider(params, type) do
        0 -> point
        divider -> (point - elem(params, 0)) / divider
      end
    end)
  end
end
|
lib/learn_kit/preprocessing.ex
| 0.870405 | 0.778818 |
preprocessing.ex
|
starcoder
|
defmodule Base2 do
  @moduledoc """
  This module provides data encoding and decoding functions for Base2.
  ## Overview
  Converting to and from Base2 is very simple in Elixir and Erlang. Unfortunately, most approaches use generic methodologies that are suitable for any Base, and thus do not optimize for any Base typically.
  Working with Base2 is a relatively simple task chaining a few Elixir built-in functions or using a third-party generic "BaseX" type library, but most of these implementations leave a lot to be desired. Generally, most built-in methods and third-party libraries are often not very optimized. Using built-in functions also is not uniform with other ways of handling Bases such as via the Elixir `Base` module. Most of these methods are great for scratch work, but less suitable for bulk encoding and decoding. Further, the multiple ways of approaching different bases lead to very inconsistent interfaces, for instance `Integer.to_string()` vs. `Base` vs a third-party module with its own conventions.
  `Base2` includes the following functionality:
  * Encodes and Decodes binaries as Base2.
  * Consistent with the `Base` module interface design.
  * Optionally preserves transparency encoding and decoding leading zeroes.
  * Reasonably fast, because every library claims it is fast.
    * Faster than libraries that take generic approaches to encode any Base either via module generation or runtime alphabets
    * Faster than doing `:binary.encode_unsigned("hello") |> Integer.to_string(2)`
    * Faster than methods using power-based functions that can also overflow when using the BIF
    * Avoids the div/rem operation that is the source of many slowdowns, such as in `Integer.to_string/2` and its wrapped `:erlang.integer_to_binary/2`
    * Uses less memory than most other methods
  * Option to remove extra padding if losing losing leading zeroes is desirable or wanted to produce smaller resulting binaries
    * Loses transparency
  * Control over shape of output binary
    * Force padding always, only for leading zeroes, or return a smaller binary representation
  ## Padding
  `Base2` allows full control over padding behavior when encoding a binary into a Base2 string.
  There are three options for padding:
  * `:zeroes` (default) - Allows zeroes to be padded to ensure transparent decode of leading zeroes.
  * `:all` - Uniformly pads the data with zeroes. String length will always a multiple of 8 and fast, but at the cost of an increased output size.
  * `:none` - Produces a smaller representation by dropping all leading zeroes, but at the cost of fully transparent decode if there are leading zeroes.
  `:zeroes` is good for general usage, while typically being smaller. Zeroes are fully padded to byte boundaries. Transparency is fully preserved.
  `:all` is good for uniformly working with Base2 string output. Transparency is fully preserved.
  `:none` exhibit the same behavior as methods such as `Integer.to_string("hello", 2)` that try to use a smaller representation. This method should be used with caution as it comes at a cost of transparency. Simply, if you want the exact same output when decoding as your input, do not use this option. If, however, you want the smallest representation, then it is a good choice.
  """
  @typedoc """
  A Base2 encoded string.
  """
  @type base2_binary() :: binary()
  @doc """
  Encodes a binary string into a base 2 encoded string.
  Accepts a `:padding` option which will control the padding behavior of the resulting strings.
  The options for `:padding` can be:
  * `:zeroes` (default) - Allows zeroes to be padded to ensure transparent decode of leading zeroes.
  * `:all` - Uniformly pads the data with zeroes. String length will always a multiple of 8 and fast, but at the cost of an increased output size.
  * `:none` - Produces a smaller representation by dropping all leading zeroes, but at the cost of fully transparent decode if there are leading zeroes.
  ## Examples
      iex> Base2.encode2("hello world")
      "110100001100101011011000110110001101111001000000111011101101111011100100110110001100100"
      iex> Base2.encode2(<<0, 1>>, padding: :zeroes)
      "0000000000000001"
      iex> Base2.encode2(<<0, 1>>, padding: :none)
      "1"
      iex> Base2.encode2(<<0, 1>>, padding: :all)
      "0000000000000001"
      iex> Base2.encode2(<<1>>, padding: :all)
      "00000001"
  """
  @spec encode2(binary(), keyword()) :: base2_binary()
  def encode2(binary, opts \\ [])
  def encode2(binary, opts) when is_binary(binary) do
    pad_type = Keyword.get(opts, :padding, :zeroes)
    do_encode(binary, pad_type)
  end
  @doc """
  Decodes a base 2 encoded string as a binary string.
  An ArgumentError is raised if the string is not a base 2 encoded string.
  ## Examples
      iex> Base2.decode2!("110100001100101011011000110110001101111001000000111011101101111011100100110110001100100")
      "hello world"
      iex> Base2.decode2!("1")
      <<1>>
      iex> Base2.decode2!("0000000000000001")
      <<0, 1>>
      iex> Base2.decode2!("00000001")
      <<1>>
  """
  @spec decode2!(base2_binary()) :: binary()
  def decode2!(string) when is_binary(string) do
    do_decode(string)
  end
  @doc """
  Decodes a base 2 encoded string as a binary string.
  Returns `:error` if the string is not a base 2 encoded string.
  ## Examples
      iex> Base2.decode2("110100001100101011011000110110001101111001000000111011101101111011100100110110001100100")
      {:ok, "hello world"}
      iex> Base2.decode2("1")
      {:ok, <<1>>}
      iex> Base2.decode2("0000000000000001")
      {:ok, <<0, 1>>}
      iex> Base2.decode2("00000001")
      {:ok, <<1>>}
      iex> Base2.decode2("hello world")
      :error
      iex> Base2.decode2("00101015")
      :error
  """
  @spec decode2(base2_binary()) :: {:ok, binary()} | :error
  def decode2(string) when is_binary(string) do
    {:ok, decode2!(string)}
  rescue
    ArgumentError -> :error
  end
  #===============================================================================
  # Private Island
  #===============================================================================
  defp do_encode(data, :none) do
    trim_encode(data, false)
  end
  defp do_encode(data, :zeroes) do
    trim_encode(data, true)
  end
  defp do_encode(data, :all) do
    encode_body(data, [])
  end
  defp trim_encode(<<>>, _pad?) do
    <<>>
  end
  defp trim_encode(<<0>>, _pad?) do
    "0"
  end
  defp trim_encode(data, pad?) do
    trim_encode(data, [], pad?)
  end
  defp trim_encode(<<0, rest::binary>>, _acc, true) do
    # here we could try to make a more compact representation, but it complicates decoding so not sure if it's worth it
    encode_body(rest, '00000000')
  end
  defp trim_encode(<<1::1, rest::bitstring>>, _acc, _pad?) do
    encode_body(rest, ['1'])
  end
  defp trim_encode(<<0::1, rest::bitstring>>, acc, pad?) do
    trim_encode(rest, acc, pad?)
  end
  defp trim_encode(<<>>, [], _pad?) do
    <<>>
  end
  defp encode_body(<<>>, acc) do
    acc |> Enum.reverse() |> to_string()
  end
  defp encode_body(<<1::1, rest::bitstring>>, acc) do
    encode_body(rest, ['1' | acc])
  end
  defp encode_body(<<0::1, rest::bitstring>>, acc) do
    encode_body(rest, ['0' | acc])
  end
  defp count_leading_zeroes(<<"00000000", rest::binary>>, acc) do
    # This call here could be tweaked with another few pattern matches if we want a smaller representation for the case of `:zeroes` padding, however experimenting, the decoding speed was a bit less
    count_leading_zeroes(rest, acc + 1)
  end
  defp count_leading_zeroes(_string, acc) do
    acc
  end
  defp do_decode(string) do
    # Using String.to_integer + encode_unsigned is much faster than manual recursive decode to build individual bits
    leading_zeroes = count_leading_zeroes(string, 0)
    # Strip the counted zero-byte prefix before converting: previously the
    # whole string was converted, so an all-zero remainder contributed an
    # extra <<0>> on top of the counted padding (e.g. "00000000" decoded to
    # <<0, 0>> instead of <<0>>), breaking encode/decode transparency.
    rest = binary_part(string, leading_zeroes * 8, byte_size(string) - leading_zeroes * 8)

    case rest do
      "" ->
        # Nothing beyond the zero bytes (possibly an empty input string):
        # the result is exactly the counted padding.
        <<0::size(leading_zeroes)-unit(8)>>

      _ ->
        <<0::size(leading_zeroes)-unit(8),
          (String.to_integer(rest, 2) |> :binary.encode_unsigned())::binary>>
    end
  end
end
|
lib/base2.ex
| 0.936263 | 0.861596 |
base2.ex
|
starcoder
|
defmodule Sippet.Message.StatusLine do
  @moduledoc """
  A SIP Status-Line struct, composed by the SIP-Version, Status-Code and the
  Reason-Phrase.
  The `start_line` of responses are represented by this struct. The RFC 3261
  represents the Status-Line as:
      Status-Line = SIP-Version SP Status-Code SP Reason-Phrase CRLF
  The above `SIP-Version` is represented by a `{major, minor}` tuple, which
  assumes the value `{2, 0}` in standard implementations.
  The `Status-Code` is a 3-digit integer in the interval 100-699 indicating the
  outcome of an attempt to understand and satisfy a request.
  The `Reason-Phrase` is a binary representing a short textual description of
  the `Status-Code`.
  The `Status-Code` is intended for use by automata, whereas the
  `Reason-Phrase` is intended for the human user.
  """
  defstruct [
    status_code: nil,
    reason_phrase: nil,
    version: nil
  ]
  @type status_code :: 100..699
  @type version :: {integer, integer}
  @type t :: %__MODULE__{
    status_code: status_code,
    reason_phrase: binary,
    version: version
  }
  # Default Reason-Phrases per RFC 3261 and its extensions, keyed by code.
  @default_status_codes %{
    100 => "Trying",
    180 => "Ringing",
    181 => "Call Is Being Forwarded",
    182 => "Queued",
    183 => "Session Progress",
    199 => "Early Dialog Terminated",
    200 => "OK",
    202 => "Accepted",
    204 => "No Notification",
    300 => "Multiple Choices",
    301 => "Moved Permanently",
    302 => "Moved Temporarily",
    305 => "Use Proxy",
    380 => "Alternative Service",
    400 => "Bad Request",
    401 => "Unauthorized",
    402 => "Payment Required",
    403 => "Forbidden",
    404 => "Not Found",
    405 => "Method Not Allowed",
    406 => "Not Acceptable",
    407 => "Proxy Authentication Required",
    408 => "Request Timeout",
    410 => "Gone",
    412 => "Conditional Request Failed",
    413 => "Request Entity Too Large",
    414 => "Request-URI Too Long",
    415 => "Unsupported Media Type",
    416 => "Unsupported URI Scheme",
    417 => "Unknown Resource-Priority",
    420 => "Bad Extension",
    421 => "Extension Required",
    422 => "Session Interval Too Small",
    423 => "Interval Too Brief",
    424 => "Bad Location Information",
    428 => "Use Identity Header",
    429 => "Provide Referrer Identity",
    430 => "Flow Failed",
    433 => "Anonymity Disallowed",
    436 => "Bad Identity-Info",
    437 => "Unsupported Certificate",
    438 => "Invalid Identity Header",
    439 => "First Hop Lacks Outbound Support",
    440 => "Max-Breadth Exceeded",
    469 => "Bad Info Package",
    470 => "Consent Needed",
    480 => "Temporarily Unavailable",
    481 => "Call/Transaction Does Not Exist",
    482 => "Loop Detected",
    483 => "Too Many Hops",
    484 => "Address Incomplete",
    485 => "Ambiguous",
    486 => "Busy Here",
    487 => "Request Terminated",
    488 => "Not Acceptable Here",
    489 => "Bad Event",
    491 => "Request Pending",
    493 => "Undecipherable",
    494 => "Security Agreement Required",
    500 => "Server Internal Error",
    501 => "Not Implemented",
    502 => "Bad Gateway",
    503 => "Service Unavailable",
    504 => "Server Time-out",
    505 => "Version Not Supported",
    513 => "Message Too Large",
    580 => "Precondition Failure",
    600 => "Busy Everywhere",
    603 => "Decline",
    604 => "Does Not Exist Anywhere",
    606 => "Not Acceptable"
  }
  @doc """
  Returns a Status-Line struct.
  The `reason_phrase` is obtained from default values.
  The function will throw an exception if the `status_code` is not in the valid
  range `100..699` or if the `status_code` does not have a default reason
  phrase.
  The version will assume the default value `{2, 0}`.
  ## Examples
      iex> Sippet.Message.StatusLine.new(400)
      %Sippet.Message.StatusLine{reason_phrase: "Bad Request", status_code: 400,
       version: {2, 0}}
  """
  @spec new(status_code) :: t | no_return
  def new(status_code) when is_integer(status_code),
    do: new(status_code, default_reason!(status_code))
  @doc """
  Creates a Status-Line struct using a given reason phrase.
  In this function, the `reason_phrase` can be anything the application wants.
  The function will throw an exception if the `status_code` is not in the valid
  range `100..699`.
  The version will assume the default value `{2, 0}`.
  ## Examples
      iex> Sippet.Message.StatusLine.new(499, "Foobar")
      %Sippet.Message.StatusLine{reason_phrase: "Foobar", status_code: 499,
       version: {2, 0}}
  """
  @spec new(status_code, reason_phrase :: binary) :: t
  def new(status_code, reason_phrase)
      when is_integer(status_code) and is_binary(reason_phrase) do
    %__MODULE__{
      status_code: do_raise_if_invalid(status_code),
      reason_phrase: reason_phrase,
      version: {2, 0}
    }
  end
  # Guards the valid Status-Code interval; returns the code unchanged so it
  # can be used inline in struct construction.
  defp do_raise_if_invalid(status_code) do
    if status_code < 100 || status_code >= 700 do
      raise "invalid status code, got: #{inspect(status_code)}"
    else
      status_code
    end
  end
  @doc """
  Returns an integer representing the status code class in the range `[1, 6]`.
  ## Examples
      iex> alias Sippet.Message.StatusLine
      iex> StatusLine.new(202) |> StatusLine.status_code_class()
      2
  """
  @spec status_code_class(t) :: 1..6
  def status_code_class(%__MODULE__{status_code: status_code}) do
    div(status_code, 100)
  end
  @doc """
  Returns a binary representing the default reason phrase for the given
  `status_code`.
  If the `status_code` does not have a corresponding default reason phrase,
  returns `nil`.
  ## Examples
      iex> Sippet.Message.StatusLine.default_reason(202)
      "Accepted"
      iex> Sippet.Message.StatusLine.default_reason(499)
      nil
  """
  @spec default_reason(status_code) :: binary | nil
  def default_reason(status_code) do
    # Single lookup with a nil default instead of Map.has_key? + access,
    # which looked the key up twice.
    Map.get(@default_status_codes, status_code)
  end
  @doc """
  Returns a binary representing the default reason phrase for the given
  `status_code`.
  If the `status_code` does not have a corresponding default reason phrase,
  throws an exception.
  ## Examples
      iex> Sippet.Message.StatusLine.default_reason!(202)
      "Accepted"
      iex> Sippet.Message.StatusLine.default_reason!(499)
      ** (ArgumentError) status code 499 does not have a default reason phrase
  """
  @spec default_reason!(status_code) :: binary | no_return
  def default_reason!(status_code) do
    case status_code |> do_raise_if_invalid() |> default_reason() do
      nil ->
        raise ArgumentError, "status code #{inspect status_code} " <>
                             "does not have a default reason phrase"
      reason_phrase ->
        reason_phrase
    end
  end
  @doc """
  Returns a binary which corresponds to the text representation of the given
  Status-Line.
  It does not includes an ending line CRLF.
  ## Examples
      iex> alias Sippet.Message.StatusLine
      iex> StatusLine.new(202) |> StatusLine.to_string
      "SIP/2.0 202 Accepted"
  """
  @spec to_string(t) :: binary
  defdelegate to_string(value), to: String.Chars.Sippet.Message.StatusLine
  @doc """
  Returns an iodata which corresponds to the text representation of the given
  Status-Line.
  It does not includes an ending line CRLF.
  ## Examples
      iex> alias Sippet.Message.StatusLine
      iex> StatusLine.new(202) |> StatusLine.to_iodata
      ["SIP/", "2", ".", "0", " ", "202", " ", "Accepted"]
  """
  @spec to_iodata(t) :: iodata
  def to_iodata(%Sippet.Message.StatusLine{version: {major, minor},
      status_code: status_code, reason_phrase: reason_phrase}) do
    ["SIP/", Integer.to_string(major), ".", Integer.to_string(minor),
     " ", Integer.to_string(status_code),
     " ", reason_phrase]
  end
end
defimpl String.Chars, for: Sippet.Message.StatusLine do
  alias Sippet.Message.StatusLine

  # Renders the Status-Line by flattening its iodata representation.
  def to_string(%StatusLine{} = status_line) do
    StatusLine.to_iodata(status_line) |> IO.iodata_to_binary()
  end
end
|
lib/sippet/message/status_line.ex
| 0.89568 | 0.50354 |
status_line.ex
|
starcoder
|
defmodule AWS.IoTJobsDataPlane do
  @moduledoc """
  AWS IoT Jobs is a service that allows you to define a set of jobs — remote
  operations that are sent to and executed on one or more devices connected to AWS
  IoT.
  For example, you can define a job that instructs a set of devices to download
  and install application or firmware updates, reboot, rotate certificates, or
  perform remote troubleshooting operations.
  To create a job, you make a job document which is a description of the remote
  operations to be performed, and you specify a list of targets that should
  perform the operations. The targets can be individual things, thing groups or
  both.
  AWS IoT Jobs sends a message to inform the targets that a job is available. The
  target starts the execution of the job by downloading the job document,
  performing the operations it specifies, and reporting its progress to AWS IoT.
  The Jobs service provides commands to track the progress of a job on a specific
  target and for all the targets of the job
  """
  @doc """
  Gets details of a job execution.
  """
  def describe_job_execution(client, job_id, thing_name, execution_number \\ nil, include_job_document \\ nil, options \\ []) do
    path_ = "/things/#{URI.encode(thing_name)}/jobs/#{URI.encode(job_id)}"
    headers = []
    query_ = []
    # Optional query parameters are only appended when provided by the caller.
    query_ = if !is_nil(include_job_document) do
      [{"includeJobDocument", include_job_document} | query_]
    else
      query_
    end
    query_ = if !is_nil(execution_number) do
      [{"executionNumber", execution_number} | query_]
    else
      query_
    end
    request(client, :get, path_, query_, headers, nil, options, nil)
  end
  @doc """
  Gets the list of all jobs for a thing that are not in a terminal status.
  """
  def get_pending_job_executions(client, thing_name, options \\ []) do
    path_ = "/things/#{URI.encode(thing_name)}/jobs"
    headers = []
    query_ = []
    request(client, :get, path_, query_, headers, nil, options, nil)
  end
  @doc """
  Gets and starts the next pending (status IN_PROGRESS or QUEUED) job execution
  for a thing.
  """
  def start_next_pending_job_execution(client, thing_name, input, options \\ []) do
    # "$next" is a literal AWS path segment, not a placeholder.
    path_ = "/things/#{URI.encode(thing_name)}/jobs/$next"
    headers = []
    query_ = []
    request(client, :put, path_, query_, headers, input, options, nil)
  end
  @doc """
  Updates the status of a job execution.
  """
  def update_job_execution(client, job_id, thing_name, input, options \\ []) do
    path_ = "/things/#{URI.encode(thing_name)}/jobs/#{URI.encode(job_id)}"
    headers = []
    query_ = []
    request(client, :post, path_, query_, headers, input, options, nil)
  end
  # Builds the full URL, signs the request with SigV4 and performs it.
  # `success_status_code` of nil means "accept the common 2xx codes".
  @spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
          {:ok, map() | nil, map()}
          | {:error, term()}
  defp request(client, method, path, query, headers, input, options, success_status_code) do
    client = %{client | service: "iot-jobs-data"}
    host = build_host("data.jobs.iot", client)
    url = host
          |> build_url(path, client)
          |> add_query(query, client)
    additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
    headers = AWS.Request.add_headers(additional_headers, headers)
    # Signing must happen after the payload is encoded, since the body is
    # part of the SigV4 signature.
    payload = encode!(client, input)
    headers = AWS.Request.sign_v4(client, method, url, headers, payload)
    perform_request(client, method, url, payload, headers, options, success_status_code)
  end
  defp perform_request(client, method, url, payload, headers, options, success_status_code) do
    case AWS.Client.request(client, method, url, payload, headers, options) do
      # The two `when` guards below are OR-ed: accept the default success
      # codes (200/202/204) when no specific code is expected, or exactly
      # the expected code otherwise.
      {:ok, %{status_code: status_code, body: body} = response}
      when is_nil(success_status_code) and status_code in [200, 202, 204]
      when status_code == success_status_code ->
        # An empty body is passed through as nil-ish rather than decoded.
        body = if(body != "", do: decode!(client, body))
        {:ok, body, response}
      {:ok, response} ->
        {:error, {:unexpected_response, response}}
      error = {:error, _reason} -> error
    end
  end
  # Host resolution: a "local" region targets a custom endpoint or localhost,
  # otherwise the standard <prefix>.<region>.<endpoint> layout is used.
  defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
    endpoint
  end
  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end
  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end
  defp build_url(host, path, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}#{path}"
  end
  # Appends the query string only when there are query parameters.
  defp add_query(url, [], _client) do
    url
  end
  defp add_query(url, query, client) do
    querystring = encode!(client, query, :query)
    "#{url}?#{querystring}"
  end
  defp encode!(client, payload, format \\ :json) do
    AWS.Client.encode!(client, payload, format)
  end
  defp decode!(client, payload) do
    AWS.Client.decode!(client, payload, :json)
  end
end
|
lib/aws/generated/iot_jobs_data_plane.ex
| 0.716814 | 0.522689 |
iot_jobs_data_plane.ex
|
starcoder
|
defmodule RecurringEvents.Monthly do
  @moduledoc """
  Handles `:monthly` frequency rule
  """
  alias RecurringEvents.Date
  @doc """
  Returns monthly stream of dates with respect to `:interval`, `:count` and
  `:until` rules. Time and day in date provided as `:until` is ignored.
  # Example
      iex> RecurringEvents.Monthly.unfold(~N[2017-01-22 10:11:11],
      ...>       %{freq: :monthly, until: ~N[2017-02-03 05:00:00]})
      ...> |> Enum.take(10)
      [~N[2017-01-22 10:11:11], ~N[2017-02-22 10:11:11]]
  """
  def unfold(date, %{freq: :monthly} = rules), do: do_unfold(date, rules)
  # Builds a lazy stream of monthly occurrences starting at `date`, halting
  # on whichever limit (`:count` or `:until`) is reached first.
  defp do_unfold(date, %{} = rules) do
    step = get_step(rules)
    count = get_count(rules)
    until_date = until_date(rules)
    Stream.resource(
      fn -> {date, 0} end,
      fn {date, iteration} ->
        # With invalid-handling other than :fallback, shifted dates that do
        # not exist (e.g. Jan 31 -> Feb 31) are returned as-is by Date.
        return_invalid = Map.get(rules, :invalid, :fallback) != :fallback
        {[next_date], _} = next_result = next_iteration(date, step, iteration, return_invalid)
        cond do
          iteration == count -> {:halt, nil}
          until_reached(next_date, until_date) -> {:halt, nil}
          true -> next_result
        end
      end,
      fn _ -> nil end
    )
  end
  # Shifts the start date by `step * iteration` months; the accumulator keeps
  # the original start date so each shift is computed from it, not cumulatively.
  defp next_iteration(date, step, iteration, return_invalid) do
    next_date = Date.shift_date( date, step * iteration, :months, return_invalid: return_invalid)
    acc = {date, iteration + 1}
    {[next_date], acc}
  end
  defp until_reached(_date, :forever), do: false
  defp until_reached(date, until_date) do
    Date.compare(date, until_date) == :gt
  end
  # The day of `:until` is ignored: the limit is pushed to the end of its
  # month so any occurrence within that month is still emitted.
  defp until_date(%{until: until_date}) do
    last_day = Date.last_day_of_the_month(until_date)
    %{until_date | day: last_day}
  end
  defp until_date(%{}), do: :forever
  defp get_step(%{interval: interval}), do: interval
  defp get_step(%{}), do: 1
  # Excluded dates still consume an iteration, so the iteration budget is
  # extended by the number of exclusions to honor `:count`.
  defp add_count(%{exclude_date: dates}), do: dates |> Enum.count()
  defp add_count(%{}), do: 0
  defp get_count(%{count: count} = rules), do: count + add_count(rules)
  defp get_count(%{}), do: :infinity
end
|
lib/recurring_events/monthly.ex
| 0.88311 | 0.460532 |
monthly.ex
|
starcoder
|
defmodule IVCU.Definition do
  @moduledoc ~S"""
  An interface for file processing.
  ## Example
  Suppose we have defined [storage](`IVCU.Storage`) and
  [converter](`IVCU.Converter`) modules as `MyApp.FileStorage` and
  `MyApp.ImageConverter` respectively. Then we can provide a specific
  definition for some images.
      defmodule MyApp.Image do
        @behaviour IVCU.Definition
        def versions, do: [:thumb, :original]
        def storage, do: MyApp.FileStorage
        def converter, do: MyApp.ImageConverter
        def validate(%{filename: filename}) do
          if Path.extname(filename) in ~w(.png .jpg .jpeg) do
            :ok
          else
            {:error, :invalid_image_extension}
          end
        end
        def filename(version, filename) do
          extname = Path.extname(filename)
          base = filename |> String.replace(extname, "")
          "#{base}_#{version}#{extname}"
        end
      end
  Using that definition you get two file formats: `:thumb` and
  `:original` and pass only files with `.png`, `.jpg`, or `.jpeg`
  extensions.
  Also you stored filenames will be looking like
  `<original base filename>_<version>.<original extension>`.
  """
  alias IVCU.File
  @typedoc """
  A name for the version of the processed image.
  """
  @type version :: atom
  @doc """
  Return [Storage](`IVCU.Storage`) module.
  """
  @callback storage :: module
  @doc """
  Return [Converter](`IVCU.Converter`) module.
  """
  @callback converter :: module
  @doc """
  Check if the file is allowed to be processed and put into a
  storage.
  """
  @callback validate(File.t()) :: :ok | {:error, term}
  @doc """
  Return a list of versions.
  > #### Note {: .info}
  >
  > If you provide `:original` among versions, the file with this
  > version won't be modified by the converter.
  """
  @callback versions :: [version]
  # NOTE(review): the moduledoc example implements this callback with a bare
  # filename string as the second argument, while the spec below declares
  # `File.t()` — confirm which is intended and align the example or the spec.
  @doc """
  Get a new filename for the provided version.
  """
  @callback filename(version, File.t()) :: String.t()
end
|
lib/ivcu/definition.ex
| 0.822439 | 0.438966 |
definition.ex
|
starcoder
|
defmodule D09.Challenge do
  @moduledoc """
  Solution sketch:

  The first part is quite simple: Create a 2D array (I use a 1D array with indexing) and scan
  every point to see whether all of its direct neighbors have higher values (a 9 itself can
  never be a low point). Every low point contributes its height value plus 1 to the sum of
  risk levels.

  The second part reuses the low point detection of part 1 but keeps the points themselves
  instead of their values. Each low point seeds a basin that is explored breadth-first until
  there are no valid points (outside the map) or a 9 is visited.

  We then count the points per basin and sort the sizes to find the top 3, whose product is
  the answer.
  """

  require Logger

  @doc """
  Solves the day-9 puzzle: `run(1)` logs the sum of risk levels, `run(2)` logs the product
  of the sizes of the three biggest basins.
  """
  def run(1) do
    height_map = Utils.read_input(9, &map_input/1) |> build_height_map()

    result =
      height_map
      |> low_points()
      # The risk level of a low point is its height + 1.
      |> Stream.map(fn {val, _index} -> val + 1 end)
      |> Enum.sum()

    Logger.info("The sum of risk levels is #{result}")
  end

  def run(2) do
    height_map = Utils.read_input(9, &map_input/1) |> build_height_map()

    result =
      height_map
      |> low_points()
      |> Stream.map(fn {_val, index} -> to_point(height_map, index) end)
      |> Stream.map(&create_basin(height_map, &1))
      |> Stream.map(&length/1)
      |> Enum.sort(:desc)
      |> Enum.take(3)
      |> Enum.product()

    Logger.info("The product of the three biggest basins is #{result}")
  end

  # Parses one input line of digits into a list of integers.
  defp map_input(line), do: String.codepoints(line) |> Enum.map(&String.to_integer/1)

  # Flattens the parsed rows into a 1D value list plus the grid dimensions.
  defp build_height_map(numbers) do
    %{
      width: length(hd(numbers)),
      height: length(numbers),
      values: Enum.concat(numbers)
    }
  end

  # Lazily emits `{value, index}` for every low point of the map.
  # (Replaces the previous `Enum.reduce/3` that appended with `acc ++ [x]`,
  # which was accidentally quadratic.)
  defp low_points(height_map) do
    height_map.values
    |> Stream.with_index()
    |> Stream.filter(fn {_val, index} -> low_point?(height_map, index) end)
  end

  # Height at `{x, y}`, or nil when the coordinate lies outside the map.
  defp at(%{width: width, height: height}, {x, y})
       when x < 0 or x >= width or y < 0 or y >= height,
       do: nil

  defp at(%{width: width, values: values}, {x, y}) do
    Enum.at(values, x + y * width)
  end

  # Converts a flat index back into an `{x, y}` coordinate.
  defp to_point(%{width: width}, index) do
    # Integer division instead of float division + trunc.
    {rem(index, width), div(index, width)}
  end

  # The four orthogonal neighbors of a point (may lie outside the map).
  defp neighbors({x, y}) do
    [
      {x + 1, y},
      {x, y + 1},
      {x - 1, y},
      {x, y - 1}
    ]
  end

  # A point is a low point when every in-bounds neighbor is strictly higher.
  defp low_point?(%{values: values} = height_map, index) do
    val = Enum.at(values, index)

    # The highest value of the heightmap cannot be a low point, no need to check the neighbors
    if val == 9 do
      false
    else
      height_map
      |> to_point(index)
      |> neighbors()
      |> Stream.map(&at(height_map, &1))
      |> Stream.reject(&is_nil/1)
      |> Enum.all?(fn n -> n > val end)
    end
  end

  # Breadth-first exploration of the basin around `low_point`; returns the
  # list of points belonging to the basin.
  # (The previous implementation bounded its `reduce_while/3` loop at
  # `length(values)` iterations and would return the raw `{visited, queue}`
  # accumulator — not a list — if a basin covered the whole map; plain
  # recursion has no such bound.)
  defp create_basin(height_map, low_point) do
    explore(height_map, valid_neighbors(height_map, low_point), MapSet.new([low_point]))
  end

  # Queue drained: the basin is complete.
  defp explore(_height_map, [], visited), do: MapSet.to_list(visited)

  defp explore(height_map, [point | queue], visited) do
    visited = MapSet.put(visited, point)

    discovered =
      height_map
      |> valid_neighbors(point)
      |> Enum.reject(&MapSet.member?(visited, &1))

    explore(height_map, Enum.uniq(queue ++ discovered), visited)
  end

  # Neighbors that are inside the map and not a basin border (height 9).
  defp valid_neighbors(height_map, point) do
    point
    |> neighbors()
    |> Stream.map(fn new_neighbor -> {new_neighbor, at(height_map, new_neighbor)} end)
    |> Stream.reject(fn {_point, val} -> val == nil or val == 9 end)
    |> Enum.map(fn {point, _} -> point end)
  end
end
|
lib/d09/challenge.ex
| 0.722331 | 0.828072 |
challenge.ex
|
starcoder
|
defmodule NeoNode.Parser do
  @moduledoc """
  Translates the string-keyed JSON maps returned by a NEO node's RPC API
  (blocks, transactions, assets, contracts, notifications) into atom-keyed
  maps with decoded binaries, decimals and atom tags.
  """

  # Presumably the NEO address version prefix byte as a hex string — confirm
  # against the node's address format.
  @address_version "17"

  # Hex-decodes a string, tolerating an optional "0x" prefix and mixed case.
  defp parse16("0x" <> rest), do: parse16(rest)

  defp parse16(string) do
    Base.decode16!(string, case: :mixed)
  end

  # Base.decode64!(string, padding: false)
  # Base58-decodes a string (Base58 is an external module, not stdlib).
  defp parse58(string), do: Base58.decode(string)

  # JSON asset-type strings -> atom tags (closed set; unknown values raise).
  defp parse_asset_type("GoverningToken"), do: :governing_token
  defp parse_asset_type("UtilityToken"), do: :utility_token
  defp parse_asset_type("Token"), do: :token
  defp parse_asset_type("Share"), do: :share

  # JSON transaction-type strings -> atom tags (closed set; unknown values raise).
  defp parse_transaction_type("PublishTransaction"), do: :publish_transaction
  defp parse_transaction_type("RegisterTransaction"), do: :register_transaction
  defp parse_transaction_type("IssueTransaction"), do: :issue_transaction
  defp parse_transaction_type("MinerTransaction"), do: :miner_transaction
  defp parse_transaction_type("ContractTransaction"), do: :contract_transaction
  defp parse_transaction_type("ClaimTransaction"), do: :claim_transaction
  defp parse_transaction_type("InvocationTransaction"), do: :invocation_transaction
  defp parse_transaction_type("EnrollmentTransaction"), do: :enrollment_transaction
  defp parse_transaction_type("StateTransaction"), do: :state_transaction

  # nil-safe Decimal conversion.
  defp parse_decimal(nil), do: nil
  defp parse_decimal(string), do: Decimal.new(string)

  # A missing "claims" field is treated as an empty claim list.
  defp parse_claims(nil), do: []
  defp parse_claims(claims), do: Enum.map(claims, &parse_vin/1)

  # Assertive by design: anything other than an integer raises
  # FunctionClauseError instead of being silently passed through.
  defp ensure_integer(integer) when is_integer(integer), do: integer

  # Transaction input: reference to a previous output by tx hash and index.
  defp parse_vin(vin) do
    %{
      vout_transaction_hash: parse16(vin["txid"]),
      vout_n: vin["vout"]
    }
  end

  # Transaction output: receiving address, asset hash, index and value.
  defp parse_vout(vout) do
    %{
      address: parse58(vout["address"]),
      asset: parse16(vout["asset"]),
      n: vout["n"],
      value: parse_decimal(vout["value"])
    }
  end

  # An asset embedded in a transaction inherits the transaction's txid as its
  # id and its admin as issuer before normalization.
  defp parse_transaction_asset(nil, _), do: nil

  defp parse_transaction_asset(asset, transaction) do
    asset
    |> Map.merge(%{"id" => transaction["txid"], "issuer" => asset["admin"]})
    |> parse_asset()
  end

  # Normalizes a contract JSON map; "script" may be absent (nil-checked).
  def parse_contract(contract) do
    %{
      author: contract["author"],
      code_version: contract["code_version"],
      email: contract["email"],
      hash: parse16(contract["hash"]),
      name: contract["name"],
      parameters: contract["parameters"],
      properties: contract["properties"],
      return_type: contract["returntype"],
      script: if(is_nil(contract["script"]), do: nil, else: parse16(contract["script"])),
      version: contract["version"]
    }
  end

  # Normalizes an asset JSON map; "id" becomes transaction_hash.
  def parse_asset(asset) do
    %{
      admin: parse58(asset["admin"]),
      amount: parse_decimal(asset["amount"]),
      available: parse_decimal(asset["available"]),
      expiration: asset["expiration"],
      frozen: asset["frozen"],
      transaction_hash: parse16(asset["id"]),
      issuer: parse58(asset["issuer"]),
      name: parse_asset_name(asset["name"]),
      owner: asset["owner"],
      precision: asset["precision"],
      type: parse_asset_type(asset["type"]),
      version: asset["version"]
    }
  end

  # Folds the localized name list into a %{lang => name} map.
  defp parse_asset_name(list) when is_list(list) do
    Enum.reduce(list, %{}, fn %{"lang" => lang, "name" => name}, acc ->
      Map.put(acc, lang, name)
    end)
  end

  # Normalizes a block JSON map; each contained transaction is enriched with
  # the block's hash and timestamp before parsing.
  def parse_block(block) do
    %{
      confirmations: block["confirmations"],
      hash: parse16(block["hash"]),
      index: block["index"],
      merkle_root: parse16(block["merkleroot"]),
      next_block_hash:
        if(is_nil(block["nextblockhash"]), do: nil, else: parse16(block["nextblockhash"])),
      previous_block_hash: parse16(block["previousblockhash"]),
      next_consensus: parse58(block["nextconsensus"]),
      version: block["version"],
      nonce: parse16(block["nonce"]),
      script: block["script"],
      # Crashes (FunctionClauseError) if the node ever returns a non-integer size.
      size: ensure_integer(block["size"]),
      time: DateTime.from_unix!(block["time"]),
      tx: Enum.map(block["tx"], &parse_block_transaction(&1, block))
    }
  end

  defp parse_block_transaction(transaction, block) do
    transaction
    |> Map.merge(%{"blockhash" => block["hash"], "blocktime" => block["time"]})
    |> parse_transaction()
  end

  # Normalizes a transaction JSON map (expects "blockhash"/"blocktime" to be
  # present — see parse_block_transaction/2).
  def parse_transaction(transaction) do
    %{
      asset: parse_transaction_asset(transaction["asset"], transaction),
      nonce: transaction["nonce"],
      extra: %{
        scripts: transaction["scripts"],
        script: transaction["script"],
        contract: transaction["contract"],
        attributes: transaction["attributes"]
      },
      block_time: DateTime.from_unix!(transaction["blocktime"]),
      block_hash: parse16(transaction["blockhash"]),
      size: transaction["size"],
      sys_fee: parse_decimal(transaction["sys_fee"]),
      net_fee: parse_decimal(transaction["net_fee"]),
      hash: parse16(transaction["txid"]),
      type: parse_transaction_type(transaction["type"]),
      version: transaction["version"],
      vins: Enum.map(transaction["vin"], &parse_vin/1),
      vouts: Enum.map(transaction["vout"], &parse_vout/1),
      claims: parse_claims(transaction["claims"])
    }
  end

  # Extracts all parsable notifications from an application log, dropping
  # executions without notifications and unrecognized notifications (nil).
  def parse_application_log(%{"executions" => executions}) do
    executions
    |> Enum.map(&parse_execution/1)
    |> List.flatten()
    |> Enum.filter(&(not is_nil(&1)))
  end

  defp parse_execution(%{"notifications" => notifications}) do
    Enum.map(notifications, &parse_notification/1)
  end

  defp parse_execution(_), do: nil

  # Decodes the VM result stack of an invocation; only ByteArray and Integer
  # stack items are supported (others raise FunctionClauseError).
  def parse_invoke(%{"stack" => stack}) when is_list(stack) do
    Enum.map(stack, &parse_invoke_elem/1)
  end

  defp parse_invoke_elem(%{"type" => "ByteArray", "value" => string}) do
    Base.decode16!(string, case: :mixed)
  end

  defp parse_invoke_elem(%{"type" => "Integer", "value" => integer}) do
    String.to_integer(integer)
  end

  # Matches a token transfer event: "7472616e73666572" is the hex encoding of
  # the ASCII string "transfer" (presumably a NEP-5 Transfer notification).
  defp parse_notification(%{
         "contract" => contract,
         "state" => %{
           "type" => "Array",
           "value" => [
             %{"type" => "ByteArray", "value" => "7472616e73666572"},
             %{
               "type" => "ByteArray",
               "value" => address_from
             },
             %{
               "type" => "ByteArray",
               "value" => address_to
             },
             amount
           ]
         }
       }) do
    # Notifications whose amount cannot be decoded are discarded entirely.
    case parse_notification_amount(amount) do
      nil ->
        nil

      value ->
        %{
          address_from: parse_address(address_from),
          address_to: parse_address(address_to),
          value: value,
          contract: parse16(contract)
        }
    end
  end

  # Anything that is not a recognized transfer notification is ignored.
  defp parse_notification(_), do: nil

  # Transfer amounts arrive either as little-endian hex bytes or as a
  # decimal string; anything else yields nil (notification dropped).
  defp parse_notification_amount(%{"type" => "ByteArray", "value" => value}) do
    parse_integer_value(value)
  end

  defp parse_notification_amount(%{"type" => "Integer", "value" => value}) do
    String.to_integer(value)
  end

  defp parse_notification_amount(_) do
    nil
  end

  # Little-endian unsigned integer from a hex string.
  defp parse_integer_value(value) do
    value
    |> Base.decode16!(case: :mixed)
    |> :binary.decode_unsigned(:little)
  end

  # Converts a hex script hash into raw address bytes:
  # version byte ++ script hash ++ 4-byte double-SHA256 checksum, with a
  # special-case sentinel mapped to <<0>>.
  # NOTE(review): this deviates from the reference JS implementation quoted
  # below (which base58-encodes the result and reverses the hash first) —
  # confirm the raw-binary output and the <<23, 27, 182, 49, 176>> sentinel
  # are intentional.
  defp parse_address(address) do
    address
    |> (&(@address_version <> &1)).()
    |> Base.decode16!(case: :mixed)
    |> hash256()
    |> Base.encode16()
    # First 4 checksum bytes (8 hex chars).
    |> String.slice(0..7)
    |> (&(@address_version <> address <> &1)).()
    |> Base.decode16!(case: :mixed)
    |> (&if(&1 == <<23, 27, 182, 49, 176>>, do: <<0>>, else: &1)).()
  end

  # Double SHA-256.
  defp hash256(binary) do
    :crypto.hash(:sha256, :crypto.hash(:sha256, binary))
  end

  # export const getAddressFromScriptHash = (scriptHash: string): string => {
  #   scriptHash = reverseHex(scriptHash);
  #   const shaChecksum = hash256(ADDR_VERSION + scriptHash).substr(0, 8);
  #   return base58.encode(
  #     Buffer.from(ADDR_VERSION + scriptHash + shaChecksum, "hex")
  #   );
  # };

  # Identifies the node implementation from its user agent; "/NEO:" (any
  # case, thanks to the upcase) marks the C# node, everything else is
  # assumed to be the Python node.
  def parse_version(%{"useragent" => user_agent}) do
    case String.upcase(user_agent) do
      "/NEO:" <> version ->
        {:csharp, version}

      _ ->
        {:python, nil}
    end
  end

  def parse_version(_), do: {:unknown, nil}
end
|
apps/neo_node/lib/neo_node/parser.ex
| 0.539226 | 0.513303 |
parser.ex
|
starcoder
|
defmodule Dynamo.Cowboy do
  @moduledoc """
  Provides the interface to Cowboy webserver.

  Check `run/2` for more information.
  """

  # NOTE(review): this module targets a pre-1.0 Elixir — `options // []`
  # default syntax, implicit partial captures like `Keyword.delete(&2, &1)`,
  # `_ in [...]` case patterns, `to_binary`, `nil?`, `binary_to_integer` and
  # `list_to_tuple` are not valid on modern Elixir. Keep the toolchain in
  # mind before restyling.

  @doc """
  Runs the given module with the given options:

  * `port` - the port to run the server (defaults to 4000)

  * `acceptors` - the number of acceptors for the listener

  * `max_connections` - max number of connections supported

  * `ssl` - SSL options for the server. It accepts all options
    mentioned above plus the configuration options accepted
    by the [`ssl` erlang module](http://www.erlang.org/doc/man/ssl.html)
    (like keyfile, certfile, cacertfile and others).

  ## Example

      Dynamo.Cowboy.run MyApp, port: 80

  """
  def run(main, options // []) do
    :application.start(:ranch)
    :application.start(:cowboy)

    env = options[:env]
    ssl = options[:ssl]
    host = options[:host] || "localhost"

    # Strip the Dynamo-specific keys; the remainder is handed to Cowboy.
    options = Enum.reduce [:env, :ssl, :host], options, Keyword.delete(&2, &1)

    # Resolve a dotted-quad host string into an :ip tuple for the listener.
    # NOTE(review): the rebinding of `options` inside the case clause relies
    # on the old compiler leaking clause bindings into the surrounding scope
    # — confirm on the targeted Elixir version.
    case host do
      _ in ["localhost", nil] ->
        :ok
      host when is_binary(host) ->
        ip = host |> String.split(".") |> Enum.map(binary_to_integer(&1)) |> list_to_tuple
        options = Keyword.put(options, :ip, ip)
    end

    # When :ssl options are given, start an HTTPS listener in addition to
    # the plain HTTP one below.
    if ssl do
      :application.start(:crypto)
      :application.start(:public_key)
      :application.start(:ssl)
      https = https_options(main, Keyword.merge(options, ssl))
      log(main, :https, env, host, https)
      start_listener(:https, main, https)
    end

    http = http_options(main, options)
    log(main, :http, env, host, http)
    start_listener(:http, main, http)
  end

  # Stops both listeners registered for the module (named Main.HTTP and
  # Main.HTTPS by start_listener/3).
  def shutdown(main) do
    :cowboy.stop_listener(main.HTTP)
    :cowboy.stop_listener(main.HTTPS)
    :ok
  end

  ## Helpers

  @http_options  [port: 4000]
  @https_options [port: 4040]

  # Boots a Cowboy listener (:http or :https) registered under
  # Module.concat(main, "HTTP"/"HTTPS"), with 100 acceptors by default.
  defp start_listener(kind, main, options) do
    acceptors = options[:acceptors] || 100
    dispatch  = :cowboy_router.compile(options[:dispatch] || dispatch_for(main))
    options   = Enum.reduce [:acceptors, :dispatch], options, Keyword.delete(&2, &1)

    ref = Module.concat(main, kind |> to_binary |> String.upcase)
    apply(:cowboy, :"start_#{kind}", [ref, acceptors, options, [env: [dispatch: dispatch]]])
  end

  defp http_options(_main, options) do
    Keyword.merge @http_options, options
  end

  # HTTPS needs key/cert paths expanded relative to the app root and the
  # password converted to a charlist for the Erlang ssl application.
  defp https_options(main, options) do
    options = Keyword.merge @https_options, options
    options = Enum.reduce [:keyfile, :certfile, :cacertfile], options, normalize_ssl_file(main, &2, &1)
    options = Enum.reduce [:password], options, to_char_list(&2, &1)
    options
  end

  # Prints the startup banner unless verbose: false was given.
  defp log(main, kind, env, host, options) do
    unless options[:verbose] == false do
      IO.puts "Running #{inspect main} at #{kind}://#{host}:#{options[:port]} with Cowboy on #{env}"
    end
  end

  # Default catch-all route: every request goes to Dynamo.Cowboy.Handler.
  defp dispatch_for(main) do
    [{ :_, [ {:_, Dynamo.Cowboy.Handler, main } ] }]
  end

  # Expands an SSL file path against main.root and converts it to a
  # charlist; leaves the options untouched when the key is absent.
  defp normalize_ssl_file(main, options, key) do
    value = options[key]

    if nil?(value) do
      options
    else
      new = Path.expand(value, main.root) |> to_char_list
      Keyword.put(options, key, new)
    end
  end

  # Converts the value under `key` to a charlist when present.
  defp to_char_list(options, key) do
    if value = options[key] do
      Keyword.put options, key, to_char_list(value)
    else
      options
    end
  end
end
|
lib/dynamo/cowboy.ex
| 0.846229 | 0.446133 |
cowboy.ex
|
starcoder
|
defmodule Lexthink.AST do
  @moduledoc """
  Builders for the RethinkDB (ReQL) wire AST: every public function returns
  a `:term` Erlang record describing one ReQL operation, composed client-side
  and serialized to the server elsewhere.
  """

  # NOTE(review): this module is written for a pre-1.0 Elixir — `lc ... inlist`
  # comprehensions, `//` default arguments, `:term[...]` record syntax and
  # `atom_to_binary/1` are not valid on modern Elixir.

  @typep datum_arg :: :null | boolean | number | binary
  @typep expr_arg :: Dict.t | {any, any} | [expr_arg] | fun | atom | :term.t | :term_assocpair.t | datum_arg
  @typep key_arg :: binary | number | atom

  ## Database operations

  @spec db_create(binary) :: :term.t
  def db_create(name), do: :term.new(type: :'DB_CREATE', args: expr(name))

  @spec db_drop(binary) :: :term.t
  def db_drop(name), do: :term.new(type: :'DB_DROP', args: expr(name))

  @spec db_list() :: :term.t
  def db_list(), do: :term.new(type: :'DB_LIST')

  ## Table operations

  @spec table_create(binary) :: :term.t
  def table_create(name) when is_binary(name) do
    table_create(name, [])
  end

  @spec table_create(binary | :term.t, Keyword.t | binary) :: :term.t
  def table_create(name, options) when is_binary(name) do
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'TABLE_CREATE', args: expr(name), optargs: optargs)
  end

  def table_create(db, name) when is_binary(name) do
    table_create(db, name, [])
  end

  @spec table_create(:term.t, binary, Keyword.t) :: :term.t
  def table_create(db, name, options) do
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'TABLE_CREATE', args: [db, expr(name)], optargs: optargs)
  end

  @spec table_drop(binary) :: :term.t
  def table_drop(name) do
    :term.new(type: :'TABLE_DROP', args: expr(name))
  end

  @spec table_drop(:term.t, binary) :: :term.t
  def table_drop(db, name) do
    :term.new(type: :'TABLE_DROP', args: [db, expr(name)])
  end

  @spec table_list() :: :term.t
  def table_list(), do: :term.new(type: :'TABLE_LIST')

  @spec table_list(:term.t) :: :term.t
  def table_list(db) do
    :term.new(type: :'TABLE_LIST', args: db)
  end

  ## Secondary index operations

  @spec index_create(:term.t, binary) :: :term.t
  def index_create(table, name) do
    args = [table, expr(name)]
    :term.new(type: :'INDEX_CREATE', args: args)
  end

  # Variant with an index function evaluated per document.
  @spec index_create(:term.t, binary, fun) :: :term.t
  def index_create(table, name, fun) do
    args = [table, expr(name), expr(fun)]
    :term.new(type: :'INDEX_CREATE', args: args)
  end

  @spec index_drop(:term.t, binary) :: :term.t
  def index_drop(table, name) do
    args = [table, expr(name)]
    :term.new(type: :'INDEX_DROP', args: args)
  end

  @spec index_list(:term.t) :: :term.t
  def index_list(table) do
    args = table
    :term.new(type: :'INDEX_LIST', args: args)
  end

  #%% @doc Specify a DB. Must be first operation in query list
  #%% Optional if a default database has been specified via
  #%% @see lethink:use/2
  @spec db(binary) :: :term.t
  def db(name), do: :term.new(type: :'DB', args: expr(name))

  @spec table(binary) :: :term.t
  def table(name), do: table(name, [])

  @spec table(:term.t | binary, binary | Keyword.t) :: :term.t
  def table(db, name) when is_record(db, :term), do: table(db, name, [])

  def table(name, options) when is_binary(name) do
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'TABLE', args: expr(name), optargs: optargs)
  end

  @spec table(:term.t, binary, Keyword.t) :: :term.t
  def table(db, name, options) do
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'TABLE', args: [db, expr(name)], optargs: optargs)
  end

  ## Writing and selecting data

  @spec insert(:term.t, Dict.t | [Dict.t]) :: :term.t
  def insert(table, data) when is_record(table, :term) do
    insert(table, data, [])
  end

  @spec insert(:term.t, Dict.t | [Dict.t], Keyword.t) :: :term.t
  def insert(table, data, options) when is_record(table, :term) do
    args = [table, expr(data)]
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'INSERT', args: args, optargs: optargs)
  end

  @spec get(:term.t, key_arg) :: :term.t
  def get(table, key) when is_record(table, :term) and
      (is_binary(key) or is_number(key)) do
    args = [table, expr(key)]
    :term.new(type: :'GET', args: args)
  end

  @spec get_all(:term.t, key_arg | [key_arg], Keyword.t) :: :term.t
  def get_all(table, key, options // []) when is_record(table, :term) do
    args = [table, expr(key)]
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'GET_ALL', args: args, optargs: optargs)
  end

  @spec between(:term.t, key_arg, key_arg, Keyword.t) :: :term.t
  def between(:term[] = selection, lower_key, upper_key, options // []) do
    args = [selection, expr(lower_key), expr(upper_key)]
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'BETWEEN', args: args, optargs: optargs)
  end

  # update/replace pass their data through func_wrap/1 so expressions that
  # reference the implicit row variable become server-side functions.
  @spec update(:term.t, Dict.t | fun, Keyword.t) :: :term.t
  def update(selection, data, options // []) do
    args = [selection, func_wrap(data)]
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'UPDATE', args: args, optargs: optargs)
  end

  @spec replace(:term.t, Dict.t | fun, Keyword.t) :: :term.t
  def replace(selection, data, options // []) do
    args = [selection, func_wrap(data)]
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'REPLACE', args: args, optargs: optargs)
  end

  @spec delete(:term.t, Keyword.t) :: :term.t
  def delete(selection, options // []) do
    args = selection
    optargs = lc opt inlist options, do: option_term(opt)
    :term.new(type: :'DELETE', args: args, optargs: optargs)
  end

  # The implicit row variable (ReQL's `r.row`).
  @spec row() :: :term.t
  def row(), do: :term.new(type: :'IMPLICIT_VAR')

  @spec get_field(:term.t, binary) :: :term.t
  def get_field(term, attr) do
    args = [term, expr(attr)]
    :term.new(type: :'GET_FIELD', args: args)
  end

  #%% Math and Logic Operations

  @spec add(:term.t, number | binary) :: :term.t
  def add(term, value) do
    :term.new(type: :'ADD', args: [term, expr(value)])
  end

  @spec sub(:term.t, number) :: :term.t
  def sub(term, value) do
    :term.new(type: :'SUB', args: [term, expr(value)])
  end

  @spec mul(:term.t, number) :: :term.t
  def mul(term, value) do
    :term.new(type: :'MUL', args: [term, expr(value)])
  end

  @spec div(:term.t, number) :: :term.t
  def div(term, value) do
    :term.new(type: :'DIV', args: [term, expr(value)])
  end

  @spec mod(:term.t, number) :: :term.t
  def mod(term, value) do
    :term.new(type: :'MOD', args: [term, expr(value)])
  end

  # Trailing underscores avoid clashing with the and/or/not operators.
  @spec and_(:term.t, number) :: :term.t
  def and_(term, value) do
    :term.new(type: :'AND', args: [term, expr(value)])
  end

  @spec or_(:term.t, number) :: :term.t
  def or_(term, value) do
    :term.new(type: :'OR', args: [term, expr(value)])
  end

  @spec eq(:term.t, number) :: :term.t
  def eq(term, value) do
    :term.new(type: :'EQ', args: [term, expr(value)])
  end

  @spec ne(:term.t, number) :: :term.t
  def ne(term, value) do
    :term.new(type: :'NE', args: [term, expr(value)])
  end

  @spec gt(:term.t, number) :: :term.t
  def gt(term, value) do
    :term.new(type: :'GT', args: [term, expr(value)])
  end

  @spec ge(:term.t, number) :: :term.t
  def ge(term, value) do
    :term.new(type: :'GE', args: [term, expr(value)])
  end

  @spec lt(:term.t, number) :: :term.t
  def lt(term, value) do
    :term.new(type: :'LT', args: [term, expr(value)])
  end

  @spec le(:term.t, number) :: :term.t
  def le(term, value) do
    :term.new(type: :'LE', args: [term, expr(value)])
  end

  @spec not_(:term.t) :: :term.t
  def not_(term) do
    :term.new(type: :'NOT', args: [term])
  end

  ## Expression building internals

  # Converts an arbitrary Elixir value into the ReQL AST: records pass
  # through, dicts become MAKE_OBJ, tuples become key/value assoc pairs,
  # lists become MAKE_ARRAY, functions become FUNC, everything else a DATUM.
  @spec expr(expr_arg) :: :term.t | :term_assocpair.t
  def expr(item) when is_record(item, :term), do: item
  def expr(item) when is_record(item, :term_assocpair), do: item

  def expr(doc) when is_record(doc, HashDict) do
    optargs = Enum.map(doc, expr(&1))
    :term.new(type: :'MAKE_OBJ', optargs: optargs)
  end

  def expr({key, value}), do: build_term_assocpair(key, value)

  def expr(items) when is_list(items) do
    make_array(items)
  end

  def expr(f) when is_function(f), do: func(f)
  def expr(value), do: :term.new(type: :'DATUM', datum: datum(value))

  @spec make_array([expr_arg]) :: :term.t
  def make_array(items) when is_list(items) do
    args = lc i inlist items, do: expr(i)
    :term.new(type: :'MAKE_ARRAY', args: args)
  end

  # @doc create Datums from the four basic types. Arrays and objects
  # are created via MAKE_ARRAY and MAKE_OBJ on the server since it's
  # cheaper that way.
  @spec datum(datum_arg) :: :datum.t
  defp datum(:null), do: :datum.new(type: :'R_NULL')
  defp datum(v) when is_boolean(v), do: :datum.new(type: :'R_BOOL', r_bool: v)
  defp datum(v) when is_number(v), do: :datum.new(type: :'R_NUM', r_num: v)
  defp datum(v) when is_binary(v), do: :datum.new(type: :'R_STR', r_str: v)

  defp datum(v) when is_atom(v) do
    :datum.new(type: :'R_STR', r_str: atom_to_binary(v))
  end

  @spec var(integer) :: :term.t
  def var(n), do: :term.new(type: :'VAR', args: expr(n))

  # Captures a client-side fun as a server-side FUNC term by applying it to
  # VAR placeholder terms (one per argument) and serializing the result.
  @spec func(fun) :: :term.t
  def func(func) do
    {_, arity} = :erlang.fun_info(func, :arity)
    arg_count_list = :lists.seq(1, arity)
    func_args = lc n inlist arg_count_list, do: var(n)
    args = [make_array(arg_count_list), expr(apply(func, func_args))]
    :term.new(type: :'FUNC', args: args)
  end

  # Wraps an expression in a FUNC only when it references the implicit row
  # variable; plain values are passed through unchanged.
  @spec func_wrap(expr_arg) :: :term.t | :term_assocpair.t
  defp func_wrap(data) do
    value = expr(data)
    case ivar_scan(value) do
      true -> func(fn(_) -> value end)
      false -> value
    end
  end

  # Scan for IMPLICIT_VAR or JS
  @spec ivar_scan(:term.t | :term_assocpair.t | [:term.t] | [:term_assocpair.t]) :: boolean
  defp ivar_scan(:term[type: :'IMPLICIT_VAR']), do: :true
  defp ivar_scan(list) when is_list(list), do: Enum.any?(list, ivar_scan(&1))

  defp ivar_scan(term) when is_record(term, :term) do
    ivar_scan(term.args) or ivar_scan(term.optargs)
  end

  defp ivar_scan(term_pair) when is_record(term_pair, :term_assocpair) do
    ivar_scan(term_pair.val)
  end

  defp ivar_scan(_), do: :false

  # Key/value pair for MAKE_OBJ and optargs; atom keys become UTF-8 binaries.
  @spec build_term_assocpair(binary | atom, expr_arg) :: :term_assocpair.t
  defp build_term_assocpair(key, value) when is_atom(key) do
    build_term_assocpair(atom_to_binary(key, :utf8), value)
  end

  defp build_term_assocpair(key, value) when is_binary(key) do
    :term_assocpair.new(key: key, val: expr(value))
  end

  # Normalizes a keyword option to a binary/binary assoc pair.
  @spec option_term({atom | binary, atom | binary}) :: :term_assocpair.t
  defp option_term({key, value}) when is_atom(value) do
    option_term({key, atom_to_binary(value, :utf8)})
  end

  defp option_term({key, value}) when is_atom(key) do
    option_term({atom_to_binary(key, :utf8), value})
  end

  defp option_term({key, value}) do
    build_term_assocpair(key, value)
  end
end
|
lib/lexthink/ast.ex
| 0.761361 | 0.42925 |
ast.ex
|
starcoder
|
defmodule Explorer.DataFrame do
@moduledoc """
The DataFrame struct and API.
Dataframes are two-dimensional tabular data structures similar to a spreadsheet.
For example, the Iris dataset:
iex> Explorer.Datasets.iris()
#Explorer.DataFrame<
Polars[150 x 5]
sepal_length float [5.1, 4.9, 4.7, 4.6, 5.0, ...]
sepal_width float [3.5, 3.0, 3.2, 3.1, 3.6, ...]
petal_length float [1.4, 1.4, 1.3, 1.5, 1.4, ...]
petal_width float [0.2, 0.2, 0.2, 0.2, 0.2, ...]
species string ["Iris-setosa", "Iris-setosa", "Iris-setosa", "Iris-setosa", "Iris-setosa", ...]
>
This dataframe has 150 rows and five columns. Each column is an `Explorer.Series`
of the same size (150):
iex> df = Explorer.Datasets.iris()
iex> df["sepal_length"]
#Explorer.Series<
float[150]
[5.1, 4.9, 4.7, 4.6, 5.0, 5.4, 4.6, 5.0, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5.0, 5.0, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5.0, 5.5, 4.9, 4.4, 5.1, 5.0, 4.5, 4.4, 5.0, 5.1, 4.8, 5.1, 4.6, 5.3, 5.0, ...]
>
## Creating dataframes
Dataframes can be created from normal Elixir objects. The main ways you might do this are
`from_columns/1` and `from_rows/1`. For example:
iex> Explorer.DataFrame.new(a: ["a", "b"], b: [1, 2])
#Explorer.DataFrame<
Polars[2 x 2]
a string ["a", "b"]
b integer [1, 2]
>
## Verbs
Explorer uses the idea of a consistent set of SQL-like `verbs` like [`dplyr`](https://dplyr.tidyverse.org)
which can help solve common data manipulation challenges. These are split into single table verbs and multiple table verbs.
### Single table verbs
Single table verbs are (unsurprisingly) used for manipulating a single dataframe. These are:
- `select/3` for picking variables
- `filter/2` for picking rows based on predicates
- `mutate/2` for adding or replacing columns that are functions of existing columns
- `arrange/2` for changing the ordering of rows
- `distinct/2` for picking unique rows
- `summarise/2` for reducing multiple rows down to a single summary
- `pivot_longer/3` and `pivot_wider/4` for massaging dataframes into longer or wider forms, respectively
Each of these combine with `Explorer.DataFrame.group_by/2` for operating by group.
### Multiple table verbs
Multiple table verbs are used for combining tables. These are:
- `join/3` for performing SQL-like joins
- `concat_rows/1` for vertically "stacking" dataframes
## IO
Explorer supports reading and writing of:
- delimited files (such as CSV)
- [Parquet](https://databricks.com/glossary/what-is-parquet)
- [Arrow IPC](https://arrow.apache.org/docs/format/Columnar.html#ipc-file-format)
- [Newline Delimited JSON](http://ndjson.org)
The convention Explorer uses is to have `from_*` and `to_*` functions to read and write
to files in the formats above. `load_*` and `dump_*` versions are also available to read
and write those formats directly in memory.
## Access
In addition to this "grammar" of data manipulation, you'll find useful functions for
slicing and dicing dataframes such as `pull/2`, `head/2`, `sample/3`, `slice/3`, and
`take/2`.
`Explorer.DataFrame` also implements the `Access` behaviour (also known as the brackets
syntax). This should be familiar for users coming from other language with dataframes
such as R or Python. For example:
iex> df = Explorer.Datasets.wine()
iex> df["class"]
#Explorer.Series<
integer[178]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...]
>
"""
alias __MODULE__, as: DataFrame
alias Explorer.Series
alias Explorer.Shared
@valid_dtypes Explorer.Shared.dtypes()
@type data :: Explorer.Backend.DataFrame.t()
@type t :: %DataFrame{data: data, groups: [String.t()]}
@enforce_keys [:data, :groups]
defstruct [:data, :groups]
@type column_name :: atom() | String.t()
@type column :: column_name() | non_neg_integer()
@type columns :: [column] | Range.t()
@type column_names :: [column_name]
@type column_pairs(other) :: [{column(), other}] | %{column() => other}
@default_infer_schema_length 1000
# Guards and helpers for columns
# Guards for the three accepted column-identifier shapes:
# - `is_column/1`: a name (string or atom) or a positional index (integer)
# - `is_column_name/1`: a name only (string or atom)
# - `is_column_pairs/1`: a collection of `{column, value}` pairs (list or map)
defguard is_column(column) when is_binary(column) or is_atom(column) or is_integer(column)
defguard is_column_name(column) when is_binary(column) or is_atom(column)
defguard is_column_pairs(columns) when is_list(columns) or is_map(columns)
# Normalize a column name to string
# Atoms are converted to their string form; strings pass through untouched.
defp to_column_name(column) when is_atom(column), do: Atom.to_string(column)
defp to_column_name(column) when is_binary(column), do: column
# Normalize pairs of `{column, value}` where value can be anything.
# The `column` is only validated if it's an integer. We check that the index is present.
defp to_column_pairs(df, pairs), do: to_column_pairs(df, pairs, & &1)

# The function allows to change the `value` for each pair.
#
# Keys are normalized to strings; an integer key is resolved to the name of
# the column at that (possibly negative) position and raises if out of
# bounds. The index->name map is built lazily — only when the first integer
# key is seen — and threaded through `Enum.map_reduce/3` so later integer
# keys reuse it.
defp to_column_pairs(df, pairs, value_fun)
     when is_column_pairs(pairs) and is_function(value_fun, 1) do
  existing_columns = names(df)

  pairs
  |> Enum.map_reduce(nil, fn
    {column, value}, maybe_map when is_integer(column) ->
      # Build the index map on first use, then carry it in the accumulator.
      map = maybe_map || column_index_map(existing_columns)
      existing_column = fetch_column_at!(map, column)
      {{existing_column, value_fun.(value)}, map}

    {column, value}, maybe_map when is_atom(column) ->
      column = Atom.to_string(column)
      {{column, value_fun.(value)}, maybe_map}

    {column, value}, maybe_map when is_binary(column) ->
      # String keys pass through; existence is not verified here.
      {{column, value_fun.(value)}, maybe_map}
  end)
  |> then(fn {pairs, _} -> pairs end)
end
# Resolves a positional index (negative counts from the end) to a column
# name using an index->name map; raises ArgumentError when out of bounds.
defp fetch_column_at!(map, index) do
  normalized = if index < 0, do: index + map_size(map), else: index

  case Map.fetch(map, normalized) do
    {:ok, column} -> column
    :error -> raise ArgumentError, "no column exists at index #{index}"
  end
end
# Builds a position->name lookup map from an ordered list of column names.
defp column_index_map(names) do
  names
  |> Enum.with_index()
  |> Map.new(fn {name, idx} -> {idx, name} end)
end
# Normalize column names without verifying if they exist.
defp to_column_names(names) when is_list(names) do
  for name <- names, do: to_column_name(name)
end
# Normalize column names and raise if column does not exist.
# Normalizes a list of column identifiers to strings and raises when a name
# is not present in the dataframe. Integer identifiers are resolved by
# position (the index->name map is built lazily on first integer, as in
# to_column_pairs/3); atom and string names are checked against names(df).
defp to_existing_columns(df, columns) when is_list(columns) do
  existing_columns = names(df)

  columns
  |> Enum.map_reduce(nil, fn
    column, maybe_map when is_integer(column) ->
      map = maybe_map || column_index_map(existing_columns)
      existing_column = fetch_column_at!(map, column)
      {existing_column, map}

    column, maybe_map when is_atom(column) ->
      column = Atom.to_string(column)
      maybe_raise_column_not_found(existing_columns, column)
      {column, maybe_map}

    column, maybe_map when is_binary(column) ->
      maybe_raise_column_not_found(existing_columns, column)
      {column, maybe_map}
  end)
  |> then(fn {columns, _} -> columns end)
end

# A range selects columns by position; out-of-range parts are simply
# dropped by Enum.slice/2 (no error is raised).
defp to_existing_columns(df, %Range{} = columns) do
  Enum.slice(names(df), columns)
end
# Validates a list/map of `{column, dtype}` entries: keys are normalized to
# strings, and every dtype must be one of @valid_dtypes; anything else
# raises ArgumentError.
defp check_dtypes!(dtypes) do
  Enum.map(dtypes, fn
    {key, value} when is_atom(key) ->
      {Atom.to_string(key), check_dtype!(key, value)}

    {key, value} when is_binary(key) ->
      {key, check_dtype!(key, value)}

    _ ->
      raise ArgumentError,
            "dtypes must be a list/map of column names as keys and types as values, " <>
              "where the keys are atoms or binaries. Got: #{inspect(dtypes)}"
  end)
end

# Returns the dtype unchanged when valid; `key` is only used for the error
# message.
defp check_dtype!(_key, value) when value in @valid_dtypes, do: value

defp check_dtype!(key, value) do
  raise ArgumentError,
        "invalid dtype #{inspect(value)} for #{inspect(key)} (expected one of #{inspect(@valid_dtypes)})"
end
# Access

@behaviour Access

# `Access` callback backing `df[...]`. A single column name/index yields the
# column as a series...
@impl true
def fetch(df, column) when is_column(column) do
  {:ok, pull(df, column)}
end

# ...while anything `to_existing_columns/2` accepts (list of names/indexes or
# a range) yields a sub-dataframe.
def fetch(df, columns) do
  columns = to_existing_columns(df, columns)
  {:ok, select(df, columns)}
end
# `Access` callback: removes the given column(s), returning `{removed, rest}`.
# A single column pops out as a series...
@impl true
def pop(df, column) when is_column(column) do
  [column] = to_existing_columns(df, [column])
  {pull(df, column), select(df, [column], :drop)}
end

# ...while a collection of columns pops out as a dataframe.
def pop(df, columns) do
  columns = to_existing_columns(df, columns)
  {select(df, columns), select(df, columns, :drop)}
end
# `Access` callback used by `Kernel.get_and_update_in/3` and friends.
#
# Per the `Access` contract, `fun` receives the current column (a series) and
# must return either `{current_value, new_value}` or `:pop`. The original
# implementation matched only on the two-tuple and raised `MatchError` when a
# caller's fun returned `:pop`; we now honor both shapes.
@impl true
def get_and_update(df, column, fun) when is_column(column) do
  [column] = to_existing_columns(df, [column])
  value = pull(df, column)

  case fun.(value) do
    {current_value, new_value} ->
      # Overwrite (or re-create) the column with the updated value.
      {current_value, mutate(df, %{column => new_value})}

    :pop ->
      # `:pop` asks for the key's removal, mirroring `pop/2`.
      {value, select(df, [column], :drop)}
  end
end
# IO

@doc """
Reads a delimited file into a dataframe.

If the CSV is compressed, it is automatically decompressed.

## Options

  * `delimiter` - A single character used to separate fields within a record. (default: `","`)
  * `dtypes` - A list/map of `{"column_name", dtype}` tuples. Any non-specified column has its type
    imputed from the first 1000 rows. (default: `[]`)
  * `encoding` - The encoding of the file. (default: `"utf8"`)
  * `header` - Does the file have a header of column names as the first row or not? (default: `true`)
  * `max_rows` - Maximum number of lines to read. (default: `nil`)
  * `null_character` - The string that should be interpreted as a nil value. (default: `"NA"`)
  * `skip_rows` - The number of lines to skip at the beginning of the file. (default: `0`)
  * `columns` - A list of column names or indexes to keep. If present, only these columns are read into the dataframe. (default: `nil`)
  * `infer_schema_length` - Maximum number of rows read for schema inference. Setting this to nil will do a full table scan and will be slow (default: `1000`).
  * `parse_dates` - Automatically try to parse dates/ datetimes and time. If parsing fails, columns remain of dtype `string`
"""
@doc type: :io
@spec from_csv(filename :: String.t(), opts :: Keyword.t()) ::
        {:ok, DataFrame.t()} | {:error, term()}
def from_csv(filename, opts \\ []) do
  opts =
    Keyword.validate!(opts,
      delimiter: ",",
      dtypes: [],
      encoding: "utf8",
      header: true,
      max_rows: nil,
      null_character: "NA",
      skip_rows: 0,
      columns: nil,
      infer_schema_length: @default_infer_schema_length,
      parse_dates: false
    )

  backend = backend_from_options!(opts)

  # Positional arguments below must stay in the order the backend callback expects.
  backend.from_csv(
    filename,
    check_dtypes!(opts[:dtypes]),
    opts[:delimiter],
    opts[:null_character],
    opts[:skip_rows],
    opts[:header],
    opts[:encoding],
    opts[:max_rows],
    opts[:columns],
    opts[:infer_schema_length],
    opts[:parse_dates]
  )
end
@doc """
Similar to `from_csv/2` but raises if there is a problem reading the CSV.
"""
@doc type: :io
@spec from_csv!(filename :: String.t(), opts :: Keyword.t()) :: DataFrame.t()
def from_csv!(filename, opts \\ []) do
  case from_csv(filename, opts) do
    {:ok, dataframe} -> dataframe
    {:error, reason} -> raise "#{reason}"
  end
end
@doc """
Reads a parquet file into a dataframe.
"""
@doc type: :io
@spec from_parquet(filename :: String.t(), opts :: Keyword.t()) ::
        {:ok, DataFrame.t()} | {:error, term()}
def from_parquet(filename, opts \\ []) do
  # NOTE(review): `opts` is only consulted for backend selection here; no other
  # options are validated or forwarded to the backend — confirm this is intended.
  backend = backend_from_options!(opts)
  backend.from_parquet(filename)
end
@doc """
Writes a dataframe to a parquet file.
"""
@doc type: :io
@spec to_parquet(df :: DataFrame.t(), filename :: String.t()) ::
        {:ok, String.t()} | {:error, term()}
def to_parquet(df, filename), do: Shared.apply_impl(df, :to_parquet, [filename])
@doc """
Reads an IPC file into a dataframe.

## Options

  * `columns` - List with the name or index of columns to be selected. Defaults to all columns.
"""
@doc type: :io
# Fix: the previous @spec declared arity 1 (`from_ipc(filename)`) while the
# function takes an optional second argument, so the spec never matched.
@spec from_ipc(filename :: String.t(), opts :: Keyword.t()) ::
        {:ok, DataFrame.t()} | {:error, term()}
def from_ipc(filename, opts \\ []) do
  opts = Keyword.validate!(opts, columns: nil)

  backend = backend_from_options!(opts)
  backend.from_ipc(filename, opts[:columns])
end
@doc """
Similar to `from_ipc/2` but raises if there is a problem reading the IPC file.
"""
@doc type: :io
@spec from_ipc!(filename :: String.t(), opts :: Keyword.t()) :: DataFrame.t()
def from_ipc!(filename, opts \\ []) do
  case from_ipc(filename, opts) do
    {:ok, dataframe} -> dataframe
    {:error, reason} -> raise "#{reason}"
  end
end
@doc """
Writes a dataframe to a IPC file.

Apache IPC is a language-agnostic columnar data structure that can be used to store data frames.
It excels as a format for quickly exchanging data between different programming languages.

## Options

  * `compression` - Sets the algorithm used to compress the IPC file.
    It accepts `"ZSTD"` or `"LZ4"` compression. (default: `nil`)
"""
@doc type: :io
# Fix: the previous @spec declared arity 2 while the function takes an optional
# third `opts` argument, so the spec never matched the definition.
@spec to_ipc(df :: DataFrame.t(), filename :: String.t(), opts :: Keyword.t()) ::
        {:ok, String.t()} | {:error, term()}
def to_ipc(df, filename, opts \\ []) do
  opts = Keyword.validate!(opts, compression: nil)

  backend = backend_from_options!(opts)
  backend.to_ipc(df, filename, opts[:compression])
end
@doc """
Writes a dataframe to a delimited file.

## Options

  * `header` - Should the column names be written as the first line of the file? (default: `true`)
  * `delimiter` - A single character used to separate fields within a record. (default: `","`)
"""
@doc type: :io
@spec to_csv(df :: DataFrame.t(), filename :: String.t(), opts :: Keyword.t()) ::
        {:ok, String.t()} | {:error, term()}
def to_csv(df, filename, opts \\ []) do
  opts = Keyword.validate!(opts, header: true, delimiter: ",")
  header = opts[:header]
  delimiter = opts[:delimiter]

  Shared.apply_impl(df, :to_csv, [filename, header, delimiter])
end
@doc """
Similar to `to_csv/3` but raises if there is a problem writing the CSV.
"""
@doc type: :io
@spec to_csv!(df :: DataFrame.t(), filename :: String.t(), opts :: Keyword.t()) :: String.t()
def to_csv!(df, filename, opts \\ []) do
  case to_csv(df, filename, opts) do
    {:ok, filename} -> filename
    {:error, error} -> raise "#{error}"
  end
end
@doc """
Read a file of JSON objects or lists separated by new lines

## Options

  * `batch_size` - Sets the batch size for reading rows.
    This value may have significant impact in performance, so adjust it for your needs (default: `1000`).
  * `infer_schema_length` - Maximum number of rows read for schema inference.
    Setting this to nil will do a full table scan and will be slow (default: `1000`).
"""
@doc type: :io
@spec from_ndjson(filename :: String.t(), opts :: Keyword.t()) ::
        {:ok, DataFrame.t()} | {:error, term()}
def from_ndjson(filename, opts \\ []) do
  opts =
    Keyword.validate!(opts,
      batch_size: 1000,
      infer_schema_length: @default_infer_schema_length
    )

  backend = backend_from_options!(opts)

  # Note: the backend expects `infer_schema_length` before `batch_size`.
  backend.from_ndjson(
    filename,
    opts[:infer_schema_length],
    opts[:batch_size]
  )
end
@doc """
Writes a dataframe to a ndjson file.
"""
@doc type: :io
@spec to_ndjson(df :: DataFrame.t(), filename :: String.t()) ::
        {:ok, String.t()} | {:error, term()}
def to_ndjson(df, filename), do: Shared.apply_impl(df, :to_ndjson, [filename])
@doc """
Writes a dataframe to a binary representation of a delimited file.

## Options

  * `header` - Should the column names be written as the first line of the file? (default: `true`)
  * `delimiter` - A single character used to separate fields within a record. (default: `","`)

## Examples

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> df |> Explorer.DataFrame.head(2) |> Explorer.DataFrame.dump_csv()
    "year,country,total,solid_fuel,liquid_fuel,gas_fuel,cement,gas_flaring,per_capita,bunker_fuels\\n2010,AFGHANISTAN,2308,627,1601,74,5,0,0.08,9\\n2010,ALBANIA,1254,117,953,7,177,0,0.43,7\\n"
"""
@doc type: :io
@spec dump_csv(df :: DataFrame.t(), opts :: Keyword.t()) :: String.t()
def dump_csv(df, opts \\ []) do
  opts = Keyword.validate!(opts, header: true, delimiter: ",")
  header = opts[:header]
  delimiter = opts[:delimiter]

  Shared.apply_impl(df, :dump_csv, [header, delimiter])
end
## Conversion

@doc """
Converts the dataframe to the lazy version of the current backend.

If already lazy, this is a noop.
"""
@spec to_lazy(df :: DataFrame.t()) :: DataFrame.t()
def to_lazy(df) do
  Shared.apply_impl(df, :to_lazy)
end
@doc """
This collects the lazy data frame into an eager one, computing the query.

If already eager, this is a noop.
"""
@spec collect(df :: DataFrame.t()) :: {:ok, DataFrame.t()} | {:error, term()}
def collect(df) do
  Shared.apply_impl(df, :collect)
end
@doc """
Creates a new dataframe.

Accepts any tabular data adhering to the `Table.Reader` protocol, as well as a map or a keyword list with series.

## Options

  * `backend` - The Explorer backend to use. Defaults to the value returned by `Explorer.Backend.get/0`.

## Examples

From series:

    iex> Explorer.DataFrame.new(%{floats: Explorer.Series.from_list([1.0, 2.0]), ints: Explorer.Series.from_list([1, nil])})
    #Explorer.DataFrame<
      Polars[2 x 2]
      floats float [1.0, 2.0]
      ints integer [1, nil]
    >

From columnar data:

    iex> Explorer.DataFrame.new(%{floats: [1.0, 2.0], ints: [1, nil]})
    #Explorer.DataFrame<
      Polars[2 x 2]
      floats float [1.0, 2.0]
      ints integer [1, nil]
    >

    iex> Explorer.DataFrame.new(floats: [1.0, 2.0], ints: [1, nil])
    #Explorer.DataFrame<
      Polars[2 x 2]
      floats float [1.0, 2.0]
      ints integer [1, nil]
    >

    iex> Explorer.DataFrame.new(%{floats: [1.0, 2.0], ints: [1, "wrong"]})
    ** (ArgumentError) cannot create series "ints": the value "wrong" does not match the inferred series dtype :integer

From row data:

    iex> rows = [%{id: 1, name: "José"}, %{id: 2, name: "Christopher"}, %{id: 3, name: "Cristine"}]
    iex> Explorer.DataFrame.new(rows)
    #Explorer.DataFrame<
      Polars[3 x 2]
      id integer [1, 2, 3]
      name string ["José", "Christopher", "Cristine"]
    >

    iex> rows = [[id: 1, name: "José"], [id: 2, name: "Christopher"], [id: 3, name: "Cristine"]]
    iex> Explorer.DataFrame.new(rows)
    #Explorer.DataFrame<
      Polars[3 x 2]
      id integer [1, 2, 3]
      name string ["José", "Christopher", "Cristine"]
    >
"""
@doc type: :single
@spec new(
        Table.Reader.t() | series_pairs,
        opts :: Keyword.t()
      ) :: DataFrame.t()
      when series_pairs: %{column_name() => Series.t()} | [{column_name(), Series.t()}]
def new(data, opts \\ []) do
  backend = backend_from_options!(opts)

  case data do
    # Already a dataframe on the requested backend: return it untouched.
    %DataFrame{data: %^backend{}} ->
      data

    data ->
      # Otherwise dispatch on the input's shape: series pairs go through
      # `from_series/1`, everything else through `from_tabular/1`.
      case classify_data(data) do
        {:series, series} -> backend.from_series(series)
        {:other, tabular} -> backend.from_tabular(tabular)
      end
  end
end
# Distinguishes "pairs of series" input from generic tabular input for `new/2`.
# A list of pairs whose first value is a series is treated as series pairs.
defp classify_data([{_, %Series{}} | _] = data), do: {:series, data}

# Any struct (e.g. a dataframe or a `Table.Reader` implementation) is tabular.
defp classify_data(%_{} = data), do: {:other, data}

defp classify_data(data) when is_map(data) do
  # Peek at one (arbitrary) entry of the map: if its value is a series, assume
  # the whole map is name => series pairs. Using the map iterator avoids
  # materializing the map as a list just to inspect a single element.
  case :maps.next(:maps.iterator(data)) do
    {_key, %Series{}, _} -> {:series, data}
    _ -> {:other, data}
  end
end

# Anything else (e.g. a list of rows) is handed to the tabular path.
defp classify_data(data), do: {:other, data}
@doc """
Converts a dataframe to a list of columns with lists as values.

See `to_series/2` if you want a list of columns with series as values.

## Options

  * `:atom_keys` - Configure if the resultant map should have atom keys. (default: `false`)

## Examples

    iex> df = Explorer.DataFrame.new(ints: [1, nil], floats: [1.0, 2.0])
    iex> Explorer.DataFrame.to_columns(df)
    %{"floats" => [1.0, 2.0], "ints" => [1, nil]}

    iex> df = Explorer.DataFrame.new(floats: [1.0, 2.0], ints: [1, nil])
    iex> Explorer.DataFrame.to_columns(df, atom_keys: true)
    %{floats: [1.0, 2.0], ints: [1, nil]}
"""
@doc type: :single
@spec to_columns(df :: DataFrame.t(), Keyword.t()) :: map()
def to_columns(df, opts \\ []) do
  opts = Keyword.validate!(opts, atom_keys: false)
  to_atoms? = opts[:atom_keys]

  Map.new(names(df), fn name ->
    series = Shared.apply_impl(df, :pull, [name])
    key = if to_atoms?, do: String.to_atom(name), else: name
    {key, Series.to_list(series)}
  end)
end
@doc """
Converts a dataframe to a list of columns with series as values.

See `to_columns/2` if you want a list of columns with lists as values.

## Options

  * `:atom_keys` - Configure if the resultant map should have atom keys. (default: `false`)

## Examples

    iex> df = Explorer.DataFrame.new(ints: [1, nil], floats: [1.0, 2.0])
    iex> map = Explorer.DataFrame.to_series(df)
    iex> Explorer.Series.to_list(map["floats"])
    [1.0, 2.0]
    iex> Explorer.Series.to_list(map["ints"])
    [1, nil]
"""
@doc type: :single
@spec to_series(df :: DataFrame.t(), Keyword.t()) :: map()
def to_series(df, opts \\ []) do
  opts = Keyword.validate!(opts, atom_keys: false)
  to_atoms? = opts[:atom_keys]

  Map.new(names(df), fn name ->
    key = if to_atoms?, do: String.to_atom(name), else: name
    {key, Shared.apply_impl(df, :pull, [name])}
  end)
end
@doc """
Converts a dataframe to a list of maps (rows).

> #### Warning {: .warning}
>
> This may be an expensive operation because `polars` stores data in columnar format.

## Options

  * `:atom_keys` - Configure if the resultant maps should have atom keys. (default: `false`)

## Examples

    iex> df = Explorer.DataFrame.new(floats: [1.0, 2.0], ints: [1, nil])
    iex> Explorer.DataFrame.to_rows(df)
    [%{"floats" => 1.0, "ints" => 1}, %{"floats" => 2.0 ,"ints" => nil}]

    iex> df = Explorer.DataFrame.new(floats: [1.0, 2.0], ints: [1, nil])
    iex> Explorer.DataFrame.to_rows(df, atom_keys: true)
    [%{floats: 1.0, ints: 1}, %{floats: 2.0, ints: nil}]
"""
@doc type: :single
@spec to_rows(df :: DataFrame.t(), Keyword.t()) :: [map()]
def to_rows(df, opts \\ []) do
  atom_keys? =
    opts
    |> Keyword.validate!(atom_keys: false)
    |> Keyword.fetch!(:atom_keys)

  Shared.apply_impl(df, :to_rows, [atom_keys?])
end
# Introspection

@doc """
Gets the names of the dataframe columns.

## Examples

    iex> df = Explorer.DataFrame.new(floats: [1.0, 2.0], ints: [1, 2])
    iex> Explorer.DataFrame.names(df)
    ["floats", "ints"]
"""
@doc type: :introspection
@spec names(df :: DataFrame.t()) :: [String.t()]
def names(df) do
  Shared.apply_impl(df, :names)
end
@doc """
Gets the dtypes of the dataframe columns.

## Examples

    iex> df = Explorer.DataFrame.new(floats: [1.0, 2.0], ints: [1, 2])
    iex> Explorer.DataFrame.dtypes(df)
    [:float, :integer]
"""
@doc type: :introspection
@spec dtypes(df :: DataFrame.t()) :: [atom()]
def dtypes(df) do
  Shared.apply_impl(df, :dtypes)
end
@doc """
Gets the shape of the dataframe as a `{height, width}` tuple.

## Examples

    iex> df = Explorer.DataFrame.new(floats: [1.0, 2.0, 3.0], ints: [1, 2, 3])
    iex> Explorer.DataFrame.shape(df)
    {3, 2}
"""
@doc type: :introspection
@spec shape(df :: DataFrame.t()) :: {integer(), integer()}
def shape(df) do
  Shared.apply_impl(df, :shape)
end
@doc """
Returns the number of rows in the dataframe.

## Examples

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.n_rows(df)
    1094
"""
@doc type: :introspection
@spec n_rows(df :: DataFrame.t()) :: integer()
def n_rows(df) do
  Shared.apply_impl(df, :n_rows)
end
@doc """
Returns the number of columns in the dataframe.

## Examples

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.n_columns(df)
    10
"""
@doc type: :introspection
@spec n_columns(df :: DataFrame.t()) :: integer()
def n_columns(df) do
  Shared.apply_impl(df, :n_columns)
end
@doc """
Returns the groups of a dataframe.

## Examples

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> df = Explorer.DataFrame.group_by(df, "country")
    iex> Explorer.DataFrame.groups(df)
    ["country"]
"""
@doc type: :introspection
@spec groups(df :: DataFrame.t()) :: list(String.t())
def groups(%DataFrame{} = df), do: df.groups
# Single table verbs

@doc """
Returns the first *n* rows of the dataframe.

## Examples

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.head(df)
    #Explorer.DataFrame<
      Polars[5 x 10]
      year integer [2010, 2010, 2010, 2010, 2010]
      country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA"]
      total integer [2308, 1254, 32500, 141, 7924]
      solid_fuel integer [627, 117, 332, 0, 0]
      liquid_fuel integer [1601, 953, 12381, 141, 3649]
      gas_fuel integer [74, 7, 14565, 0, 374]
      cement integer [5, 177, 2598, 0, 204]
      gas_flaring integer [0, 0, 2623, 0, 3697]
      per_capita float [0.08, 0.43, 0.9, 1.68, 0.37]
      bunker_fuels integer [9, 7, 663, 0, 321]
    >
"""
@doc type: :single
@spec head(df :: DataFrame.t(), nrows :: integer()) :: DataFrame.t()
def head(df, nrows \\ 5) do
  Shared.apply_impl(df, :head, [nrows])
end
@doc """
Returns the last *n* rows of the dataframe.

## Examples

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.tail(df)
    #Explorer.DataFrame<
      Polars[5 x 10]
      year integer [2014, 2014, 2014, 2014, 2014]
      country string ["VIET NAM", "WALLIS AND FUTUNA ISLANDS", "YEMEN", "ZAMBIA", "ZIMBABWE"]
      total integer [45517, 6, 6190, 1228, 3278]
      solid_fuel integer [19246, 0, 137, 132, 2097]
      liquid_fuel integer [12694, 6, 5090, 797, 1005]
      gas_fuel integer [5349, 0, 581, 0, 0]
      cement integer [8229, 0, 381, 299, 177]
      gas_flaring integer [0, 0, 0, 0, 0]
      per_capita float [0.49, 0.44, 0.24, 0.08, 0.22]
      bunker_fuels integer [761, 1, 153, 33, 9]
    >
"""
@doc type: :single
@spec tail(df :: DataFrame.t(), nrows :: integer()) :: DataFrame.t()
def tail(df, nrows \\ 5) do
  Shared.apply_impl(df, :tail, [nrows])
end
@doc """
Selects a subset of columns by name.

Can optionally return all *but* the named columns if `:drop` is passed as the last argument.

## Examples

You can select columns with a list of names:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.select(df, ["a"])
    #Explorer.DataFrame<
      Polars[3 x 1]
      a string ["a", "b", "c"]
    >

You can also use a range or a list of integers:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3], c: [4, 5, 6])
    iex> Explorer.DataFrame.select(df, [0, 1])
    #Explorer.DataFrame<
      Polars[3 x 2]
      a string ["a", "b", "c"]
      b integer [1, 2, 3]
    >

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3], c: [4, 5, 6])
    iex> Explorer.DataFrame.select(df, 0..1)
    #Explorer.DataFrame<
      Polars[3 x 2]
      a string ["a", "b", "c"]
      b integer [1, 2, 3]
    >

Or you can use a callback function that takes the dataframe's names as its first argument:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.select(df, &String.starts_with?(&1, "b"))
    #Explorer.DataFrame<
      Polars[3 x 1]
      b integer [1, 2, 3]
    >

If you pass `:drop` as the third argument, it will return all but the named columns:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.select(df, ["b"], :drop)
    #Explorer.DataFrame<
      Polars[3 x 1]
      a string ["a", "b", "c"]
    >

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3], c: [4, 5, 6])
    iex> Explorer.DataFrame.select(df, ["a", "b"], :drop)
    #Explorer.DataFrame<
      Polars[3 x 1]
      c integer [4, 5, 6]
    >
"""
@doc type: :single
@spec select(
        df :: DataFrame.t(),
        columns_or_callback :: columns() | function(),
        keep_or_drop ::
          :keep | :drop
      ) :: DataFrame.t()
def select(df, columns_or_callback, keep_or_drop \\ :keep)

# A callback selects by filtering the dataframe's column names, then recurses
# into the list clause below.
def select(df, callback, keep_or_drop) when is_function(callback),
  do: df |> names() |> Enum.filter(callback) |> then(&select(df, &1, keep_or_drop))

# Lists/ranges of names or indexes are normalized (raising on unknown columns)
# before delegating to the backend.
def select(df, columns, keep_or_drop) do
  columns = to_existing_columns(df, columns)

  Shared.apply_impl(df, :select, [columns, keep_or_drop])
end
@doc """
Subset rows using column values.

## Examples

You can pass a mask directly:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.filter(df, Explorer.Series.greater(df["b"], 1))
    #Explorer.DataFrame<
      Polars[2 x 2]
      a string ["b", "c"]
      b integer [2, 3]
    >

You can combine masks using `Explorer.Series.and/2` or `Explorer.Series.or/2`:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> b_gt = Explorer.Series.greater(df["b"], 1)
    iex> a_eq = Explorer.Series.equal(df["a"], "b")
    iex> Explorer.DataFrame.filter(df, Explorer.Series.and(a_eq, b_gt))
    #Explorer.DataFrame<
      Polars[1 x 2]
      a string ["b"]
      b integer [2]
    >

Including a list:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.filter(df, [false, true, false])
    #Explorer.DataFrame<
      Polars[1 x 2]
      a string ["b"]
      b integer [2]
    >

Or you can invoke a callback on the dataframe:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.filter(df, &Explorer.Series.greater(&1["b"], 1))
    #Explorer.DataFrame<
      Polars[2 x 2]
      a string ["b", "c"]
      b integer [2, 3]
    >
"""
@doc type: :single
@spec filter(df :: DataFrame.t(), mask :: Series.t() | [boolean()]) :: DataFrame.t()
def filter(df, %Series{} = mask) do
  mask_size = Series.size(mask)
  df_size = n_rows(df)

  # The mask must be exactly as long as the dataframe; anything else is a
  # caller error.
  if mask_size == df_size do
    Shared.apply_impl(df, :filter, [mask])
  else
    raise(
      ArgumentError,
      "size of the mask (#{mask_size}) must match number of rows in the dataframe (#{df_size})"
    )
  end
end

# A plain boolean list is first lifted into a series.
def filter(df, mask) when is_list(mask), do: mask |> Series.from_list() |> then(&filter(df, &1))

@spec filter(df :: DataFrame.t(), callback :: function()) :: DataFrame.t()
# A callback receives the dataframe and must produce a mask (series or list),
# which is then fed back through the clauses above.
def filter(df, callback) when is_function(callback),
  do: df |> callback.() |> then(&filter(df, &1))
@doc """
Creates and modifies columns.

Columns are added with keyword list or maps. New variables overwrite existing variables of the
same name. Column names are coerced from atoms to strings.

## Examples

You can pass in a list directly as a new column:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.mutate(df, c: [4, 5, 6])
    #Explorer.DataFrame<
      Polars[3 x 3]
      a string ["a", "b", "c"]
      b integer [1, 2, 3]
      c integer [4, 5, 6]
    >

Or you can pass in a series:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> s = Explorer.Series.from_list([4, 5, 6])
    iex> Explorer.DataFrame.mutate(df, c: s)
    #Explorer.DataFrame<
      Polars[3 x 3]
      a string ["a", "b", "c"]
      b integer [1, 2, 3]
      c integer [4, 5, 6]
    >

Or you can invoke a callback on the dataframe:

    iex> df = Explorer.DataFrame.new(a: [4, 5, 6], b: [1, 2, 3])
    iex> Explorer.DataFrame.mutate(df, c: &Explorer.Series.add(&1["a"], &1["b"]))
    #Explorer.DataFrame<
      Polars[3 x 3]
      a integer [4, 5, 6]
      b integer [1, 2, 3]
      c integer [5, 7, 9]
    >

You can overwrite existing columns:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.mutate(df, a: [4, 5, 6])
    #Explorer.DataFrame<
      Polars[3 x 2]
      a integer [4, 5, 6]
      b integer [1, 2, 3]
    >

Scalar values are repeated to fill the series:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.mutate(df, a: 4)
    #Explorer.DataFrame<
      Polars[3 x 2]
      a integer [4, 4, 4]
      b integer [1, 2, 3]
    >

Including when a callback returns a scalar:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.mutate(df, a: &Explorer.Series.max(&1["b"]))
    #Explorer.DataFrame<
      Polars[3 x 2]
      a integer [3, 3, 3]
      b integer [1, 2, 3]
    >

Alternatively, all of the above works with a map instead of a keyword list:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "c"], b: [1, 2, 3])
    iex> Explorer.DataFrame.mutate(df, %{"c" => [4, 5, 6]})
    #Explorer.DataFrame<
      Polars[3 x 3]
      a string ["a", "b", "c"]
      b integer [1, 2, 3]
      c integer [4, 5, 6]
    >
"""
@doc type: :single
@spec mutate(df :: DataFrame.t(), columns :: column_pairs(any())) ::
        DataFrame.t()
def mutate(df, columns) when is_column_pairs(columns) do
  mutations =
    df
    |> to_column_pairs(columns)
    |> Map.new()

  Shared.apply_impl(df, :mutate, [mutations])
end
@doc """
Arranges/sorts rows by columns.

## Examples

A single column name will sort ascending by that column:

    iex> df = Explorer.DataFrame.new(a: ["b", "c", "a"], b: [1, 2, 3])
    iex> Explorer.DataFrame.arrange(df, "a")
    #Explorer.DataFrame<
      Polars[3 x 2]
      a string ["a", "b", "c"]
      b integer [3, 1, 2]
    >

You can also sort descending:

    iex> df = Explorer.DataFrame.new(a: ["b", "c", "a"], b: [1, 2, 3])
    iex> Explorer.DataFrame.arrange(df, desc: "a")
    #Explorer.DataFrame<
      Polars[3 x 2]
      a string ["c", "b", "a"]
      b integer [2, 1, 3]
    >

Sorting by more than one column sorts them in the order they are entered:

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.arrange(df, asc: "total", desc: "country")
    #Explorer.DataFrame<
      Polars[1094 x 10]
      year integer [2010, 2012, 2011, 2013, 2014, ...]
      country string ["ZIMBABWE", "ZIMBABWE", "ZIMBABWE", "ZIMBABWE", "ZIMBABWE", ...]
      total integer [2121, 2125, 2608, 3184, 3278, ...]
      solid_fuel integer [1531, 917, 1584, 1902, 2097, ...]
      liquid_fuel integer [481, 1006, 888, 1119, 1005, ...]
      gas_fuel integer [0, 0, 0, 0, 0, ...]
      cement integer [109, 201, 136, 162, 177, ...]
      gas_flaring integer [0, 0, 0, 0, 0, ...]
      per_capita float [0.15, 0.15, 0.18, 0.21, 0.22, ...]
      bunker_fuels integer [7, 9, 8, 9, 9, ...]
    >
"""
@doc type: :single
@spec arrange(
        df :: DataFrame.t(),
        columns ::
          column() | [column() | {:asc | :desc, column()}]
      ) :: DataFrame.t()
def arrange(df, columns) when is_list(columns) do
  # Split the instructions into parallel lists of directions and columns,
  # defaulting bare column entries to ascending order.
  {dirs, columns} =
    Enum.map(columns, fn
      {dir, column} when dir in [:asc, :desc] and is_column(column) ->
        {dir, column}

      column when is_column(column) ->
        {:asc, column}

      other ->
        raise ArgumentError, "not a valid column or arrange instruction: #{inspect(other)}"
    end)
    |> Enum.unzip()

  # Normalize/validate the names, then re-pair each with its direction.
  columns = to_existing_columns(df, columns)

  Shared.apply_impl(df, :arrange, [Enum.zip(dirs, columns)])
end

# A single bare column is wrapped into the list form above.
def arrange(df, column) when is_column(column), do: arrange(df, [column])
@doc """
Takes distinct rows by a selection of columns.

## Examples

By default will return unique values of the requested columns:

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.distinct(df, columns: ["year", "country"])
    #Explorer.DataFrame<
      Polars[1094 x 2]
      year integer [2010, 2010, 2010, 2010, 2010, ...]
      country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
    >

If `keep_all?` is set to `true`, then the first value of each column not in the requested
columns will be returned:

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.distinct(df, columns: ["year", "country"], keep_all?: true)
    #Explorer.DataFrame<
      Polars[1094 x 10]
      year integer [2010, 2010, 2010, 2010, 2010, ...]
      country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
      total integer [2308, 1254, 32500, 141, 7924, ...]
      solid_fuel integer [627, 117, 332, 0, 0, ...]
      liquid_fuel integer [1601, 953, 12381, 141, 3649, ...]
      gas_fuel integer [74, 7, 14565, 0, 374, ...]
      cement integer [5, 177, 2598, 0, 204, ...]
      gas_flaring integer [0, 0, 2623, 0, 3697, ...]
      per_capita float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
      bunker_fuels integer [9, 7, 663, 0, 321, ...]
    >

A callback on the dataframe's names can be passed instead of a list (like `select/3`):

    iex> df = Explorer.DataFrame.new(x1: [1, 3, 3], x2: ["a", "c", "c"], y1: [1, 2, 3])
    iex> Explorer.DataFrame.distinct(df, columns: &String.starts_with?(&1, "x"))
    #Explorer.DataFrame<
      Polars[2 x 2]
      x1 integer [1, 3]
      x2 string ["a", "c"]
    >
"""
@doc type: :single
@spec distinct(df :: DataFrame.t(), opts :: Keyword.t()) :: DataFrame.t()
def distinct(df, opts \\ [])

def distinct(df, opts) do
  opts = Keyword.validate!(opts, columns: nil, keep_all?: false)

  # `:columns` may be nil (all columns), a filtering callback over the names,
  # or anything `to_existing_columns/2` accepts.
  columns =
    case opts[:columns] do
      nil ->
        names(df)

      callback when is_function(callback) ->
        Enum.filter(names(df), callback)

      columns ->
        to_existing_columns(df, columns)
    end

  # An empty selection (e.g. a callback matched nothing) is a noop.
  if columns != [] do
    Shared.apply_impl(df, :distinct, [columns, opts[:keep_all?]])
  else
    df
  end
end
@doc """
Drop nil values.

Optionally accepts a subset of columns.

## Examples

    iex> df = Explorer.DataFrame.new(a: [1, 2, nil], b: [1, nil, 3])
    iex> Explorer.DataFrame.drop_nil(df)
    #Explorer.DataFrame<
      Polars[1 x 2]
      a integer [1]
      b integer [1]
    >

    iex> df = Explorer.DataFrame.new(a: [1, 2, nil], b: [1, nil, 3], c: [nil, 5, 6])
    iex> Explorer.DataFrame.drop_nil(df, [:a, :c])
    #Explorer.DataFrame<
      Polars[1 x 3]
      a integer [2]
      b integer [nil]
      c integer [5]
    >

    iex> df = Explorer.DataFrame.new(a: [1, 2, nil], b: [1, nil, 3], c: [nil, 5, 6])
    iex> Explorer.DataFrame.drop_nil(df, 0..1)
    #Explorer.DataFrame<
      Polars[1 x 3]
      a integer [1]
      b integer [1]
      c integer [nil]
    >
"""
@doc type: :single
@spec drop_nil(df :: DataFrame.t(), columns_or_column :: column() | columns()) ::
        DataFrame.t()
# The `0..-1` default selects every column via `Enum.slice/2`'s negative-index
# support. NOTE(review): slicing with implicit negative-step ranges like
# `0..-1` is deprecated in newer Elixir in favor of `0..-1//1` — confirm the
# minimum supported Elixir version before changing.
def drop_nil(df, columns_or_column \\ 0..-1)

def drop_nil(df, column) when is_column(column), do: drop_nil(df, [column])

def drop_nil(df, columns) do
  columns = to_existing_columns(df, columns)
  Shared.apply_impl(df, :drop_nil, [columns])
end
@doc """
Renames columns.

To apply a function to a subset of columns, see `rename_with/3`.

## Examples

You can pass in a list of new names:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "a"], b: [1, 3, 1])
    iex> Explorer.DataFrame.rename(df, ["c", "d"])
    #Explorer.DataFrame<
      Polars[3 x 2]
      c string ["a", "b", "a"]
      d integer [1, 3, 1]
    >

Or you can rename individual columns using keyword args:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "a"], b: [1, 3, 1])
    iex> Explorer.DataFrame.rename(df, a: "first")
    #Explorer.DataFrame<
      Polars[3 x 2]
      first string ["a", "b", "a"]
      b integer [1, 3, 1]
    >

Or you can rename individual columns using a map:

    iex> df = Explorer.DataFrame.new(a: ["a", "b", "a"], b: [1, 3, 1])
    iex> Explorer.DataFrame.rename(df, %{"a" => "first"})
    #Explorer.DataFrame<
      Polars[3 x 2]
      first string ["a", "b", "a"]
      b integer [1, 3, 1]
    >
"""
@doc type: :single
@spec rename(
        df :: DataFrame.t(),
        names :: column_names() | column_pairs(column_name())
      ) ::
        DataFrame.t()
# A plain list of names positionally replaces every column name; its length
# must match the dataframe's width.
def rename(df, [name | _] = names) when is_column_name(name) do
  new_names = to_column_names(names)

  check_new_names_length!(df, new_names)
  Shared.apply_impl(df, :rename, [new_names])
end

# Keyword list / map of old => new pairs: validate that every old name exists,
# then expand into the full positional list and recurse into the clause above.
def rename(df, names) when is_column_pairs(names) do
  pairs = to_column_pairs(df, names, &to_column_name(&1))

  old_names = names(df)

  for {name, _} <- pairs do
    maybe_raise_column_not_found(old_names, name)
  end

  pairs_map = Map.new(pairs)

  old_names
  |> Enum.map(fn name -> Map.get(pairs_map, name, name) end)
  |> then(&rename(df, &1))
end
# Asserts that a positional rename list is exactly as long as the dataframe is
# wide, raising `ArgumentError` otherwise. Returns nil on success.
defp check_new_names_length!(df, names) do
  expected = n_columns(df)
  actual = length(names)

  if expected != actual do
    raise(
      ArgumentError,
      "list of new names must match the number of columns in the dataframe; found " <>
        "#{actual} new name(s), but the supplied dataframe has #{expected} column(s)"
    )
  end
end
@doc """
Renames columns with a function.

## Examples

If no columns are specified, it will apply the function to all column names:

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.rename_with(df, &String.upcase/1)
    #Explorer.DataFrame<
      Polars[1094 x 10]
      YEAR integer [2010, 2010, 2010, 2010, 2010, ...]
      COUNTRY string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
      TOTAL integer [2308, 1254, 32500, 141, 7924, ...]
      SOLID_FUEL integer [627, 117, 332, 0, 0, ...]
      LIQUID_FUEL integer [1601, 953, 12381, 141, 3649, ...]
      GAS_FUEL integer [74, 7, 14565, 0, 374, ...]
      CEMENT integer [5, 177, 2598, 0, 204, ...]
      GAS_FLARING integer [0, 0, 2623, 0, 3697, ...]
      PER_CAPITA float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
      BUNKER_FUELS integer [9, 7, 663, 0, 321, ...]
    >

A callback can be used to filter the column names that will be renamed, similarly to `select/3`:

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.rename_with(df, &String.ends_with?(&1, "_fuel"), &String.trim_trailing(&1, "_fuel"))
    #Explorer.DataFrame<
      Polars[1094 x 10]
      year integer [2010, 2010, 2010, 2010, 2010, ...]
      country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
      total integer [2308, 1254, 32500, 141, 7924, ...]
      solid integer [627, 117, 332, 0, 0, ...]
      liquid integer [1601, 953, 12381, 141, 3649, ...]
      gas integer [74, 7, 14565, 0, 374, ...]
      cement integer [5, 177, 2598, 0, 204, ...]
      gas_flaring integer [0, 0, 2623, 0, 3697, ...]
      per_capita float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
      bunker_fuels integer [9, 7, 663, 0, 321, ...]
    >

Or you can just pass in the list of column names you'd like to apply the function to:

    iex> df = Explorer.Datasets.fossil_fuels()
    iex> Explorer.DataFrame.rename_with(df, ["total", "cement"], &String.upcase/1)
    #Explorer.DataFrame<
      Polars[1094 x 10]
      year integer [2010, 2010, 2010, 2010, 2010, ...]
      country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
      TOTAL integer [2308, 1254, 32500, 141, 7924, ...]
      solid_fuel integer [627, 117, 332, 0, 0, ...]
      liquid_fuel integer [1601, 953, 12381, 141, 3649, ...]
      gas_fuel integer [74, 7, 14565, 0, 374, ...]
      CEMENT integer [5, 177, 2598, 0, 204, ...]
      gas_flaring integer [0, 0, 2623, 0, 3697, ...]
      per_capita float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
      bunker_fuels integer [9, 7, 663, 0, 321, ...]
    >
"""
@doc type: :single
@spec rename_with(
        df :: DataFrame.t(),
        columns :: columns() | function(),
        callback :: function()
      ) ::
        DataFrame.t()
# NOTE(review): the default `0..-1` literal carries an implicit step of -1
# (Elixir >= 1.12), so it may NOT match the explicit `0..-1//1` clause below
# and would instead fall through to the generic clause — confirm which path
# the "rename everything" default is intended to take.
def rename_with(df, columns \\ 0..-1, callback)

# A filtering callback picks the subset of names to rename; an empty match is
# a noop.
def rename_with(df, columns, callback) when is_function(callback) and is_function(columns) do
  case df |> names() |> Enum.filter(columns) do
    [column | _] = columns when is_column(column) ->
      rename_with(df, columns, callback)

    [] ->
      df
  end
end

# Full-range shortcut: apply the callback to every column name.
def rename_with(df, 0..-1//1, callback) when is_function(callback) do
  df
  |> names()
  |> Enum.map(callback)
  |> then(&rename(df, &1))
end

# Explicit subset: rename only the listed columns, leaving the rest untouched.
def rename_with(df, columns, callback) when is_function(callback) do
  columns = to_existing_columns(df, columns)

  df
  |> names()
  |> Enum.map(fn name -> if name in columns, do: callback.(name), else: name end)
  |> then(&rename(df, &1))
end
@doc """
Turns a set of columns to dummy variables.
## Examples
iex> df = Explorer.DataFrame.new(a: ["a", "b", "a", "c"], b: ["b", "a", "b", "d"])
iex> Explorer.DataFrame.dummies(df, ["a"])
#Explorer.DataFrame<
Polars[4 x 3]
a_a integer [1, 0, 1, 0]
a_b integer [0, 1, 0, 0]
a_c integer [0, 0, 0, 1]
>
iex> df = Explorer.DataFrame.new(a: ["a", "b", "a", "c"], b: ["b", "a", "b", "d"])
iex> Explorer.DataFrame.dummies(df, ["a", "b"])
#Explorer.DataFrame<
Polars[4 x 6]
a_a integer [1, 0, 1, 0]
a_b integer [0, 1, 0, 0]
a_c integer [0, 0, 0, 1]
b_a integer [0, 1, 0, 0]
b_b integer [1, 0, 1, 0]
b_d integer [0, 0, 0, 1]
>
"""
@doc type: :single
def dummies(df, columns),
do: Shared.apply_impl(df, :dummies, [to_existing_columns(df, columns)])
@doc """
Extracts a single column as a series.
## Examples
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.pull(df, "total")
#Explorer.Series<
integer[1094]
[2308, 1254, 32500, 141, 7924, 41, 143, 51246, 1150, 684, 106589, 18408, 8366, 451, 7981, 16345, 403, 17192, 30222, 147, 1388, 166, 133, 5802, 1278, 114468, 47, 2237, 12030, 535, 58, 1367, 145806, 152, 152, 72, 141, 19703, 2393248, 20773, 44, 540, 19, 2064, 1900, 5501, 10465, 2102, 30428, 18122, ...]
>
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.pull(df, 2)
#Explorer.Series<
integer[1094]
[2308, 1254, 32500, 141, 7924, 41, 143, 51246, 1150, 684, 106589, 18408, 8366, 451, 7981, 16345, 403, 17192, 30222, 147, 1388, 166, 133, 5802, 1278, 114468, 47, 2237, 12030, 535, 58, 1367, 145806, 152, 152, 72, 141, 19703, 2393248, 20773, 44, 540, 19, 2064, 1900, 5501, 10465, 2102, 30428, 18122, ...]
>
"""
@doc type: :single
@spec pull(df :: DataFrame.t(), column :: column()) :: Series.t()
def pull(df, column) when is_column(column) do
[column] = to_existing_columns(df, [column])
Shared.apply_impl(df, :pull, [column])
end
@doc """
Subset a continuous set of rows.
## Examples
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.slice(df, 1, 2)
#Explorer.DataFrame<
Polars[2 x 10]
year integer [2010, 2010]
country string ["ALBANIA", "ALGERIA"]
total integer [1254, 32500]
solid_fuel integer [117, 332]
liquid_fuel integer [953, 12381]
gas_fuel integer [7, 14565]
cement integer [177, 2598]
gas_flaring integer [0, 2623]
per_capita float [0.43, 0.9]
bunker_fuels integer [7, 663]
>
Negative offsets count from the end of the series:
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.slice(df, -10, 2)
#Explorer.DataFrame<
Polars[2 x 10]
year integer [2014, 2014]
country string ["UNITED STATES OF AMERICA", "URUGUAY"]
total integer [1432855, 1840]
solid_fuel integer [450047, 2]
liquid_fuel integer [576531, 1700]
gas_fuel integer [390719, 25]
cement integer [11314, 112]
gas_flaring integer [4244, 0]
per_capita float [4.43, 0.54]
bunker_fuels integer [30722, 251]
>
If the length would run past the end of the dataframe, the result may be shorter than the length:
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.slice(df, -10, 20)
#Explorer.DataFrame<
Polars[10 x 10]
year integer [2014, 2014, 2014, 2014, 2014, ...]
country string ["UNITED STATES OF AMERICA", "URUGUAY", "UZBEKISTAN", "VANUATU", "VENEZUELA", ...]
total integer [1432855, 1840, 28692, 42, 50510, ...]
solid_fuel integer [450047, 2, 1677, 0, 204, ...]
liquid_fuel integer [576531, 1700, 2086, 42, 28445, ...]
gas_fuel integer [390719, 25, 23929, 0, 12731, ...]
cement integer [11314, 112, 1000, 0, 1088, ...]
gas_flaring integer [4244, 0, 0, 0, 8042, ...]
per_capita float [4.43, 0.54, 0.97, 0.16, 1.65, ...]
bunker_fuels integer [30722, 251, 0, 10, 1256, ...]
>
"""
@doc type: :single
def slice(df, offset, length), do: Shared.apply_impl(df, :slice, [offset, length])
@doc """
Subset rows with a list of indices.
## Examples
iex> df = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
iex> Explorer.DataFrame.take(df, [0, 2])
#Explorer.DataFrame<
Polars[2 x 2]
a integer [1, 3]
b string ["a", "c"]
>
"""
@doc type: :single
def take(df, row_indices) when is_list(row_indices) do
n_rows = n_rows(df)
Enum.each(row_indices, fn idx ->
if idx > n_rows or idx < -n_rows,
do:
raise(
ArgumentError,
"requested row index (#{idx}) out of bounds (-#{n_rows}:#{n_rows})"
)
end)
Shared.apply_impl(df, :take, [row_indices])
end
@doc """
Sample rows from a dataframe.
If given an integer as the second argument, it will return N samples. If given a float, it will
return that proportion of the series.
Can sample with or without replacement.
## Options
* `replacement` - If set to `true`, each sample will be independent and therefore values may repeat.
Required to be `true` for `n` greater then the number of rows in the dataframe or `frac` > 1.0. (default: `false`)
* `seed` - An integer to be used as a random seed. If nil, a random value between 1 and 1e12 will be used. (default: nil)
## Examples
You can sample N rows:
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.sample(df, 3, seed: 100)
#Explorer.DataFrame<
Polars[3 x 10]
year integer [2012, 2012, 2013]
country string ["ZIMBABWE", "NICARAGUA", "NIGER"]
total integer [2125, 1260, 529]
solid_fuel integer [917, 0, 93]
liquid_fuel integer [1006, 1176, 432]
gas_fuel integer [0, 0, 0]
cement integer [201, 84, 4]
gas_flaring integer [0, 0, 0]
per_capita float [0.15, 0.21, 0.03]
bunker_fuels integer [9, 18, 19]
>
Or you can sample a proportion of rows:
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.sample(df, 0.03, seed: 100)
#Explorer.DataFrame<
Polars[33 x 10]
year integer [2013, 2012, 2013, 2012, 2010, ...]
country string ["BAHAMAS", "POLAND", "SLOVAKIA", "MOZAMBIQUE", "OMAN", ...]
total integer [764, 81792, 9024, 851, 12931, ...]
solid_fuel integer [1, 53724, 3657, 11, 0, ...]
liquid_fuel integer [763, 17353, 2090, 632, 2331, ...]
gas_fuel integer [0, 8544, 2847, 47, 9309, ...]
cement integer [0, 2165, 424, 161, 612, ...]
gas_flaring integer [0, 6, 7, 0, 679, ...]
per_capita float [2.02, 2.12, 1.67, 0.03, 4.39, ...]
bunker_fuels integer [167, 573, 34, 56, 1342, ...]
>
"""
@doc type: :single
@spec sample(df :: DataFrame.t(), n_or_frac :: number(), opts :: Keyword.t()) :: DataFrame.t()
def sample(df, n_or_frac, opts \\ [])
def sample(df, n, opts) when is_integer(n) do
opts = Keyword.validate!(opts, replacement: false, seed: Enum.random(1..1_000_000_000_000))
n_rows = n_rows(df)
case {n > n_rows, opts[:replacement]} do
{true, false} ->
raise ArgumentError,
"in order to sample more rows than are in the dataframe (#{n_rows}), sampling " <>
"`replacement` must be true"
_ ->
:ok
end
Shared.apply_impl(df, :sample, [n, opts[:replacement], opts[:seed]])
end
def sample(df, frac, opts) when is_float(frac) do
n_rows = n_rows(df)
n = round(frac * n_rows)
sample(df, n, opts)
end
  @doc """
  Pivot data from wide to long.
  `Explorer.DataFrame.pivot_longer/3` "lengthens" data, increasing the number of rows and
  decreasing the number of columns. The inverse transformation is
  `Explorer.DataFrame.pivot_wider/4`.
  The second argument (`columns`) can be either an array of column names to use or a filter callback on
  the dataframe's names.
  `value_columns` must all have the same dtype.
  ## Options
  * `value_columns` - Columns to use for values. May be a filter callback on the dataframe's column names. Defaults to an empty list, using all variables except the columns to pivot.
  * `names_to` - A string specifying the name of the column to create from the data stored in the column names of the dataframe. Defaults to `"variable"`.
  * `values_to` - A string specifying the name of the column to create from the data stored in series element values. Defaults to `"value"`.
  ## Examples
      iex> df = Explorer.Datasets.fossil_fuels()
      iex> Explorer.DataFrame.pivot_longer(df, ["year", "country"], value_columns: &String.ends_with?(&1, "fuel"))
      #Explorer.DataFrame<
        Polars[3282 x 4]
        year integer [2010, 2010, 2010, 2010, 2010, ...]
        country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
        variable string ["solid_fuel", "solid_fuel", "solid_fuel", "solid_fuel", "solid_fuel", ...]
        value integer [627, 117, 332, 0, 0, ...]
      >
      iex> df = Explorer.Datasets.fossil_fuels()
      iex> Explorer.DataFrame.pivot_longer(df, ["year", "country"], value_columns: ["total"])
      #Explorer.DataFrame<
        Polars[1094 x 4]
        year integer [2010, 2010, 2010, 2010, 2010, ...]
        country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
        variable string ["total", "total", "total", "total", "total", ...]
        value integer [2308, 1254, 32500, 141, 7924, ...]
      >
  """
  @doc type: :single
  @spec pivot_longer(
          df :: DataFrame.t(),
          columns :: columns() | function(),
          opts :: Keyword.t()
        ) :: DataFrame.t()
  def pivot_longer(df, columns, opts \\ [])
  # Callback selector: materialize the matching names, then re-dispatch.
  def pivot_longer(df, columns, opts) when is_function(columns),
    do:
      df
      |> names()
      |> Enum.filter(columns)
      |> then(&pivot_longer(df, &1, opts))
  def pivot_longer(df, columns, opts) do
    opts = Keyword.validate!(opts, value_columns: [], names_to: "variable", values_to: "value")
    # ID columns normalized to concrete existing names.
    existing_columns = to_existing_columns(df, columns)
    names = names(df)
    dtypes = names |> Enum.zip(dtypes(df)) |> Enum.into(%{})
    value_columns =
      case opts[:value_columns] do
        [] ->
          # Default: every column that is not an ID column.
          # NOTE(review): filters against the raw `columns` argument rather than
          # `existing_columns` — confirm intent when `columns` holds indices.
          Enum.filter(names, fn name -> name not in columns end)
        [_ | _] = columns ->
          # Explicit list: a value column may not also be an ID column.
          Enum.each(columns, fn column ->
            if column in existing_columns,
              do:
                raise(
                  ArgumentError,
                  "value columns may not also be ID columns but found #{column} in both"
                )
          end)
          columns
        callback when is_function(callback) ->
          Enum.filter(names, fn name -> name not in columns && callback.(name) end)
      end
    value_columns = to_existing_columns(df, value_columns)
    # All value columns collapse into one series, so they must share one dtype.
    dtypes
    |> Map.take(value_columns)
    |> Map.values()
    |> Enum.uniq()
    |> length()
    |> case do
      1 ->
        :ok
      _ ->
        raise ArgumentError,
              "value columns may only include one dtype but found multiple dtypes"
    end
    # NOTE(review): forwards the raw `columns` (not `existing_columns`) to the
    # backend — confirm backends accept non-normalized selectors here.
    Shared.apply_impl(df, :pivot_longer, [
      columns,
      value_columns,
      opts[:names_to],
      opts[:values_to]
    ])
  end
  @doc """
  Pivot data from long to wide.
  `Explorer.DataFrame.pivot_wider/4` "widens" data, increasing the number of columns and
  decreasing the number of rows. The inverse transformation is
  `Explorer.DataFrame.pivot_longer/3`.
  Due to a restriction upstream, `values_from` must be a numeric type.
  ## Options
  * `id_columns` - A set of columns that uniquely identifies each observation.
    Defaults to all columns in data except for the columns specified in `names_from` and `values_from`.
    Typically used when you have redundant variables, i.e. variables whose values are perfectly correlated
    with existing variables. May accept a filter callback, a list or a range of column names.
    Default value is `0..-1`. If an empty list is passed, or a range that results in a empty list of
    column names, it raises an error.
  * `names_prefix` - String added to the start of every variable name.
    This is particularly useful if `names_from` is a numeric vector and you want to create syntactic variable names.
  ## Examples
      iex> df = Explorer.DataFrame.new(id: [1, 1], variable: ["a", "b"], value: [1, 2])
      iex> Explorer.DataFrame.pivot_wider(df, "variable", "value")
      #Explorer.DataFrame<
        Polars[1 x 3]
        id integer [1]
        a integer [1]
        b integer [2]
      >
  """
  @doc type: :single
  @spec pivot_wider(
          df :: DataFrame.t(),
          names_from :: column(),
          values_from :: column(),
          opts ::
            Keyword.t()
        ) :: DataFrame.t()
  def pivot_wider(df, names_from, values_from, opts \\ []) do
    opts = Keyword.validate!(opts, id_columns: 0..-1, names_prefix: "")
    # Normalize both key columns (accepts names or indices).
    [values_from, names_from] = to_existing_columns(df, [values_from, names_from])
    names = names(df)
    dtypes = names |> Enum.zip(dtypes(df)) |> Enum.into(%{})
    # The backend can only widen numeric-ish value columns.
    case Map.get(dtypes, values_from) do
      dtype when dtype in [:integer, :float, :date, :datetime] ->
        :ok
      dtype ->
        raise ArgumentError, "the values_from column must be numeric, but found #{dtype}"
    end
    # Resolve id_columns, excluding the pivot key columns in either case.
    id_columns =
      case opts[:id_columns] do
        fun when is_function(fun) ->
          Enum.filter(names, fn name -> fun.(name) && name not in [names_from, values_from] end)
        names ->
          names = to_existing_columns(df, names)
          Enum.filter(names, &(&1 not in [names_from, values_from]))
      end
    if id_columns == [] do
      raise ArgumentError,
            "id_columns must select at least one existing column, but #{inspect(opts[:id_columns])} selects none"
    end
    Shared.apply_impl(df, :pivot_wider, [id_columns, names_from, values_from, opts[:names_prefix]])
  end
  # Two table verbs
  @doc """
  Join two tables.
  ## Join types
  * `inner` - Returns all rows from `left` where there are matching values in `right`, and all columns from `left` and `right`.
  * `left` - Returns all rows from `left` and all columns from `left` and `right`. Rows in `left` with no match in `right` will have `nil` values in the new columns.
  * `right` - Returns all rows from `right` and all columns from `left` and `right`. Rows in `right` with no match in `left` will have `nil` values in the new columns.
  * `outer` - Returns all rows and all columns from both `left` and `right`. Where there are not matching values, returns `nil` for the one missing.
  * `cross` - Also known as a cartesian join. Returns all combinations of `left` and `right`. Can be very computationally expensive.
  ## Options
  * `on` - The columns to join on. Defaults to overlapping columns. Does not apply to cross join.
  * `how` - One of the join types (as an atom) described above. Defaults to `:inner`.
  ## Examples
  Inner join:
      iex> left = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
      iex> right = Explorer.DataFrame.new(a: [1, 2, 2], c: ["d", "e", "f"])
      iex> Explorer.DataFrame.join(left, right)
      #Explorer.DataFrame<
        Polars[3 x 3]
        a integer [1, 2, 2]
        b string ["a", "b", "b"]
        c string ["d", "e", "f"]
      >
  Left join:
      iex> left = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
      iex> right = Explorer.DataFrame.new(a: [1, 2, 2], c: ["d", "e", "f"])
      iex> Explorer.DataFrame.join(left, right, how: :left)
      #Explorer.DataFrame<
        Polars[4 x 3]
        a integer [1, 2, 2, 3]
        b string ["a", "b", "b", "c"]
        c string ["d", "e", "f", nil]
      >
  Right join:
      iex> left = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
      iex> right = Explorer.DataFrame.new(a: [1, 2, 4], c: ["d", "e", "f"])
      iex> Explorer.DataFrame.join(left, right, how: :right)
      #Explorer.DataFrame<
        Polars[3 x 3]
        a integer [1, 2, 4]
        c string ["d", "e", "f"]
        b string ["a", "b", nil]
      >
  Outer join:
      iex> left = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
      iex> right = Explorer.DataFrame.new(a: [1, 2, 4], c: ["d", "e", "f"])
      iex> Explorer.DataFrame.join(left, right, how: :outer)
      #Explorer.DataFrame<
        Polars[4 x 3]
        a integer [1, 2, 4, 3]
        b string ["a", "b", nil, "c"]
        c string ["d", "e", "f", nil]
      >
  Cross join:
      iex> left = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
      iex> right = Explorer.DataFrame.new(a: [1, 2, 4], c: ["d", "e", "f"])
      iex> Explorer.DataFrame.join(left, right, how: :cross)
      #Explorer.DataFrame<
        Polars[9 x 4]
        a integer [1, 1, 1, 2, 2, ...]
        b string ["a", "a", "a", "b", "b", ...]
        a_right integer [1, 2, 4, 1, 2, ...]
        c string ["d", "e", "f", "d", "e", ...]
      >
  Inner join with different names:
      iex> left = Explorer.DataFrame.new(a: [1, 2, 3], b: ["a", "b", "c"])
      iex> right = Explorer.DataFrame.new(d: [1, 2, 2], c: ["d", "e", "f"])
      iex> Explorer.DataFrame.join(left, right, on: [{"a", "d"}])
      #Explorer.DataFrame<
        Polars[3 x 3]
        a integer [1, 2, 2]
        b string ["a", "b", "b"]
        c string ["d", "e", "f"]
      >
  """
  @doc type: :multi
  @spec join(left :: DataFrame.t(), right :: DataFrame.t(), opts :: Keyword.t()) :: DataFrame.t()
  def join(%DataFrame{} = left, %DataFrame{} = right, opts \\ []) do
    left_columns = names(left)
    right_columns = names(right)
    # Default join keys: the columns present in both dataframes.
    opts =
      Keyword.validate!(opts,
        on: find_overlapping_columns(left_columns, right_columns),
        how: :inner
      )
    {on, how} =
      case {opts[:on], opts[:how]} do
        # Cross joins need no keys; `on` is passed through untouched.
        {on, :cross} ->
          {on, :cross}
        # No keys given and none overlap: nothing to join on.
        {[], _} ->
          raise(ArgumentError, "could not find any overlapping columns")
        {[_ | _] = on, how} ->
          on =
            Enum.map(on, fn
              # {left_name, right_name} pair: normalize each side separately.
              {l_name, r_name} ->
                [l_column] = to_existing_columns(left, [l_name])
                [r_column] = to_existing_columns(right, [r_name])
                {l_column, r_column}
              # Single selector: must resolve to the same name on both sides.
              name ->
                [l_column] = to_existing_columns(left, [name])
                [r_column] = to_existing_columns(right, [name])
                # This is an edge case for when an index is passed as column selection
                if l_column != r_column do
                  raise ArgumentError,
                        "the column given to option `:on` is not the same for both dataframes"
                end
                l_column
            end)
          {on, how}
        other ->
          other
      end
    Shared.apply_impl(left, :join, [right, on, how])
  end
defp find_overlapping_columns(left_columns, right_columns) do
left_columns = MapSet.new(left_columns)
right_columns = MapSet.new(right_columns)
left_columns |> MapSet.intersection(right_columns) |> MapSet.to_list()
end
@doc """
Combine two or more dataframes row-wise (stack).
Column names and dtypes must match. The only exception is for numeric
columns that can be mixed together, and casted automatically to float columns.
## Examples
iex> df1 = Explorer.DataFrame.new(x: [1, 2, 3], y: ["a", "b", "c"])
iex> df2 = Explorer.DataFrame.new(x: [4, 5, 6], y: ["d", "e", "f"])
iex> Explorer.DataFrame.concat_rows([df1, df2])
#Explorer.DataFrame<
Polars[6 x 2]
x integer [1, 2, 3, 4, 5, ...]
y string ["a", "b", "c", "d", "e", ...]
>
iex> df1 = Explorer.DataFrame.new(x: [1, 2, 3], y: ["a", "b", "c"])
iex> df2 = Explorer.DataFrame.new(x: [4.2, 5.3, 6.4], y: ["d", "e", "f"])
iex> Explorer.DataFrame.concat_rows([df1, df2])
#Explorer.DataFrame<
Polars[6 x 2]
x float [1.0, 2.0, 3.0, 4.2, 5.3, ...]
y string ["a", "b", "c", "d", "e", ...]
>
"""
@doc type: :multi
def concat_rows([%DataFrame{} | _t] = dfs) do
changed_types = compute_changed_types_concat_rows(dfs)
if Enum.empty?(changed_types) do
Shared.apply_impl(dfs, :concat_rows)
else
dfs
|> cast_numeric_columns_to_float(changed_types)
|> Shared.apply_impl(:concat_rows)
end
end
  # Computes which columns must be re-typed so the dataframes can be stacked.
  # Returns a map of column name => :float for every column whose dtype is
  # :integer in some frames and :float in others; raises ArgumentError when the
  # frames do not share the same column set or a dtype clash is non-numeric.
  defp compute_changed_types_concat_rows([head | tail]) do
    # Column name => dtype of the first dataframe, used as the reference schema.
    types = Map.new(Enum.zip(names(head), dtypes(head)))
    Enum.reduce(tail, %{}, fn df, changed_types ->
      # Cheap arity check first: differing column counts can never match.
      if n_columns(df) != map_size(types) do
        raise ArgumentError,
              "dataframes must have the same columns"
      end
      Enum.reduce(Enum.zip(names(df), dtypes(df)), changed_types, fn {name, type},
                                                                     changed_types ->
        cond do
          not Map.has_key?(types, name) ->
            raise ArgumentError,
                  "dataframes must have the same columns"
          types[name] == type ->
            changed_types
          # integer/float mix: remember to unify this column via a float cast.
          types_are_numeric_compatible?(types, name, type) ->
            Map.put(changed_types, name, :float)
          true ->
            raise ArgumentError,
                  "columns and dtypes must be identical for all dataframes"
        end
      end)
    end)
  end
defp types_are_numeric_compatible?(types, name, type) do
numeric_types = [:float, :integer]
types[name] != type and types[name] in numeric_types and type in numeric_types
end
  # For every dataframe, casts to float each :integer column that
  # `changed_types` marks as :float, so all frames agree on dtypes before
  # concatenation. Returns the list of (possibly rebuilt) dataframes.
  defp cast_numeric_columns_to_float(dfs, changed_types) do
    for df <- dfs do
      # Only integer columns flagged for promotion need casting in this frame.
      columns =
        for {name, :integer} <- Enum.zip(names(df), dtypes(df)),
            changed_types[name] == :float,
            do: name
      if Enum.empty?(columns) do
        df
      else
        changes = for column <- columns, into: %{}, do: {column, Series.cast(df[column], :float)}
        mutate(df, changes)
      end
    end
  end
@doc """
Combine two dataframes row-wise.
`concat_rows(df1, df2)` is equivalent to `concat_rows([df1, df2])`.
"""
@doc type: :multi
def concat_rows(%DataFrame{} = df1, %DataFrame{} = df2), do: concat_rows([df1, df2])
def concat_rows(%DataFrame{} = df, [%DataFrame{} | _] = dfs), do: concat_rows([df | dfs])
# Groups
@doc """
Group the dataframe by one or more variables.
When the dataframe has grouping variables, operations are performed per group.
`Explorer.DataFrame.ungroup/2` removes grouping.
## Examples
You can group by a single variable:
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.group_by(df, "country")
#Explorer.DataFrame<
Polars[1094 x 10]
Groups: ["country"]
year integer [2010, 2010, 2010, 2010, 2010, ...]
country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
total integer [2308, 1254, 32500, 141, 7924, ...]
solid_fuel integer [627, 117, 332, 0, 0, ...]
liquid_fuel integer [1601, 953, 12381, 141, 3649, ...]
gas_fuel integer [74, 7, 14565, 0, 374, ...]
cement integer [5, 177, 2598, 0, 204, ...]
gas_flaring integer [0, 0, 2623, 0, 3697, ...]
per_capita float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
bunker_fuels integer [9, 7, 663, 0, 321, ...]
>
Or you can group by multiple:
iex> df = Explorer.Datasets.fossil_fuels()
iex> Explorer.DataFrame.group_by(df, ["country", "year"])
#Explorer.DataFrame<
Polars[1094 x 10]
Groups: ["country", "year"]
year integer [2010, 2010, 2010, 2010, 2010, ...]
country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
total integer [2308, 1254, 32500, 141, 7924, ...]
solid_fuel integer [627, 117, 332, 0, 0, ...]
liquid_fuel integer [1601, 953, 12381, 141, 3649, ...]
gas_fuel integer [74, 7, 14565, 0, 374, ...]
cement integer [5, 177, 2598, 0, 204, ...]
gas_flaring integer [0, 0, 2623, 0, 3697, ...]
per_capita float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
bunker_fuels integer [9, 7, 663, 0, 321, ...]
>
"""
@doc type: :single
@spec group_by(df :: DataFrame.t(), groups_or_group :: [String.t()] | String.t()) ::
DataFrame.t()
def group_by(df, groups) when is_list(groups) do
names = names(df)
Enum.each(groups, fn name -> maybe_raise_column_not_found(names, name) end)
Shared.apply_impl(df, :group_by, [groups])
end
def group_by(df, group) when is_binary(group), do: group_by(df, [group])
@doc """
Removes grouping variables.
## Examples
iex> df = Explorer.Datasets.fossil_fuels()
iex> df = Explorer.DataFrame.group_by(df, ["country", "year"])
iex> Explorer.DataFrame.ungroup(df, ["country"])
#Explorer.DataFrame<
Polars[1094 x 10]
Groups: ["year"]
year integer [2010, 2010, 2010, 2010, 2010, ...]
country string ["AFGHANISTAN", "ALBANIA", "ALGERIA", "ANDORRA", "ANGOLA", ...]
total integer [2308, 1254, 32500, 141, 7924, ...]
solid_fuel integer [627, 117, 332, 0, 0, ...]
liquid_fuel integer [1601, 953, 12381, 141, 3649, ...]
gas_fuel integer [74, 7, 14565, 0, 374, ...]
cement integer [5, 177, 2598, 0, 204, ...]
gas_flaring integer [0, 0, 2623, 0, 3697, ...]
per_capita float [0.08, 0.43, 0.9, 1.68, 0.37, ...]
bunker_fuels integer [9, 7, 663, 0, 321, ...]
>
"""
@doc type: :single
@spec ungroup(df :: DataFrame.t(), groups_or_group :: [String.t()] | String.t()) ::
DataFrame.t()
def ungroup(df, groups \\ [])
def ungroup(df, groups) when is_list(groups) do
current_groups = groups(df)
Enum.each(groups, fn group ->
if group not in current_groups,
do:
raise(
ArgumentError,
"could not find #{group} in current groups (#{current_groups})"
)
end)
Shared.apply_impl(df, :ungroup, [groups])
end
def ungroup(df, group) when is_binary(group), do: ungroup(df, [group])
  # Aggregations accepted by summarise/2; anything else raises.
  @supported_aggs ~w[min max sum mean median first last count n_unique]a
  @doc """
  Summarise each group to a single row.
  Implicitly ungroups.
  ## Supported operations
  The following aggregations may be performed:
  * `:min` - Take the minimum value within the group. See `Explorer.Series.min/1`.
  * `:max` - Take the maximum value within the group. See `Explorer.Series.max/1`.
  * `:sum` - Take the sum of the series within the group. See `Explorer.Series.sum/1`.
  * `:mean` - Take the mean of the series within the group. See `Explorer.Series.mean/1`.
  * `:median` - Take the median of the series within the group. See `Explorer.Series.median/1`.
  * `:first` - Take the first value within the group. See `Explorer.Series.first/1`.
  * `:last` - Take the last value within the group. See `Explorer.Series.last/1`.
  * `:count` - Count the number of rows per group.
  * `:n_unique` - Count the number of unique rows per group.
  ## Examples
      iex> df = Explorer.Datasets.fossil_fuels()
      iex> df |> Explorer.DataFrame.group_by("year") |> Explorer.DataFrame.summarise(total: [:max, :min], country: [:n_unique])
      #Explorer.DataFrame<
        Polars[5 x 4]
        year integer [2010, 2011, 2012, 2013, 2014]
        country_n_unique integer [217, 217, 220, 220, 220]
        total_max integer [2393248, 2654360, 2734817, 2797384, 2806634]
        total_min integer [1, 2, 2, 2, 3]
      >
  """
  @doc type: :single
  @spec summarise(df :: DataFrame.t(), columns :: Keyword.t() | map()) :: DataFrame.t()
  # Summarising is only meaningful on a grouped dataframe.
  def summarise(%DataFrame{groups: []}, _),
    do:
      raise(
        ArgumentError,
        "dataframe must be grouped in order to perform summarisation"
      )
  def summarise(df, columns) when is_column_pairs(columns) do
    column_pairs =
      to_column_pairs(df, columns, fn values ->
        # Reject any aggregation atom outside the supported set.
        case values -- @supported_aggs do
          [] ->
            values
          unsupported ->
            raise ArgumentError, "found unsupported aggregations #{inspect(unsupported)}"
        end
      end)
    Shared.apply_impl(df, :summarise, [Map.new(column_pairs)])
  end
  @doc """
  Display the DataFrame in a tabular fashion.
  ## Examples
      df = Explorer.Datasets.iris()
      Explorer.DataFrame.table(df)
  """
  @doc type: :single
  # Prints up to `nrow` rows of `df` to stdout via TableRex.
  def table(df, nrow \\ 5) when nrow >= 0 do
    # Title shows the *full* dataframe shape, even though only `nrow` rows render.
    {rows, columns} = shape(df)
    headers = names(df)
    df = slice(df, 0, nrow)
    # Dtype label placed under each column name, e.g. "\n<integer>".
    types =
      df
      |> dtypes()
      |> Enum.map(&"\n<#{Atom.to_string(&1)}>")
    # Transpose column-major series data into row-major cell lists.
    values =
      headers
      |> Enum.map(&Series.to_list(df[&1]))
      |> Enum.zip_with(& &1)
    name_type = Enum.zip_with(headers, types, fn x, y -> x <> y end)
    # NOTE(review): 0..columns covers columns + 1 indices while there are only
    # `columns` header cells — presumably TableRex ignores the extra index;
    # confirm.
    TableRex.Table.new()
    |> TableRex.Table.put_title("Explorer DataFrame: [rows: #{rows}, columns: #{columns}]")
    |> TableRex.Table.put_header(name_type)
    |> TableRex.Table.put_header_meta(0..columns, align: :center)
    |> TableRex.Table.add_rows(values)
    |> TableRex.Table.render!(
      header_separator_symbol: "=",
      horizontal_style: :all
    )
    |> IO.puts()
  end
  # Helpers
  # Resolves the backend's DataFrame module (e.g. Explorer.PolarsBackend.DataFrame)
  # from opts, falling back to the globally configured backend.
  # NOTE(review): builds an atom dynamically; safe only while backend names come
  # from application config rather than user input.
  defp backend_from_options!(opts) do
    backend = Explorer.Shared.backend_from_options!(opts) || Explorer.Backend.get()
    :"#{backend}.DataFrame"
  end
defp maybe_raise_column_not_found(names, name) do
if name not in names,
do:
raise(
ArgumentError,
List.to_string(["could not find column name \"#{name}\""] ++ did_you_mean(name, names))
)
end
@threshold 0.77
@max_suggestions 5
defp did_you_mean(missing_key, available_keys) do
suggestions =
for key <- available_keys,
distance = String.jaro_distance(missing_key, key),
distance >= @threshold,
do: {distance, key}
case suggestions do
[] -> []
suggestions -> [". Did you mean:\n\n" | format_suggestions(suggestions)]
end
end
defp format_suggestions(suggestions) do
suggestions
|> Enum.sort(&(elem(&1, 0) >= elem(&2, 0)))
|> Enum.take(@max_suggestions)
|> Enum.sort(&(elem(&1, 1) <= elem(&2, 1)))
|> Enum.map(fn {_, key} -> [" * ", inspect(key), ?\n] end)
end
  defimpl Inspect do
    import Inspect.Algebra
    # Renders as #Explorer.DataFrame< ... > with the backend-specific body
    # (delegated via the :inspect impl callback) nested by two columns;
    # force_unfit/1 keeps the output multi-line.
    def inspect(df, opts) do
      force_unfit(
        concat([
          color("#Explorer.DataFrame<", :map, opts),
          nest(
            concat([line(), Shared.apply_impl(df, :inspect, [opts])]),
            2
          ),
          line(),
          color(">", :map, opts)
        ])
      )
    end
  end
end
defimpl Table.Reader, for: Explorer.DataFrame do
  # Table.Reader integration: expose the dataframe column-wise, one lazy
  # enumerable per column, so tabular consumers can traverse it.
  def init(df) do
    columns = Explorer.DataFrame.names(df)
    data =
      for column <- columns do
        df
        |> Explorer.DataFrame.pull(column)
        |> Explorer.Series.to_enum()
      end
    {:columns, %{columns: columns}, data}
  end
end
|
lib/explorer/data_frame.ex
| 0.904395 | 0.844985 |
data_frame.ex
|
starcoder
|
defmodule Bow.Exec do
  @moduledoc """
  Transform files with shell commands
  This module allows executing any external command taking care of temporary path generation and error handling.
  It is as reliable as [erlexec](https://github.com/saleyn/erlexec) module (very!).
  It is also possible to provide custom command timeout. See `exec/4` to see all available options.
  """
  @type command :: [String.t() | {:input, integer} | :input | :output]
  # Default command timeout in ms; read at runtime so config changes apply.
  defp default_timeout, do: Application.get_env(:bow, :exec_timeout, 15_000)
  @doc """
  Execute command
  Arguments:
  - `source` - source file to be transformed
  - `target_name` - target file
  - `command` - the command to be executed. Placeholders `${input}` and `${output}` will be replaced with source and target paths
  Options:
  - `:timeout` - time in which the command must return. If it's exceeded the command process will be killed.
  Examples
      # generate image thumbnail from first page of pdf
      def transform(file, :pdf_thumbnail) do
        Bow.Exec.exec file, filename(file, :pdf_thumbnail),
          "convert '${input}[0]' -strip -gravity North -background '#ffffff'" <>
          " -resize 250x175^ -extent 250x175 -format png png:${output}"
      end
  """
  @spec exec(Bow.t(), Bow.t(), command, keyword) :: {:ok, Bow.t()} | {:error, any}
  def exec(source, target, command, opts \\ []) do
    timeout = opts[:timeout] || default_timeout()
    source_path = source.path
    # Fresh temp path; the target keeps its extension so tools can infer format.
    target_path = Plug.Upload.random_file!("bow-exec") <> target.ext
    # Substitute the :input/:output placeholders, then charlist-ify for erlexec.
    cmd =
      command
      |> Enum.map(fn
        {:input, idx} when is_integer(idx) -> "#{source_path}[#{idx}]"
        :input -> source_path
        :output -> target_path
        arg -> arg
      end)
      |> Enum.map(&to_charlist/1)
    trapping(fn ->
      case :exec.run_link(cmd, stdout: self(), stderr: self()) do
        {:ok, pid, ospid} ->
          case wait_for_exit(pid, ospid, timeout) do
            {:ok, output} ->
              # Exit status 0 alone is not enough — the tool must actually
              # have produced the target file.
              if File.exists?(target_path) do
                {:ok, Bow.set(target, :path, target_path)}
              else
                {:error, reason: :file_not_found, output: output, exit_code: 0, cmd: cmd}
              end
            {:error, exit_code, output} ->
              {:error, output: output, exit_code: exit_code, cmd: cmd}
          end
        error ->
          error
      end
    end)
  end
  # Runs `fun` with exits trapped, restoring the previous :trap_exit flag after.
  defp trapping(fun) do
    trap = Process.flag(:trap_exit, true)
    result = fun.()
    Process.flag(:trap_exit, trap)
    result
  end
  # Waits for the linked exec process to finish; kills it after `timeout` ms.
  # (Parameter renamed from the original misspelled `timout`.)
  defp wait_for_exit(pid, ospid, timeout) do
    receive do
      {:EXIT, ^pid, :normal} -> {:ok, receive_output(ospid)}
      {:EXIT, ^pid, {:exit_status, code}} -> {:error, code, receive_output(ospid)}
    after
      timeout ->
        :exec.stop_and_wait(pid, 2000)
        {:error, :timeout, receive_output(ospid)}
    end
  end
  # Drains buffered stdout/stderr messages for `ospid` and flattens the
  # accumulated iodata into a single binary.
  defp receive_output(ospid, output \\ []) do
    receive do
      {:stdout, ^ospid, data} -> receive_output(ospid, [output, data])
      {:stderr, ^ospid, data} -> receive_output(ospid, [output, data])
    after
      0 -> output |> to_string
    end
  end
end
|
lib/bow/exec.ex
| 0.831793 | 0.451266 |
exec.ex
|
starcoder
|
defmodule Sanbase.Clickhouse.Uniswap.MetricAdapter do
@behaviour Sanbase.Metric.Behaviour
import Sanbase.Utils.Transform
alias Sanbase.Transfers.Erc20Transfers
require Sanbase.Utils.Config, as: Config
@aggregations [:sum]
@timeseries_metrics []
@histogram_metrics ["uniswap_top_claimers"]
@table_metrics []
@metrics @histogram_metrics ++ @timeseries_metrics ++ @table_metrics
@access_map Enum.into(@metrics, %{}, fn metric -> {metric, :restricted} end)
@min_plan_map Enum.into(@metrics, %{}, fn metric -> {metric, :free} end)
@free_metrics Enum.filter(@access_map, &match?({_, :free}, &1)) |> Enum.map(&elem(&1, 0))
@restricted_metrics Enum.filter(@access_map, &match?({_, :restricted}, &1))
|> Enum.map(&elem(&1, 0))
@required_selectors Enum.into(@metrics, %{}, &{&1, []})
@default_complexity_weight 0.3
defp address_ordered_table(), do: Config.module_get(Erc20Transfers, :address_ordered_table)
@impl Sanbase.Metric.Behaviour
def has_incomplete_data?(_), do: false
@impl Sanbase.Metric.Behaviour
def complexity_weight(_), do: @default_complexity_weight
@impl Sanbase.Metric.Behaviour
def required_selectors(), do: @required_selectors
@impl Sanbase.Metric.Behaviour
def timeseries_data(_metric, _selector, _from, _to, _interval, _opts) do
{:error, "Timeseries data is not implemented for uniswap metrics."}
end
  # Top UNI claimers: sums transfer amounts sent out of the Uniswap token
  # distributor address within [from, to), largest recipients first.
  # NOTE(review): the hardcoded `from` address is presumably the UNI airdrop
  # distributor contract — confirm.
  @impl Sanbase.Metric.Behaviour
  def histogram_data("uniswap_top_claimers", %{slug: "uniswap"}, from, to, _interval, limit) do
    query = """
    SELECT
      to AS address,
      amount AS value
    FROM (
      SELECT
        to,
        SUM(value)/1e18 AS amount
      FROM #{address_ordered_table()} FINAL
      PREWHERE
        assetRefId = (SELECT asset_ref_id FROM asset_metadata FINAL WHERE name = 'uniswap' LIMIT 1) AND
        from = '0x090d4613473dee047c3f2706764f49e0821d256e' AND
        dt >= toDateTime(?1) AND
        dt < toDateTime(?2)
      GROUP BY to
      ORDER BY amount DESC
      LIMIT ?3
    )
    """
    # Positional bind params: ?1 = from, ?2 = to (unix), ?3 = row limit.
    args = [from |> DateTime.to_unix(), to |> DateTime.to_unix(), limit]
    Sanbase.ClickhouseRepo.query_transform(query, args, fn [address, value] ->
      %{address: address, value: value}
    end)
    # Enrich with balances, then re-sort since enrichment may reorder rows.
    |> maybe_add_balances(from, to)
    |> maybe_apply_function(fn data -> Enum.sort_by(data, & &1.value, :desc) end)
  end
# None of the slug-scan style APIs apply to this adapter; each returns a
# descriptive error tuple.
@impl Sanbase.Metric.Behaviour
def aggregated_timeseries_data(_, _, _, _, _),
  do: {:error, "Aggregated timeseries data is not implemented for uniswap metrics."}

@impl Sanbase.Metric.Behaviour
def slugs_by_filter(_, _, _, _, _, _),
  do: {:error, "Slugs filtering is not implemented for uniswap metrics."}

@impl Sanbase.Metric.Behaviour
def slugs_order(_, _, _, _, _),
  do: {:error, "Slugs ordering is not implemented for uniswap metrics."}
@impl Sanbase.Metric.Behaviour
# Fixed lower bound for the data set.
def first_datetime(_metric, _slug), do: {:ok, ~U[2020-09-14 00:00:00Z]}

@impl Sanbase.Metric.Behaviour
# Data is computed on demand, so it is always considered fresh.
def last_datetime_computed_at(_metric, _slug), do: {:ok, Timex.now()}
@impl Sanbase.Metric.Behaviour
def metadata(metric) do
  # Static metadata; every metric in this adapter shares the same shape.
  # NOTE(review): data_type is :timeseries although "uniswap_top_claimers"
  # is listed under histogram metrics — confirm this is intended.
  info = %{
    metric: metric,
    min_interval: "1h",
    default_aggregation: :sum,
    available_aggregations: @aggregations,
    available_selectors: [:slug],
    data_type: :timeseries,
    complexity_weight: @default_complexity_weight
  }

  {:ok, info}
end
@impl Sanbase.Metric.Behaviour
def human_readable_name(metric) do
  # Intentionally raises CaseClauseError for unknown metrics, matching the
  # original closed mapping.
  name =
    case metric do
      "uniswap_top_claimers" -> "Uniswap Top Claimers"
    end

  {:ok, name}
end
@impl Sanbase.Metric.Behaviour
def available_aggregations, do: @aggregations

@impl Sanbase.Metric.Behaviour
def available_timeseries_metrics, do: @timeseries_metrics

@impl Sanbase.Metric.Behaviour
def available_histogram_metrics, do: @histogram_metrics

@impl Sanbase.Metric.Behaviour
def available_table_metrics, do: @table_metrics

@impl Sanbase.Metric.Behaviour
def available_metrics, do: @metrics

@impl Sanbase.Metric.Behaviour
# Only the "uniswap" project exposes these metrics; any other slug gets [].
def available_metrics(%{slug: "uniswap"}), do: {:ok, @metrics}
def available_metrics(%{slug: _other}), do: {:ok, []}

@impl Sanbase.Metric.Behaviour
def available_slugs, do: {:ok, ["uniswap"]}

@impl Sanbase.Metric.Behaviour
def available_slugs(metric) when metric in @metrics, do: available_slugs()

@impl Sanbase.Metric.Behaviour
def free_metrics, do: @free_metrics

@impl Sanbase.Metric.Behaviour
def restricted_metrics, do: @restricted_metrics

@impl Sanbase.Metric.Behaviour
def access_map, do: @access_map

@impl Sanbase.Metric.Behaviour
def min_plan_map, do: @min_plan_map
# Private functions

# Attaches each row's balance (as of `to`) under the :balance key.
# Upstream {:error, _} results are passed through unchanged.
defp maybe_add_balances({:ok, rows}, _from, to) do
  addresses = for row <- rows, do: row.address

  # Assumes last_balance_before/3 returns {:ok, %{address => balance}};
  # the match is deliberately assertive.
  {:ok, balances} = Sanbase.Balance.last_balance_before(addresses, "uniswap", to)

  enriched =
    Enum.map(rows, fn %{address: address} = row ->
      Map.put(row, :balance, Map.get(balances, address))
    end)

  {:ok, enriched}
end

defp maybe_add_balances({:error, _} = error, _from, _to), do: error
end
|
lib/sanbase/clickhouse/uniswap/metric_adapter.ex
| 0.786828 | 0.407628 |
metric_adapter.ex
|
starcoder
|
defmodule StructAccess do
  @moduledoc """
  Provides a standard callback implementation for the `Access` behaviour.

  Implements the following callbacks for the struct where this module is used:

  - `c:Access.fetch/2`
  - `c:Access.get_and_update/3`
  - `c:Access.pop/2`

  To define these callbacks and include the proper behaviour all you have to do
  is add `use StructAccess` to the module defining your struct.

  Adding

  ```
  use StructAccess
  ```

  to a module is equivalent to adding the following to that module:

  ```
  @behaviour Access

  @impl Access
  def fetch(struct, key), do: StructAccess.fetch(struct, key)

  @impl Access
  def get_and_update(struct, key, fun) when is_function(fun, 1) do
    StructAccess.get_and_update(struct, key, fun)
  end

  @impl Access
  def pop(struct, key, default \\\\ nil) do
    StructAccess.pop(struct, key, default)
  end

  defoverridable Access
  ```

  This module is simply a shortcut to avoid that boilerplate.

  If any of the implementations in `StructAccess` are not sufficient, they all
  can be overridden.
  """

  @behaviour Access

  defmacro __using__(_opts) do
    quote do
      @behaviour Access

      @impl Access
      def fetch(struct, key), do: StructAccess.fetch(struct, key)

      @impl Access
      def get_and_update(struct, key, fun) when is_function(fun, 1) do
        StructAccess.get_and_update(struct, key, fun)
      end

      @impl Access
      def pop(struct, key, default \\ nil) do
        StructAccess.pop(struct, key, default)
      end

      defoverridable Access
    end
  end

  @doc """
  Retrieves the given key from the given struct.

  Implements the `c:Access.fetch/2` callback.
  """
  @spec fetch(struct() | map(), term()) :: {:ok, term()} | :error
  def fetch(struct, key), do: Map.fetch(struct, key)

  @doc """
  Retrieves the given key from the given struct with a default.
  """
  @spec get(struct() | map(), term(), term()) :: term()
  def get(struct, key, default \\ nil), do: Map.get(struct, key, default)

  @doc """
  Retrieves the given key from the given struct and updates it at the same
  time.

  Implements the `c:Access.get_and_update/3` callback.
  """
  @spec get_and_update(struct() | map(), term(), (term() -> {term(), term()} | :pop)) ::
          {term(), struct() | map()}
  def get_and_update(struct, key, fun) when is_function(fun, 1) do
    current = get(struct, key)

    case fun.(current) do
      {get_value, update} ->
        {get_value, Map.put(struct, key, update)}

      :pop ->
        pop(struct, key)

      other ->
        raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
    end
  end

  @doc """
  Pops the given key from the given struct. As struct keys can't be deleted
  this simply sets the value of the popped key to `nil`.

  Implements the `c:Access.pop/2` callback.
  """
  @spec pop(struct() | map(), term(), term()) :: {term(), struct() | map()}
  def pop(struct, key, default \\ nil) do
    case fetch(struct, key) do
      {:ok, old_value} -> {old_value, Map.put(struct, key, nil)}
      :error -> {default, struct}
    end
  end
end
|
lib/struct_access.ex
| 0.925078 | 0.878105 |
struct_access.ex
|
starcoder
|
defmodule Ppc.Common do
  @moduledoc """
  Shared helpers for the PayPal (Ppc) client: diffing entities into
  JSON-Patch style update operations, building request headers, and small
  data-normalization utilities.
  """

  @doc """
  To update a paypal entity we need to provide each mutated field with correct annotations.
  The plan:
  - compare fields of old entity with new one. Obtain a list of entries
    (aka. update-list or update-triplets):
    - operation (one of {add|remove|replace})
    - field-name
    - new-value (if applicable)
  - consume triplets and create update operations; that will be the request body.
    -> It's this function implementation.
  - make request to the api

  An update operation requires each field to be annotated with mutation kind that will
  be performed:
  - add
  - remove
  - replace
  When no change is made omit the operation.
  Everything else will invalidate the request.
  """
  def extract_field_changes(prev, next, accepted_fields) do
    # Keys may arrive as atoms or strings; normalize `prev` keys to strings
    # so `next` entries can be compared against it regardless of key type.
    ensure_str = fn x -> if is_atom(x), do: Atom.to_string(x), else: x end
    prev = Map.new(prev, fn {k, v} -> {ensure_str.(k), v} end)

    # "" doubles as the sentinel for "field absent": both for keys missing
    # from `prev` (via the Map.get default) and explicit empty values.
    map_to_op = fn key, v_next ->
      key_a = ensure_str.(key)
      v_prev = Map.get(prev, key_a, "")

      case {v_prev, v_next} do
        {"", ""} ->
          nil

        {"", _} ->
          {"add", key, v_next}

        {_, ""} ->
          # NOTE(review): emits a 3-tuple with value nil, so
          # construct_update_operations/1 will include `value: nil` in the
          # remove op (its 2-tuple remove clause is bypassed) — confirm
          # PayPal accepts that form.
          {"remove", key, nil}

        _ ->
          {"replace", key, v_next}
      end
    end

    # Note: the triplet carries the ORIGINAL key (possibly an atom), so the
    # `k in accepted_fields` filter must match that representation.
    oplist =
      next
      |> Enum.map(fn {k, v} -> map_to_op.(k, v) end)
      |> Enum.filter(&(!is_nil(&1)))
      |> Enum.filter(fn {_op, k, _v} -> k in accepted_fields end)

    oplist
  end

  @doc """
  Paypal accepts changes as a list of objects with fields: 'op', 'path', and 'value'.
  In Elixir we can construct it as following list:
  [%{op: "remove", path: "/a"}, %{op: "replace", path: "/b", value: 3}]
  """
  def construct_update_operations(triplets) do
    # Dotted paths ("a.b") become JSON-pointer style paths ("/a/b");
    # atom keys are stringified without dot substitution.
    path_normalize = fn p ->
      path = if is_atom(p), do: Atom.to_string(p), else: String.replace(p, ".", "/")
      "/" <> path
    end

    # 3-tuples carry a value; 2-tuples are value-less removes.
    map_to_dict = fn triplet ->
      case triplet do
        {op, k, v} ->
          %{op: op, path: path_normalize.(k), value: v}

        {remove, k} when remove in [:remove, "remove"] ->
          %{op: "remove", path: path_normalize.(k)}
      end
    end

    Enum.map(triplets, &map_to_dict.(&1))
  end

  # Builds request headers from opts:
  #   :mini -> Prefer: return=minimal
  #   :full -> Prefer: return=representation
  #   :idem -> PayPal-Request-Id set to the given idempotency key
  def construct_headers_for_create(opts) do
    headers = []
    headers = if opts[:mini], do: headers ++ [{"Prefer", "return=minimal"}], else: headers
    headers = if opts[:full], do: headers ++ [{"Prefer", "return=representation"}], else: headers
    headers = if opts[:idem], do: headers ++ [{"PayPal-Request-Id", opts[:idem]}], else: headers
    headers
  end

  @doc """
  Iterates a map ensuring each value that is an atom is converted to an uppercase string.
  Does not visit nested maps.
  """
  @spec normalize_atom_values(map) :: map
  def normalize_atom_values(map) do
    # Booleans are atoms too; they are intentionally left untouched.
    ensure_upcase_str = fn x ->
      if is_atom(x) && !is_boolean(x), do: Atom.to_string(x) |> String.upcase(), else: x
    end

    Map.new(map, fn {k, v} -> {k, ensure_upcase_str.(v)} end)
  end

  # Converts a struct to a plain map; anything else passes through unchanged.
  def to_map_if_struct(x) do
    if is_struct(x), do: Map.from_struct(x), else: x
  end

  # Flattens a nested map into a single level with dot-joined string keys,
  # e.g. %{a: %{b: 1}} -> %{"a.b" => 1}.
  @spec flat_keys(map) :: map
  def flat_keys(map) do
    consume_map(map, %{})
  end

  defp consume_map(map, acc_in, path \\ nil) do
    Enum.reduce(map, acc_in, fn {k, v}, acc ->
      # We need all keys to be strings
      path_next = if is_atom(k), do: Atom.to_string(k), else: k
      path_next = if is_nil(path), do: path_next, else: Enum.join([path, path_next], ".")

      if is_map(v) do
        # treat all structs as maps
        v = to_map_if_struct(v)
        consume_map(v, acc, path_next)
      else
        Map.put(acc, path_next, v)
      end
    end)
  end

  @doc """
  Calculate new DateTime instance.
  Arguments:
  - days: integer - number of days to add/subtract;
  - ref_date: DateTime - reference time point, if null then DateTime.utc_now is used.
  """
  def datetime_add_days(days, ref_date \\ nil) do
    ref_date = if ref_date, do: ref_date, else: DateTime.utc_now()

    # Truncates any real fractional seconds, then appends a fixed ".940Z"
    # suffix. NOTE(review): presumably PayPal requires millisecond precision
    # (the commented line shows the plain "Z" alternative) — confirm.
    ref_date
    |> DateTime.add(days * 24 * 60 * 60, :second)
    |> DateTime.to_iso8601()
    |> String.split(".", parts: 2)
    |> Enum.at(0)
    # |> (&(&1 <> "Z")).()
    |> (&(&1 <> ".940Z")).()
  end
end
|
lib/ppc/common.ex
| 0.833325 | 0.558688 |
common.ex
|
starcoder
|
defmodule Terp.Evaluate.List do
  @moduledoc """
  Provides functionality for working with lists.
  """
  alias Terp.Evaluate

  @doc """
  Build a list by prepending an item to it.
  """
  def cons([], environment), do: Evaluate.eval_expr(nil, environment)

  def cons([x | xs], environment) do
    head = Evaluate.eval_expr(x, environment)

    # cons takes at most two operands: the new head and (optionally) the tail.
    tail =
      case xs do
        [] -> []
        [t] -> Evaluate.eval_expr(t, environment)
      end

    [head | tail]
  end

  @doc """
  Take the first element from a list.

  ## Examples

      iex> "(car '(1 2 3))"
      ...> |> Terp.eval()
      1

      iex> "(car '())"
      ...> |> Terp.eval()
      {:error, {:terp, :empty_list}}

      iex> "(car 5)"
      ...> |> Terp.eval()
      {:error, {:terp, {:not_a_list, 5}}}
  """
  def car(operands, environment) do
    operands
    |> List.first()
    |> Evaluate.eval_expr(environment)
    |> car_helper()
  end

  defp car_helper([]), do: {:error, {:terp, :empty_list}}
  defp car_helper([h | _t]), do: h
  defp car_helper(e), do: {:error, {:terp, {:not_a_list, e}}}

  @doc """
  Take the tail of a list.

  ## Examples

      iex> "(cdr '(1 2 3))"
      ...> |> Terp.eval()
      [2, 3]

      iex> "(cdr '())"
      ...> |> Terp.eval()
      {:error, {:terp, :empty_list}}

      iex> "(cdr 5)"
      ...> |> Terp.eval()
      {:error, {:terp, {:not_a_list, 5}}}
  """
  def cdr(operands, environment) do
    operands
    |> List.first()
    |> Evaluate.eval_expr(environment)
    |> cdr_helper()
  end

  defp cdr_helper([]), do: {:error, {:terp, :empty_list}}
  defp cdr_helper([_h | t]), do: t
  defp cdr_helper(e), do: {:error, {:terp, {:not_a_list, e}}}

  @doc """
  Predicate to check if a list is empty.

  Non-list arguments yield `{:error, {:terp, {:not_a_list, value}}}`,
  consistent with `car/2` and `cdr/2`.

  ## Examples

      iex> "(empty? '(1 2 3))"
      ...> |> Terp.eval()
      false

      iex> "(empty? '())"
      ...> |> Terp.eval()
      true
  """
  def empty?(operands, environment) do
    operands
    |> Enum.map(&Evaluate.eval_expr(&1, environment))
    |> List.first()
    |> empty_helper()
  end

  # BUGFIX: previously piped straight into Enum.empty?/1, which raised
  # Protocol.UndefinedError for non-enumerable operands instead of returning
  # the tagged error used by car/cdr.
  defp empty_helper(list) when is_list(list), do: list == []
  defp empty_helper(e), do: {:error, {:terp, {:not_a_list, e}}}
end
|
lib/evaluate/list.ex
| 0.737347 | 0.487429 |
list.ex
|
starcoder
|
defmodule Mix.Tasks.Potato.Upgrade do
  @moduledoc """
  Prepare an upgrade release from an existing release.

  ## Command line options

  * --from - Specify the version to upgrade from.

  ## Notes

  Generates a minimal tar file capable of upgrading from the
  specified version to the current version. This task expects
  to be able to find both releases, and all their respective
  applications, along with any related appups.

  ```potato.upgrade``` requires:

  * A full release of the _previous_ version.
  * A full release of the _current_ version.

  One way to do this is to leverage the existing `mix release` and
  `mix potato.full` tasks, e.g:

  ```_
  rm -fr _build
  git checkout previous
  MIX_ENV=prod mix do release, potato.full
  ...
  git checkout current
  MIX_ENV=prod mix do release, potato.full, potato.upgrade --from previous
  ```

  The upgrade task will generate a relup file from the appup descriptions and place it into
  the releases/_current_ subfolder for use during installation.
  Additionally, it will add to the tarfile *only* those applications that have
  changed since the _previous_ release.

  The generated upgrade tar should be unpacked and installed using `:release_handler.unpack_release/1` and
  `:release_handler.install_release/1`
  """
  use Mix.Task

  alias Mix.Project

  @shortdoc "Prepare an upgrade release."

  @impl Mix.Task
  def run(args) do
    app = Keyword.fetch!(Project.config(), :app)

    # --from is the only (mandatory) switch.
    old_ver =
      case OptionParser.parse(args, strict: [from: :string]) do
        {[from: f], [], []} -> f
        _ -> Mix.raise("Invalid arguments.")
      end

    new_ver = Keyword.fetch!(Project.config(), :version)
    build_path = Project.build_path()
    build_rel = Path.join([build_path, "rel"])
    root_path = Path.join([build_rel, to_string(app)])
    old_vers = [old_ver]
    Potato.check_releases(root_path, app, [new_ver | old_vers])

    # Generate a minimal tar file, including only those components in this
    # release that have changed since any of the older releases.
    updated = get_updated_components(root_path, app, new_ver, old_vers)
    new_erts = Keyword.has_key?(updated, :erts)

    # Generate the relup; a new ERTS forces an emulator restart.
    old_rels = for v <- old_vers, do: to_charlist(rel_path(root_path, app, v))
    new_rel = to_charlist(rel_path(root_path, app, new_ver))
    out_dir = to_charlist(rel_dir(root_path, new_ver))
    bin_path = to_charlist(Path.join([root_path, "lib", "*", "ebin"]))

    rel_opts =
      [outdir: out_dir, path: [bin_path]]
      |> prepend_if_true(new_erts, [:restart_emulator])

    case :systools.make_relup(new_rel, old_rels, old_rels, rel_opts) do
      :ok ->
        :ok

      {:ok, _relup, mod, warnings} ->
        for w <- warnings, do: Mix.shell().info("relup warning: #{mod}: #{w}")

      {:error, mod, error} ->
        # BUGFIX: the message previously ended with a stray "]".
        Mix.raise("Relup error: #{mod}: #{error}")

      _ ->
        Mix.raise("Relup error")
    end

    # The tar always carries the release metadata plus only changed components.
    entries =
      build_tar_entries(updated)
      |> prepend([Path.join("releases", "#{new_ver}")])
      |> prepend([Path.join("releases", "#{app}-#{new_ver}.rel")])

    tarfile = Path.join([root_path, "releases", "#{app}-#{new_ver}.tar.gz"])
    tar_files = for e <- entries, do: {to_charlist(e), to_charlist(Path.join(root_path, e))}

    case :erl_tar.create(tarfile, tar_files, [:compressed, :dereference]) do
      :ok ->
        Mix.shell().info("Generated upgrade release in #{tarfile}.")

      {:error, reason} ->
        Mix.raise("Failed to create #{tarfile}. #{reason}")
    end
  end

  defp prepend(list, extra), do: prepend_if_true(list, true, extra)

  # Renamed the second parameter from `cond`, which shadowed Kernel.cond/1.
  defp prepend_if_true(list, condition?, extra) do
    if condition?, do: extra ++ list, else: list
  end

  # ERTS lives at the tar root; applications live under lib/.
  defp build_tar_entries(updated) do
    for {c, v} <- updated do
      case c do
        :erts ->
          "#{c}-#{v}"

        _app ->
          Path.join("lib", "#{c}-#{v}")
      end
    end
  end

  # Components (erts + apps) of `new` whose version differs from every old release.
  defp get_updated_components(root_path, app, new, olds) do
    new_components = get_component_vers(root_path, app, new)
    old_components = for old <- olds, do: get_component_vers(root_path, app, old)
    Enum.reject(new_components, fn nc -> Enum.all?(old_components, fn ocs -> nc in ocs end) end)
  end

  # Reads a .rel file and returns [{component, version}] including :erts.
  # Intentionally assertive: a missing/unparsable .rel file raises.
  defp get_component_vers(root_path, app, ver) do
    case :file.consult(to_charlist("#{rel_path(root_path, app, ver)}.rel")) do
      {:ok, [{:release, _, {rel_erts, rel_erts_ver}, app_vers}]} ->
        [
          {rel_erts, rel_erts_ver}
          | for({app_name, app_ver, _} <- app_vers, do: {app_name, app_ver})
        ]
    end
  end

  defp rel_dir(root_path, ver), do: Path.join([root_path, "releases", ver])
  defp rel_path(root_path, app, ver), do: Path.join(rel_dir(root_path, ver), "#{app}")
end
|
lib/mix/tasks/potato/upgrade.ex
| 0.83747 | 0.726547 |
upgrade.ex
|
starcoder
|
defmodule Hierbautberlin.GeoData.AnalyzeText do
  @moduledoc """
  GenServer that scans free text for Berlin street names, street numbers and
  places using Aho-Corasick string matching, disambiguating hits by district.
  """
  require Logger
  use GenServer
  import Ecto.Query, warn: false

  alias Hierbautberlin.GeoData
  alias Hierbautberlin.GeoData.{GeoPlace, GeoStreet, GeoStreetNumber}
  alias Hierbautberlin.Repo
  alias Hierbautberlin.Services.UnicodeHelper

  # Preference order used when several places with the same name match.
  @place_sorting ["Park", "School", "LOR"]

  def init(%{streets: streets, places: places}) when is_list(streets) and is_list(places) do
    # Building the Aho-Corasick tries is expensive; defer it to handle_info/2
    # so start_link/1 returns quickly.
    send(self(), {:init_graph, streets, places})

    {:ok,
     %{
       streets: %{},
       places: %{},
       street_graph: AhoCorasick.new([]),
       place_graph: AhoCorasick.new([])
     }}
  end

  def init(_quizzes), do: {:error, "streets must be a list"}

  def start_link(options \\ []) do
    Logger.debug("Booting street analyzer")
    query = from(streets in GeoStreet, select: [:id, :name, :city, :district])
    streets = Repo.all(query)

    query = from(places in GeoPlace, select: [:id, :name, :city, :district])
    places = Repo.all(query)

    Logger.debug("... with #{length(streets)} Streets and #{length(places)} Places")
    server = GenServer.start_link(__MODULE__, %{streets: streets, places: places}, options)
    Logger.debug("Booting street analyzer completed")
    server
  end

  def handle_info({:init_graph, streets, places}, _state) do
    street_names = Enum.map(streets, & &1.name)
    place_names = Enum.map(places, & &1.name)

    result = %{
      streets: geo_map(%{}, streets),
      places: geo_map(%{}, places),
      street_graph: AhoCorasick.new(street_names),
      place_graph: AhoCorasick.new(place_names)
    }

    {:noreply, result}
  end

  def handle_info(message, state) do
    Bugsnag.report(%RuntimeError{message: "unknown message in analyze_text: #{inspect(message)}"},
      severity: "warning"
    )

    # BUGFIX: previously returned {:ok, state}, which is not a valid
    # handle_info/2 return value and would crash the server on any stray
    # message right after reporting it.
    {:noreply, state}
  end

  def handle_call({:reset_index}, _from, _state) do
    {:reply, :ok,
     %{
       streets: %{},
       places: %{},
       street_graph: AhoCorasick.new([]),
       place_graph: AhoCorasick.new([])
     }}
  end

  def handle_call({:add_streets, streets}, _from, state) do
    Enum.each(streets, fn street ->
      AhoCorasick.add_term(state.street_graph, street.name)
    end)

    AhoCorasick.build_trie(state.street_graph)
    {:reply, :ok, Map.merge(state, %{streets: geo_map(state.streets, streets)})}
  end

  def handle_call({:add_places, places}, _from, state) do
    Enum.each(places, fn place ->
      AhoCorasick.add_term(state.place_graph, place.name)
    end)

    AhoCorasick.build_trie(state.place_graph)
    {:reply, :ok, Map.merge(state, %{places: geo_map(state.places, places)})}
  end

  def handle_call({:analyze_text, text, options}, _from, state) do
    options = Map.merge(%{districts: []}, options)
    text = clean_text(text)
    districts = options.districts

    result =
      %{
        streets: [],
        street_numbers: [],
        places: [],
        unclear: %{}
      }
      |> search_street(state, text)
      |> search_place(state, text)

    if Enum.empty?(result.unclear) do
      {:reply,
       result
       |> clean_results()
       |> Map.delete(:unclear), state}
    else
      # Ambiguous hits: use the districts of the unambiguous hits (plus any
      # caller-provided districts) to pick the most plausible candidates.
      relevant_districts = Enum.uniq(districts_for(result) ++ districts)

      {:reply,
       result
       |> guess_streets(relevant_districts)
       |> guess_street_numbers(relevant_districts)
       |> guess_place(relevant_districts)
       |> clean_results()
       |> Map.delete(:unclear), state}
    end
  end

  defp clean_results(map) do
    map
    |> remove_lor_if_street_exists()
    |> remove_street_if_place_exists()
    |> remove_place_if_street_number_exists()
    |> make_items_unique()
    |> fetch_full_models()
  end

  defp make_items_unique(map) do
    %{
      streets: Enum.uniq_by(map.streets, & &1.id),
      street_numbers: Enum.uniq_by(map.street_numbers, & &1.id),
      places: Enum.uniq_by(map.places, & &1.id)
    }
  end

  # The analysis worked on id/name/district projections; load full records.
  defp fetch_full_models(map) do
    %{
      streets: GeoData.get_geo_streets(Enum.map(map.streets, & &1.id)),
      street_numbers: GeoData.get_geo_street_numbers(Enum.map(map.street_numbers, & &1.id)),
      places: GeoData.get_geo_places(Enum.map(map.places, & &1.id))
    }
  end

  # LOR areas are broad statistical regions; drop them when a concrete
  # street already pins the location.
  defp remove_lor_if_street_exists(map) do
    street_names = Enum.map(map.streets, & &1.name)
    districts = Enum.map(map.streets, & &1.district)

    Map.merge(map, %{
      places:
        Enum.filter(map.places, fn place ->
          place.type != "LOR" || !(place.district in districts || place.name in street_names)
        end)
    })
  end

  defp remove_place_if_street_number_exists(map) do
    street_names = Enum.map(map.street_numbers, & &1.geo_street.name)

    Map.merge(map, %{
      places:
        Enum.filter(map.places, fn place ->
          !(place.name in street_names)
        end)
    })
  end

  defp remove_street_if_place_exists(map) do
    place_names = Enum.map(map.places, & &1.name)

    Map.merge(map, %{
      streets:
        Enum.filter(map.streets, fn street ->
          !(street.name in place_names)
        end)
    })
  end

  defp search_place(map, state, text) do
    state.place_graph
    |> do_search_place(text)
    |> MapSet.to_list()
    |> Enum.reduce(map, fn {hit, _, _}, acc ->
      places = state.places[hit]

      if length(places) == 1 do
        Map.merge(acc, %{places: map.places ++ places})
      else
        # Same name in several districts: defer to the guessing phase.
        unclear_places = Map.get(acc.unclear, :places, [])

        Map.merge(acc, %{
          unclear: Map.merge(acc.unclear, %{places: unclear_places ++ [places]})
        })
      end
    end)
  end

  defp do_aho_corasick_search(graph, text) do
    graph
    |> AhoCorasick.search(text)
    |> MapSet.to_list()
    |> Enum.filter(fn {hit, position, _} ->
      end_character = String.at(text, position + String.length(hit) - 1)
      start_character = String.at(text, position - 2)
      # Remove hit when the following character is a number or letter,
      # which means it is only a partial match or the first character is a "-"
      # hinting to a longer street name like "Example-Street"
      # NOTE(review): for matches near the very start of the text,
      # `position - 2` can go negative and String.at/2 then reads from the
      # END of the string — confirm AhoCorasick positions make this safe.
      (start_character == nil || start_character != "-") &&
        (end_character == nil || !UnicodeHelper.is_character_letter_or_digit?(end_character))
    end)
    |> MapSet.new()
  end

  # Berlin quarters are written both as "...kiez" and "...viertel"; search
  # the text with both spellings swapped in.
  defp do_search_place(graph, text) do
    graph
    |> do_aho_corasick_search(text)
    |> MapSet.union(
      do_aho_corasick_search(
        graph,
        String.replace(text, ~r/(\w+)(viertel)\b/, "\\1kiez")
      )
    )
    |> MapSet.union(
      do_aho_corasick_search(
        graph,
        String.replace(text, ~r/(\w+)(kiez)\b/, "\\1viertel")
      )
    )
  end

  defp search_street(map, state, text) do
    state.street_graph
    |> do_aho_corasick_search(text)
    |> MapSet.to_list()
    |> remove_overlapping_results()
    |> Enum.reduce(map, fn {hit, start_pos, length}, acc ->
      # Look just past the street name for a house number.
      number = text |> String.slice(start_pos + length, 10) |> get_street_number()

      if number do
        find_street_number_in(acc, state.streets[hit], number)
      else
        find_street_in(acc, state.streets[hit])
      end
    end)
  end

  # When matches overlap, keep only the longest one.
  defp remove_overlapping_results(map) do
    Enum.filter(map, fn {_text, start, len} = item ->
      longest_overlapping_item =
        Enum.filter(map, fn {_text, other_start, other_len} ->
          max_pos = max(start, other_start)
          min_pos = min(start + len, other_start + other_len)
          min_pos - max_pos > 0
        end)
        |> Enum.sort_by(
          fn {_text, _start, len} ->
            len
          end,
          :desc
        )
        |> List.first()

      if longest_overlapping_item == nil do
        true
      else
        item == longest_overlapping_item
      end
    end)
  end

  defp districts_for(map) do
    street_districts = map.streets |> Enum.map(& &1.district)
    street_number_districts = map.street_numbers |> Enum.map(& &1.geo_street.district)
    place_districts = map.places |> Enum.map(& &1.district)
    Enum.uniq(street_districts ++ street_number_districts ++ place_districts)
  end

  defp find_street_in(acc, streets) do
    if length(streets) == 1 do
      Map.merge(acc, %{streets: acc.streets ++ streets})
    else
      unclear_streets = Map.get(acc.unclear, :streets, [])
      Map.merge(acc, %{unclear: Map.merge(acc.unclear, %{streets: unclear_streets ++ [streets]})})
    end
  end

  defp find_street_number_in(acc, streets, number) do
    street_ids = Enum.map(streets, & &1.id)

    query =
      from number in GeoStreetNumber,
        where: number.geo_street_id in ^street_ids and number.number == ^number

    street_numbers = Repo.all(query) |> Repo.preload(:geo_street)
    found_items = length(street_numbers)

    cond do
      found_items == 0 ->
        # "12a" might not exist as such; retry with just "12" before falling
        # back to the bare street match.
        if strip_letter(number) != number do
          find_street_number_in(acc, streets, strip_letter(number))
        else
          Map.merge(acc, %{streets: acc.streets ++ streets})
        end

      found_items == 1 ->
        Map.merge(acc, %{street_numbers: acc.street_numbers ++ street_numbers})

      true ->
        unclear_street_numbers = Map.get(acc.unclear, :street_numbers, [])

        Map.merge(acc, %{
          unclear:
            Map.merge(acc.unclear, %{street_numbers: unclear_street_numbers ++ [street_numbers]})
        })
    end
  end

  # "12a" -> "12"; nil if the string does not start with digits.
  defp strip_letter(number) do
    result = Regex.named_captures(~r/^(?<number>\d+).*/, number)

    if result do
      result["number"]
    else
      nil
    end
  end

  # Extracts a leading house number (optionally with a letter) from text
  # following a street name, e.g. "12 a ..." -> "12A".
  defp get_street_number(text) do
    result = Regex.named_captures(~r/^(?<number>\d+(\s*[a-zA-Z])?).*/, text)

    if result do
      result["number"]
      |> String.replace(" ", "")
      |> String.upcase()
    else
      nil
    end
  end

  # Keeps an ambiguous street only if exactly one candidate lies in a
  # relevant district.
  defp guess_streets(%{unclear: unclear} = map, districts) do
    found_streets =
      Enum.map(Map.get(unclear, :streets, []), fn streets ->
        filtered =
          Enum.filter(streets, fn street ->
            Enum.member?(districts, street.district)
          end)

        if Enum.count_until(filtered, 2) == 1 do
          filtered
        else
          nil
        end
      end)
      |> List.flatten()
      |> Enum.filter(fn item ->
        item != nil
      end)

    Map.merge(map, %{streets: map.streets ++ found_streets})
  end

  defp guess_street_numbers(%{unclear: unclear} = map, districts) do
    found_street_numbers =
      unclear
      |> Map.get(:street_numbers, [])
      |> Enum.map(fn street_numbers ->
        filtered =
          Enum.filter(street_numbers, fn street_number ->
            Enum.member?(districts, street_number.geo_street.district)
          end)

        if Enum.count_until(filtered, 2) == 1 do
          filtered
        else
          nil
        end
      end)
      |> List.flatten()
      |> Enum.filter(fn item ->
        item != nil
      end)

    Map.merge(map, %{street_numbers: map.street_numbers ++ found_street_numbers})
  end

  # Unlike streets, an ambiguous place falls back to the best-ranked
  # candidate (per @place_sorting) within the relevant districts.
  defp guess_place(%{unclear: unclear} = map, districts) do
    found_places =
      Enum.map(Map.get(unclear, :places, []), fn places ->
        places
        |> do_filter_park_districts(districts)
        |> do_sort_places()
        |> Enum.take(1)
      end)
      |> List.flatten()
      |> Enum.filter(fn item ->
        item != nil
      end)

    Map.merge(map, %{places: map.places ++ found_places})
  end

  defp do_sort_places(places) do
    Enum.sort_by(places, fn place ->
      Enum.find_index(@place_sorting, fn item ->
        item == place.type
      end)
    end)
  end

  defp do_filter_park_districts(places, districts) do
    Enum.filter(places, fn place ->
      Enum.member?(districts, place.district)
    end)
  end

  # Normalizes spelling variants ("Str." etc.) and whitespace before matching.
  defp clean_text(text) do
    text
    |> String.replace("Strasse", "Straße")
    |> String.replace("Str.", "Straße")
    |> String.replace("strasse", "straße")
    |> String.replace("str.", "straße")
    |> String.replace("\n", " ")
    |> String.replace(~r/\s+/, " ")
    |> street_enumeration()
  end

  # Expands enumerations like "A-, B- und C-straße" into full street names.
  defp street_enumeration(text) do
    match = Regex.run(~r/((\w*-), )*(\w*-) und \w*straße/, text)

    if match do
      [phrase | _] = match

      new_phrase =
        phrase
        |> String.replace("-, ", "straße, ")
        |> String.replace("- und ", "straße und ")

      String.replace(text, phrase, new_phrase)
    else
      text
    end
  end

  # Groups street/place records by name: %{name => [record, ...]}.
  defp geo_map(map, items) do
    Enum.reduce(items, map, fn item, map ->
      if Map.has_key?(map, item.name) do
        Map.put(map, item.name, Map.get(map, item.name) ++ [item])
      else
        Map.put(map, item.name, [item])
      end
    end)
  end

  def add_streets(manager \\ __MODULE__, streets) do
    GenServer.call(manager, {:add_streets, streets})
  end

  def add_places(manager \\ __MODULE__, places) do
    GenServer.call(manager, {:add_places, places})
  end

  @doc """
  Analyzes `text` for streets, street numbers and places. Returns a map with
  `:streets`, `:street_numbers` and `:places`; any error or timeout is
  reported to Bugsnag and yields empty lists.
  """
  def analyze_text(manager \\ __MODULE__, text, options) do
    GenServer.call(manager, {:analyze_text, text, options}, 300_000)
  rescue
    error ->
      Bugsnag.report(error)

      %{
        streets: [],
        street_numbers: [],
        places: []
      }
  catch
    # NOTE(review): assumes the exit reason is always a 2-tuple — confirm.
    :exit, {reason, msg} ->
      Bugsnag.report("analyze text exit #{reason} - #{msg}")

      %{
        streets: [],
        street_numbers: [],
        places: []
      }
  end

  def reset_index(manager \\ __MODULE__) do
    GenServer.call(manager, {:reset_index})
  end
end
|
lib/hierbautberlin/geo_data/analyze_text.ex
| 0.539711 | 0.410697 |
analyze_text.ex
|
starcoder
|
defmodule Ecbolic do
  @moduledoc """
  This module aims to provide a simple interface to
  `Ecbolic.Store` and the necessary tools to add
  documentation to functions, which is easily accessible
  at runtime. This library was built with the intent to
  make it easy to show help for users of applications,
  such as chat bots.

  In order to start creating documentation you first need
  to use this module

  ```elixir
  defmodule TestModule do
    use Ecbolic
  ```

  Then you can start adding your documentation using
  the macro `Ecbolic.help/1`. Functions without a macro call above
  them will be ignored

  ```elixir
  Ecbolic.help("returns `:world`")
  def hello, do: :world
  ```

  In order to access the entries, you first have to load them.
  The function `load_help()` will be added when you use
  `Ecbolic` in your module. Calling this function will load
  the documentation you created into `Ecbolic.Store`, where
  it can be accessed from anywhere.

  ```elixir
  TestModule.load_help()
  ```

  Under the hood this macro (ab)uses a feature added to
  Elixir 1.7, which allows you to add meta data to your
  documentation. So the snippet above will turn out like
  this:

  ```elixir
  @doc help: "returns `:world`"
  def hello, do: :world
  ```

  Both forms are valid to use, but you should stick with
  the macro, so that it'll continue to work if another
  key will be used.

  By default functions are stored with their function names.
  So the function `hello` will be turned into `:hello`. In case
  you want your function to be named by a different name, you
  can do so by aliasing it with `Ecbolic.alias/1`.
  The internal function name will then be replaced with
  whatever you provide here. Even though, I suggest to restrain
  yourself to only use names which can be turned into a string,
  such as string or atom. Things that cannot be turned into
  strings will cause `Ecbolic.Pretty` problems when formatting
  the entries.

  ```
  Ecbolic.help("returns `:world`")
  Ecbolic.alias("Hello world") # Allows for spaces
  def hello, do: :world
  ```

  You can also group all functions within a module with the
  macro `Ecbolic.group/1`.

  ```elixir
  Ecbolic.group("memes")
  def hello_there, do: "<NAME>"
  ```

  Ungrouped functions will receive the `:default` group.
  """
  alias Ecbolic.{Store, Help}

  defmacro __using__(_opts) do
    quote do
      require Ecbolic

      def load_help do
        Ecbolic.Store.load(__MODULE__)
      end
    end
  end

  @doc """
  Returns all help entries as a map, where each function
  is mapped to its documentation.
  """
  @spec fetch_help() :: %{Help.atom_or_string() => Help.t()}
  def fetch_help do
    to_map(Store.all())
  end

  @doc """
  Returns all requested help entries as a map, where each
  function is mapped to its documentation.

  Will return an empty map if none was found.
  """
  @spec fetch_help(Help.atom_or_string() | [Help.atom_or_string()]) :: %{
          Help.atom_or_string() => Help.t()
        }
  def fetch_help(names) when is_list(names) do
    with {:ok, help_entries} <- Store.lookup(names) do
      to_map(help_entries)
    end
  end

  @doc """
  Returns the documentation for the one requested function.

  Will return nil if it was not found.
  """
  @spec fetch_help(Help.atom_or_string()) :: Help.t()
  def fetch_help(name) do
    with {:ok, help_entry} <- Store.lookup(name) do
      help_entry
    else
      {:error, _reason} ->
        nil
    end
  end

  @doc """
  Returns all functions in the given group, mapped to their
  documentation.
  """
  @spec help_group(Help.atom_or_string()) :: %{Help.atom_or_string() => Help.t()}
  def help_group(group_name) do
    with {:ok, group} <- Store.group(group_name) do
      to_map(group)
    end
  end

  @doc """
  Aliases the name by which the documentation for a function
  is accessed.
  """
  @spec alias(Help.atom_or_string()) :: term
  defmacro alias name do
    func_attr(:help_alias, name)
  end

  @doc """
  Sets the group for all functions in that module.
  """
  @spec group(Help.atom_or_string()) :: term
  defmacro group(group) do
    module_attr(:help_group, group)
  end

  @doc """
  Creates a documentation for the function below.
  """
  # BUGFIX: this spec was previously declared as `group/1`, mismatching the
  # macro it annotates.
  @spec help(String.t()) :: term
  defmacro help(help) do
    func_attr(:help_description, help)
  end

  @doc """
  Stores a usage string for the function below.
  """
  @spec usage(String.t()) :: term
  defmacro usage(usage) do
    func_attr(:help_usage, usage)
  end

  # Emits `@doc [attr: val]` so the metadata lands on the next function.
  defp func_attr(attr, val) do
    quote do
      @doc [unquote({attr, val})]
    end
  end

  # Emits `@moduledoc [attr: val]` so the metadata applies module-wide.
  defp module_attr(attr, val) do
    quote do
      @moduledoc [unquote({attr, val})]
    end
  end

  # Normalizes one Help struct or a list of them into a name->help map.
  defp to_map(help) do
    help
    |> List.wrap()
    |> Enum.map(fn %Help{name: name} = help -> {name, help} end)
    |> Enum.into(%{})
  end
end
|
lib/ecbolic.ex
| 0.888593 | 0.908658 |
ecbolic.ex
|
starcoder
|
defmodule Cog.Queries.Command do
  @moduledoc """
  Ecto queries for `Cog.Models.Command`, all joined against the command's
  bundle. Query helpers return composable `Ecto.Query` structs.
  """
  import Ecto.Query, only: [from: 2]

  alias Cog.Models.Command

  # All commands as [bundle_name, command_name] pairs.
  def names do
    from c in Command,
      join: b in assoc(c, :bundle),
      select: [b.name, c.name]
  end

  # Same as names/0, restricted to bundles whose enabled flag equals `enabled`.
  def names_for(enabled) do
    from c in Command,
      join: b in assoc(c, :bundle),
      where: b.enabled == ^enabled,
      select: [b.name, c.name]
  end

  # Name of the bundle that owns the command named `name` (unqualified).
  def bundle_for(name) do
    from c in Command,
      join: b in assoc(c, :bundle),
      where: c.name == ^name,
      select: b.name
  end

  # Accepts a qualified "bundle:command" name; raises if the name does not
  # split (see Command.split_name/1).
  def named(name) do
    {bundle, command} = Command.split_name(name)
    named(bundle, command)
  end

  def named(bundle, command) do
    from c in Command,
      join: b in assoc(c, :bundle),
      where: b.name == ^bundle,
      where: c.name == ^command
  end

  # Commands by bare command name, across all bundles.
  def by_name(command) do
    from c in Command,
      where: c.name == ^command
  end

  def with_bundle(queryable),
    do: from c in queryable, preload: [:bundle]

  def with_rules(queryable),
    do: from c in queryable, preload: [rules: [permissions: :namespace]]

  def with_options(queryable),
    do: from c in queryable, preload: [options: :option_type]

  @doc """
  Retrieve all information about a command
  """
  def complete_command(bundle_name, command_name) do
    named(bundle_name, command_name)
    |> with_bundle
    |> with_rules
    |> with_options
  end

  @doc """
  Given a qualified name, query the one command so named (if
  any). If given an unqualified name, query all commands so named.

  In all cases, the corresponding bundle comes preloaded, useful for
  reconstituting the fully-qualified name of each command.
  """
  def by_any_name(name) do
    if is_qualified?(name) do
      from c in (named(name)),
        preload: [:bundle]
    else
      from c in Command,
        where: c.name == ^name,
        preload: [:bundle]
    end
  end

  # Orders by bundle name, then command name; bundle preloaded.
  def sorted_by_qualified_name(query \\ Command) do
    from c in query,
      join: b in assoc(c, :bundle),
      order_by: [b.name, c.name],
      preload: [:bundle]
  end

  def enabled(query \\ Command) do
    from c in query,
      join: b in assoc(c, :bundle),
      where: b.enabled
  end

  def disabled(query \\ Command) do
    from c in query,
      join: b in assoc(c, :bundle),
      where: not(b.enabled)
  end

  # Split a string as though it were a qualified name
  defp is_qualified?(name),
    do: length(String.split(name, ":", parts: 2)) == 2
end
|
lib/cog/queries/command.ex
| 0.620622 | 0.611295 |
command.ex
|
starcoder
|
defmodule Journey do
@moduledoc ~S"""
Journey helps you define and execute workflow-like processes, simply, scalably, and reliably.
Examples of applications that could be powered by processes defined and executed with Journey:
* a food delivery application,
* a web site for computing horoscopes,
* a web site for accepting and processing credit card applications.
Journey process executions are designed to be persistent and resilient, to survive service restarts, and to, quite literally, scale with your service.
## Project Status
Here is the project's current state and mini-roadmap:
- [x] Initial version, the state of executions lives in memory, does not survive service restarts.
- [x] The state of executions is persisted, executions survive service restarts. (using postgres)
- [x] Executions run in multiple replicas of your service.
- [ ] Maybe: loops in steps.
- [ ] Maybe: support for specific persistence types.
- [ ] Maybe: timer-based cron-like executions.
- [ ] Maybe: better naming / metaphors for Journey's / Processes / Executions
- [ ] Maybe: Documentation includes an example application.
- [ ] Maybe: Documentation includes examples of versioned processes.
- [ ] Maybe: Retry policy is configurable, clearly documented.
- [ ] Maybe: handle / pickup abandoned steps (e. g. server died while we were waiting for a response from an external service).
- [ ] Maybe: Logging is configurable, clearly documented.
- [ ] Maybe: Monitoring is clearly documented.
- [ ] Maybe: Performance and scalability are clearly documented.
- [ ] Maybe: More concise and expressive ways to define journeys.
- [ ] Maybe: a command line tool for printing status of an execution.
The project is in active development.
For questions, comments, bug reports, feature requests please create issues (and/or Pull Requests:).
## Installation
The package can be installed from Hex by adding `journey` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:journey, "~> 0.0.3"}
]
end
```
## Example: a Web Site for Computing Horoscopes
(If you prefer to play with the Livebook version of this example, please see `./examples` directory of the [github repo](https://github.com/shipworthy/journey).)
Imagine a web site that computes horoscopes.
To power this web site we will define a `Journey.Process`, consisting of a collection of `Journey.Step`s.
Some of the steps (`:user_name`, `:birth_day`, `:birth_month`) get their values from the user, while others (`:astrological_sign`, `:horoscope`) compute their values based on the data captured or computed so far, using `func`tions that are part of those steps' definitions.
This code fragment, defines the process, and then executes it, step by step, from the user (Mario) entering their name and birthday, to the process coming back with Mario's horoscope.
```elixir
iex> process = %Journey.Process{
...> process_id: "horoscopes-r-us",
...> steps: [
...> %Journey.Step{name: :first_name},
...> %Journey.Step{name: :birth_month},
...> %Journey.Step{name: :birth_day},
...> %Journey.Step{
...> name: :astrological_sign,
...> func: fn _values ->
...> # Everyone is a Taurus!
...> {:ok, "taurus"}
...> end,
...> blocked_by: [
...> %Journey.BlockedBy{step_name: :birth_month, condition: :provided},
...> %Journey.BlockedBy{step_name: :birth_day, condition: :provided}
...> ]
...> },
...> %Journey.Step{
...> name: :horoscope,
...> func: fn values ->
...> name = values[:first_name].value
...> sign = values[:astrological_sign].value
...> {
...> :ok,
...> "#{name}! You are a righteous #{sign}! This is the perfect week to smash the racist patriarchy!"
...> }
...> end,
...> blocked_by: [
...> %Journey.BlockedBy{step_name: :first_name, condition: :provided},
...> %Journey.BlockedBy{step_name: :astrological_sign, condition: :provided}
...> ]
...> }
...> ]
...>}
iex>
iex> # Start an execution of the process.
iex> # (this could be called by the app's phoenix controller, when a user starts the process on the web site).
iex> execution = Journey.Process.execute(process)
iex>
iex> # The user entered their name. Update the execution.
iex> # (this could be called by the app's Phoenix controller, when a user submits their name).
iex> {:ok, execution} = Journey.Execution.update_value(execution, :first_name, "Mario")
iex>
iex> # Mario entered their birth month and day. Update the execution.
iex> # (this could be called by the app's Phoenix controller, when a user submits these values).
iex> {:ok, execution} = Journey.Execution.update_value(execution, :birth_month, 3)
iex> {:ok, execution} = Journey.Execution.update_value(execution, :birth_day, 10)
iex>
iex> # :astrological_sign is no longer blocked, and it is now ":computing".
iex> {:computing, _} = Journey.Execution.read_value(execution, :astrological_sign)
iex>
iex> # Get a human friendly textual summary of the current status of this execution, in case we want to take a look.
iex> execution.execution_id |> Journey.Execution.load!() |> Journey.Execution.get_summary() |> IO.puts()
iex>
iex> # Get all values in this execution, in case the code wants to take a look.
iex> values = Journey.Execution.get_all_values(execution)
iex> values[:first_name][:status]
:computed
iex> values[:first_name][:value]
"Mario"
iex>
iex> # In just a few moments, we will have Mario's astrological sign.
iex> :timer.sleep(100)
iex> {:computed, "taurus"} = execution.execution_id |> Journey.Execution.load!() |> Journey.Execution.read_value(:astrological_sign)
iex>
iex> # :horoscope computation is no longer blocked. In just a few milliseconds, we will have Mario's horoscope.
iex> # The web page or the app's Phoenix controller can poll for this value, and render it when it becomes :computed.
iex> :timer.sleep(100)
iex> {:computed, horoscope} = execution.execution_id |> Journey.Execution.load!() |> Journey.Execution.read_value(:horoscope)
iex> horoscope
"Mario! You are a righteous taurus! This is the perfect week to smash the racist patriarchy!"
```
`Journey.Execution` execution will save every value it receives or computes, so, even if the server restarts (TODO: link to the issue), the execution will continue where it left off (unless you are using one-node-in-memory configuration). And, because `Journey.Process` runs as part of your application, it, quite literally, scales with your application.
In our example, `func`tions are very simple, but if your function is, for some reason, temporarily unable to compute the value, it can return `{:retriable, error_information}`, and Journey will retry it, according to the step's (implicit, in our example) retry policy. (TODO: link to the issue for implementing this).
# Introspection
At any point in the lifetime of an execution, you can get a human-friendly summary of its state:
```elixir
execution.execution_id |> Journey.Execution.load!() |> Journey.Execution.get_summary |> IO.puts
```
```text
Execution Summary
Execution ID: hs5ijpaif7
Execution started: 2021-03-13 07:59:08Z
Revision: 2
All Steps:
[started_at]: '1615622348'. Blocked by: []. Self-computing: false
[first_name]: 'not_computed'. Blocked by: []. Self-computing: false
[birth_month]: 'not_computed'. Blocked by: []. Self-computing: false
[birth_day]: '29'. Blocked by: []. Self-computing: false
[astrological_sign]: 'not_computed'. Blocked by: [birth_month]. Self-computing: true
[horoscope]: 'not_computed'. Blocked by: [first_name, astrological_sign]. Self-computing: true
:ok
```
## Source Code
The source code for this package can be found on Github: [https://github.com/shipworthy/journey](https://github.com/shipworthy/journey).
## Documentation
Full documentation can be found at [https://hexdocs.pm/journey/](https://hexdocs.pm/journey/).
Also, please see a Livebook in `./examples` directory of the [github repo](https://github.com/shipworthy/journey) for an example.
"""
end
|
lib/journey.ex
| 0.819352 | 0.924279 |
journey.ex
|
starcoder
|
defmodule Google.Protobuf.Struct do
@moduledoc false
# NOTE(review): this module appears to be machine-generated by the Pbuf protoc
# plugin for the well-known type `google.protobuf.Struct` — TODO confirm, and
# prefer regenerating over hand-editing if so.
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
# A Struct holds a map of string keys to dynamically-typed values (field 1).
defstruct [
fields: %{}
]
@type t :: %__MODULE__{
fields: %{optional(String.t) => any}
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
# Encodes the :fields map as repeated map entries; <<10>> is the key byte for
# field 1 with wire type 2 (length-delimited): (1 <<< 3) ||| 2.
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.map_field(<<10>>, :string, <<18>>, :struct, data.fields, <<10>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
# Key byte 10: one map entry of field 1 (string key -> Google.Protobuf.Value).
def decode(acc, <<10, data::binary>>) do
post_map(:fields, 1, Decoder.map_field(10, :string, "", 18, Google.Protobuf.Value, nil, :fields, acc, data))
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
# Decorates a map-entry decode error (which arrives without a tag) with this
# module's field name and tag for a more useful message.
defp post_map(name, tag, {:error, %{tag: nil, message: message}}) do
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__}.#{name} tag #{tag} " <> message
}
{:error, err}
end
# either valid data or a complete error (which would happen if our value
# was a struct and the error happened decoding it)
defp post_map(_name, _prefix, data) do
data
end
# Folds decoded {key, value} pairs into a struct; repeated :fields map entries
# are merged into a single map.
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:fields, {c, v}}, acc -> Map.update(acc, :fields, %{c => v}, fn m -> Map.put(m, c, v) end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct
end
end
defmodule Google.Protobuf.Value do
@moduledoc false
# NOTE(review): appears machine-generated (Pbuf plugin) for the well-known type
# `google.protobuf.Value`, a oneof over null/number/string/bool/struct/list —
# TODO confirm before hand-editing.
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
# :kind holds the oneof as a {field_tag_atom, value} pair (or nil when unset).
defstruct [
kind: nil
]
@type t :: %__MODULE__{
kind: map | {:null_value, Google.Protobuf.NullValue.t} | {:number_value, number} | {:string_value, String.t} | {:bool_value, boolean} | {:struct_value, Google.Protobuf.Struct.t} | {:list_value, Google.Protobuf.ListValue.t}
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
# Exactly one oneof_field call emits output, depending on which tag :kind holds.
# The binary prefixes are the key bytes for fields 1..6 with their wire types.
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.oneof_field(:null_value, data.kind, fn v -> Encoder.enum_field(Google.Protobuf.NullValue, v, <<8>>) end),
Encoder.oneof_field(:number_value, data.kind, fn v -> Encoder.field(:double, v, <<17>>) end),
Encoder.oneof_field(:string_value, data.kind, fn v -> Encoder.field(:string, v, <<26>>) end),
Encoder.oneof_field(:bool_value, data.kind, fn v -> Encoder.field(:bool, v, <<32>>) end),
Encoder.oneof_field(:struct_value, data.kind, fn v -> Encoder.field(:struct, v, <<42>>) end),
Encoder.oneof_field(:list_value, data.kind, fn v -> Encoder.field(:struct, v, <<50>>) end),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
# One clause per oneof member; the leading byte is the field's key byte
# (tag <<< 3 ||| wire_type), e.g. 8 = field 1 varint, 17 = field 2 64-bit.
def decode(acc, <<8, data::binary>>) do
Decoder.oneof_field(:kind, 0, Decoder.enum_field(Google.Protobuf.NullValue, :null_value, acc, data), nil)
end
def decode(acc, <<17, data::binary>>) do
Decoder.oneof_field(:kind, 0, Decoder.field(:double, :number_value, acc, data), nil)
end
def decode(acc, <<26, data::binary>>) do
Decoder.oneof_field(:kind, 0, Decoder.field(:string, :string_value, acc, data), nil)
end
def decode(acc, <<32, data::binary>>) do
Decoder.oneof_field(:kind, 0, Decoder.field(:bool, :bool_value, acc, data), nil)
end
def decode(acc, <<42, data::binary>>) do
Decoder.oneof_field(:kind, 0, Decoder.struct_field(Google.Protobuf.Struct, :struct_value, acc, data), nil)
end
def decode(acc, <<50, data::binary>>) do
Decoder.oneof_field(:kind, 0, Decoder.struct_field(Google.Protobuf.ListValue, :list_value, acc, data), nil)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3,4,5,6] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
# Folds decoded {key, value} pairs into the struct; later values win.
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{k, v}, acc -> Map.put(acc, k, v)
end)
struct
end
end
defmodule Google.Protobuf.ListValue do
@moduledoc false
# NOTE(review): appears machine-generated (Pbuf plugin) for the well-known type
# `google.protobuf.ListValue` — a repeated list of Values (field 1).
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
values: []
]
@type t :: %__MODULE__{
values: [Google.Protobuf.Value.t]
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
# <<10>> is the key byte for field 1, wire type 2 (length-delimited).
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.repeated_field(:struct, data.values, <<10>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
# Key byte 10: one repeated element of field 1 (an embedded Value message).
def decode(acc, <<10, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Value, :values, acc, data)
end
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
# Folds decoded pairs into the struct. Repeated :values are prepended while
# decoding (O(1)), so the list is reversed once at the end to restore order.
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:values, v}, acc -> Map.update(acc, :values, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct = Map.put(struct, :values, Elixir.Enum.reverse(struct.values))
struct
end
end
defmodule Google.Protobuf.NullValue do
  @moduledoc false
  # Enum `google.protobuf.NullValue`: a single member, NULL_VALUE = 0.

  @type t :: :NULL_VALUE | 0

  # Converts the enum atom (or its already-numeric wire value) to the integer
  # encoded on the wire; raises Pbuf.Encoder.Error for anything else.
  @spec to_int(t | non_neg_integer) :: integer
  def to_int(:NULL_VALUE), do: 0
  def to_int(0), do: 0

  def to_int(invalid) do
    raise Pbuf.Encoder.Error,
      type: __MODULE__,
      value: invalid,
      tag: nil,
      message: "#{inspect(invalid)} is not a valid enum value for #{__MODULE__}"
  end

  # Converts a wire integer back to the enum atom. Unknown values return
  # :invalid rather than raising.
  # Fixed: the spec previously claimed `:: t` but the fallback clause returns
  # :invalid, which was outside the declared type.
  @spec from_int(integer) :: t | :invalid
  def from_int(0), do: :NULL_VALUE
  def from_int(_unknown), do: :invalid
end
|
lib/protoc/google/protobuf/struct.pb.ex
| 0.80567 | 0.570122 |
struct.pb.ex
|
starcoder
|
defmodule ExWordNet.Lemma do
@moduledoc """
Provides abstraction over a single word in the WordNet lexicon,
which can be used to look up a set of synsets.
Struct members:
- `:word`: The word this lemma represents.
- `:part_of_speech`: The part of speech (noun, verb, adjective) of this lemma.
- `:synset_offsets`: The offset, in bytes, at which the synsets contained in this lemma are
stored in WordNet's internal database.
- `:id`: A unique integer id that references this lemma. Used internally within WordNet's database.
- `:pointer_symbols`: An array of valid pointer symbols for this lemma.
- `:tagsense_count`: The number of times the sense is tagged in various semantic concordance
texts. A tagsense_count of 0 indicates that the sense has not been semantically tagged.
"""
@enforce_keys ~w(word part_of_speech synset_offsets id pointer_symbols tagsense_count)a
defstruct @enforce_keys
@type t() :: %__MODULE__{
word: String.t(),
part_of_speech: ExWordNet.Constants.PartsOfSpeech.atom_part_of_speech(),
synset_offsets: [integer()],
id: integer(),
pointer_symbols: [String.t()],
tagsense_count: integer()
}
import ExWordNet.Constants.PartsOfSpeech
@doc """
Finds all lemmas for this word across all known parts of speech.
Parts of speech for which no lemma exists are silently skipped.
"""
@spec find_all(String.t()) :: [__MODULE__.t()]
def find_all(word) when is_binary(word) do
Enum.flat_map(atom_parts_of_speech(), fn part_of_speech ->
case find(word, part_of_speech) do
{:ok, lemma} when not is_nil(lemma) -> [lemma]
_ -> []
end
end)
end
@doc """
Find a lemma for a given word and part of speech.
Returns `{:ok, lemma}` on success, `{:error, :not_found}` when the word is
absent from the index, or `{:error, reason}` if the index file cannot be read.
"""
@spec find(String.t(), ExWordNet.Constants.PartsOfSpeech.atom_part_of_speech()) ::
{:ok, __MODULE__.t()} | {:error, any()}
def find(word, part_of_speech)
when is_binary(word) and is_atom_part_of_speech(part_of_speech) do
case lookup_index(word, part_of_speech) do
{:ok, {id, line}} ->
case lemma_from_entry({line, id}) do
result = %__MODULE__{} -> {:ok, result}
_ -> {:error, nil}
end
{:error, reason} ->
{:error, reason}
end
end
# Looks the word up in an ETS table named after the part of speech, loading
# the on-disk index into ETS lazily on first access.
defp lookup_index(word, part_of_speech)
when is_binary(word) and is_atom_part_of_speech(part_of_speech) do
case :ets.whereis(part_of_speech) do
:undefined ->
case load_index(part_of_speech) do
:ok -> lookup_index(word, part_of_speech)
{:error, reason} -> {:error, reason}
end
_ ->
case :ets.lookup(part_of_speech, word) do
[{^word, {id, line}}] ->
{:ok, {id, line}}
[] ->
{:error, :not_found}
end
end
end
# Loads "dict/index.<pos>" from the configured WordNet database directory into
# a named ETS table, keyed by the first space-separated token of each line.
# Stored value is {1-based line number, full line}.
# NOTE(review): the named ETS table is created *before* the file is read; if
# File.read/1 fails, an empty table is left behind and subsequent lookups will
# return {:error, :not_found} instead of surfacing the file error. Consider
# creating the table only after a successful read — TODO confirm intent.
defp load_index(part_of_speech) when is_atom_part_of_speech(part_of_speech) do
:ets.new(part_of_speech, [:named_table])
path = ExWordNet.Config.db() |> Path.join("dict") |> Path.join("index.#{part_of_speech}")
case File.read(path) do
{:ok, content} ->
index =
content
|> String.split("\n", trim: true)
|> Enum.with_index()
|> Enum.map(fn {line, index} ->
[index_word | _] = String.split(line, " ", parts: 2)
{index_word, {index + 1, line}}
end)
:ets.insert(part_of_speech, index)
:ok
{:error, reason} ->
{:error, reason}
end
end
@doc """
Returns a list of synsets for this lemma.
Each synset represents a different sense, or meaning, of the word.
Offsets whose synset cannot be constructed are silently skipped.
"""
@spec synsets(__MODULE__.t()) :: [ExWordNet.Synset.t()]
def synsets(%__MODULE__{synset_offsets: synset_offsets, part_of_speech: part_of_speech})
when is_atom_part_of_speech(part_of_speech) and is_list(synset_offsets) do
Enum.flat_map(synset_offsets, fn synset_offset ->
case ExWordNet.Synset.new(part_of_speech, synset_offset) do
{:ok, synset = %ExWordNet.Synset{}} -> [synset]
_ -> []
end
end)
end
# Parses one index-file entry. Layout (space-separated):
#   word pos synset_count pointer_count [pointer...] _ tagsense_count [offset...]
# NOTE(review): `id` arriving here is already 1-based (see load_index), yet it
# is incremented again below, so struct ids are line_number + 1 — possibly an
# off-by-one; verify against how ids are consumed elsewhere.
defp lemma_from_entry({entry, id}) when is_binary(entry) and is_integer(id) do
[word, part_of_speech, synset_count, pointer_count | xs] = String.split(entry, " ")
synset_count = String.to_integer(synset_count)
pointer_count = String.to_integer(pointer_count)
{pointers, [_, tagsense_count | xs]} = Enum.split(xs, pointer_count)
{synset_offsets, _} = Enum.split(xs, synset_count)
%__MODULE__{
word: word,
part_of_speech: short_to_atom_part_of_speech(part_of_speech),
synset_offsets: Enum.map(synset_offsets, &String.to_integer/1),
id: id + 1,
pointer_symbols: pointers,
tagsense_count: String.to_integer(tagsense_count)
}
end
# Fallback for malformed entries.
defp lemma_from_entry(_) do
nil
end
end
defimpl String.Chars, for: ExWordNet.Lemma do
  import ExWordNet.Constants.PartsOfSpeech

  # Renders a lemma as "word, pos" using the short part-of-speech code.
  def to_string(%ExWordNet.Lemma{word: word, part_of_speech: part_of_speech})
      when is_binary(word) and is_atom_part_of_speech(part_of_speech) do
    [word, atom_to_short_part_of_speech(part_of_speech)]
    |> Enum.join(", ")
  end
end
|
lib/exwordnet/lemma.ex
| 0.810179 | 0.555918 |
lemma.ex
|
starcoder
|
defprotocol Livebook.FileSystem do
@moduledoc false
# This protocol defines an interface for file systems
# that can be plugged into Livebook.
@typedoc """
A path uniquely identifies a file in the file system.
Path has most of the semantics of regular file paths,
with the following exceptions:
* path must be absolute for consistency
* directory path must have a trailing slash, whereas
regular file path must not have a trailing slash.
Rationale: some file systems allow a directory and
a file with the same name to co-exist, while path
needs to distinguish between them
"""
@type path :: String.t()
@typedoc """
A human-readable error message clarifying the operation
failure reason.
"""
@type error :: String.t()
@type access :: :read | :write | :read_write | :none
@doc """
Returns a term uniquely identifying the resource used as a file
system.
"""
@spec resource_identifier(t()) :: term()
def resource_identifier(file_system)
@doc """
Returns the file system type.
Based on the underlying resource, the type can be either:
* `:local` - if the resource is local to its node
* `:global` - if the resource is external and
accessible from any node
"""
@spec type(t()) :: :local | :global
def type(file_system)
@doc """
Returns the default directory path.
To some extent this is similar to current working directory
in a regular file system. For most file systems this
will just be the root path.
"""
@spec default_path(t()) :: path()
def default_path(file_system)
@doc """
Returns a list of files located in the given directory.
When `recursive` is set to `true`, nested directories
are traversed and the final list includes all the paths.
"""
@spec list(t(), path(), boolean()) :: {:ok, list(path())} | {:error, error()}
def list(file_system, path, recursive)
@doc """
Returns binary content of the given file.
"""
@spec read(t(), path()) :: {:ok, binary()} | {:error, error()}
def read(file_system, path)
@doc """
Writes the given binary content to the given file.
If the file exists, it gets overridden.
If the file doesn't exist, it gets created along with
all the necessary directories.
"""
@spec write(t(), path(), binary()) :: :ok | {:error, error()}
def write(file_system, path, content)
@doc """
Returns the current access level to the given file.
If determining the access is costly, then this function may
always return the most liberal access, since all access
functions return error on an invalid attempt.
"""
@spec access(t(), path()) :: {:ok, access()} | {:error, error()}
def access(file_system, path)
@doc """
Creates the given directory unless it already exists.
All necessary parent directories are created as well.
"""
@spec create_dir(t(), path()) :: :ok | {:error, error()}
def create_dir(file_system, path)
@doc """
Removes the given file.
If a directory is given, all of its contents are removed
recursively.
If the file doesn't exist, no error is returned.
"""
@spec remove(t(), path()) :: :ok | {:error, error()}
def remove(file_system, path)
@doc """
Copies the given file.
The given files must be of the same type.
If regular files are given, the contents are copied,
potentially overriding the destination if it already exists.
If directories are given, the directory contents are copied
recursively.
"""
@spec copy(t(), path(), path()) :: :ok | {:error, error()}
def copy(file_system, source_path, destination_path)
@doc """
Renames the given file.
If a directory is given, it gets renamed as expected and
consequently all of the child paths change.
If the destination exists, an error is returned.
"""
@spec rename(t(), path(), path()) :: :ok | {:error, error()}
def rename(file_system, source_path, destination_path)
@doc """
Returns a version identifier for the given file.
The resulting value must be a string of ASCII characters
placed between double quotes, suitable for use as the
value of the ETag HTTP header.
"""
@spec etag_for(t(), path()) :: {:ok, String.t()} | {:error, error()}
def etag_for(file_system, path)
@doc """
Checks if the given path exists in the file system.
"""
@spec exists?(t(), path()) :: {:ok, boolean()} | {:error, error()}
def exists?(file_system, path)
@doc """
Resolves `subject` against a valid directory path.
The `subject` may be either relative or absolute,
contain special sequences such as ".." and ".",
but the interpretation is left up to the file system.
In other words, this has the semantics of path join
followed by expand.
"""
@spec resolve_path(t(), path(), String.t()) :: path()
def resolve_path(file_system, dir_path, subject)
end
|
lib/livebook/file_system.ex
| 0.908283 | 0.44553 |
file_system.ex
|
starcoder
|
defmodule TradeIndicators.ATR do
@moduledoc """
Incremental Average True Range (ATR) computation over a list of OHLC bars,
using Decimal arithmetic. Newest entries are kept at the head of `:list`.
"""
use TypedStruct
alias __MODULE__, as: ATR
alias __MODULE__.Item
alias TradeIndicators.MA
alias TradeIndicators.Util, as: U
alias Decimal, as: D
alias Enum, as: E
alias Map, as: M
# ATR state: accumulated items (newest first), smoothing period, and the
# moving-average method used for the smoothed value (:ema or :wma).
typedstruct do
field :list, List.t(), default: []
field :period, pos_integer(), default: 14
field :method, :ema | :wma, default: :ema
end
# One ATR data point: smoothed average (nil until enough bars), true range,
# and the bar timestamp.
typedstruct module: Item do
field :avg, D.t() | nil, default: nil
field :tr, D.t() | nil, default: nil
field :t, non_neg_integer()
end
@zero D.new(0)
@doc false
# Advances the ATR state with the current bar list (newest bar at the head).
# Branches:
#   * no bars            -> state unchanged
#   * head timestamps eq -> the current head item is replaced (bar update)
#   * otherwise          -> a new item is prepended (new bar)
# In both non-empty branches, :avg stays nil until `period` bars exist.
def step(chart = %ATR{list: atr_list, period: period, method: method}, bars)
when is_list(bars) and is_list(atr_list) and is_integer(period) and period > 1 do
ts =
case bars do
[%{t: t} | _] -> t
_ -> nil
end
case {bars, atr_list} do
{[], _} ->
chart
{[%{t: t1} | bars_tail], [%{t: t2} | atr_tail]} when t1 == t2 ->
if length(bars) < period do
new_atr = %{avg: nil, t: ts, tr: E.take(bars_tail, 2) |> get_tr()}
%{chart | list: [new_atr | atr_tail]}
else
new_atr = E.take(bars_tail, 2) |> get_tr() |> get_atr(atr_list, period, ts, method)
%{chart | list: [new_atr | tl(atr_list)]}
end
_ ->
if length(bars) < period do
new_atr = %{avg: nil, t: ts, tr: E.take(bars, 2) |> get_tr()}
%{chart | list: [new_atr | atr_list]}
else
new_atr = E.take(bars, 2) |> get_tr() |> get_atr(atr_list, period, ts, method)
%{chart | list: [new_atr | atr_list]}
end
end
end
@doc false
# True range: max(high - low, |high - prev_close|, |low - prev_close|).
# With a single bar the bar's own close is used; with two bars the second
# (older) bar supplies the previous close.
def get_tr([]), do: @zero
def get_tr([%{c: c, h: h, l: l}]), do: get_tr(c, h, l)
def get_tr([%{h: h, l: l}, %{c: c}]), do: get_tr(c, h, l)
def get_tr(c, h, l)
when (is_binary(c) or is_integer(c)) and
(is_binary(h) or is_integer(h)) and
(is_binary(l) or is_integer(l)),
do: get_tr(D.new(c), D.new(h), D.new(l))
def get_tr(c = %D{}, h = %D{}, l = %D{}) do
D.sub(h, l)
|> D.max(D.abs(D.sub(h, c)))
|> D.max(D.abs(D.sub(l, c)))
end
@doc false
# Builds the window of the most recent `period` true ranges, newest first,
# substituting zero for items whose :tr is nil.
# NOTE(review): the `|> case do list -> [new_tr | list] end` is a no-op wrapper
# around prepending — it could simply be `[new_tr | list]` via a variable.
def make_tr_list(new_tr, atr_list, period) do
atr_list
|> E.take(period - 1)
|> E.map(fn %{tr: v} -> v || @zero end)
|> case do
list -> [new_tr | list]
end
end
@doc false
# Wraps the new true range and its smoothed average into an Item.
def get_atr(new_tr, atr_list, period, ts, avg_fn) when avg_fn in [:wma, :ema] do
%Item{
avg: get_avg(atr_list, new_tr, period, avg_fn),
tr: new_tr,
t: ts
}
end
@doc false
# :wma — weighted moving average over the last `period` true ranges.
def get_avg(atr_list, new_tr, period, :wma) do
new_tr
|> make_tr_list(atr_list, period)
|> MA.wma(period)
end
# :ema — seeded with a simple average of the first `period` true ranges when
# exactly period-1 history items exist; otherwise smoothed from the previous
# :avg via MA.ema/2.
def get_avg(atr_list, new_tr, period, :ema) do
if length(atr_list) == period - 1 do
atr_list
|> E.map(fn %{tr: tr} -> tr end)
|> case do
list -> [new_tr | list]
end
|> E.reduce(@zero, fn n, t -> D.add(t, U.dec(n)) end)
|> D.div(period)
else
atr_list
|> hd()
|> M.get(:avg)
|> case do
last_tr -> MA.ema({last_tr, new_tr}, period)
end
end
end
end
|
lib/atr.ex
| 0.533397 | 0.49292 |
atr.ex
|
starcoder
|
defmodule ProductCatalog.Service.Product do
# A static, compile-time product catalog. The `products` list below is
# evaluated while the module compiles, and the functions are generated from it
# via unquote fragments — no runtime data store is involved.
products = [
%{
title: "Sennheiser HD 202 II Professional Headphones",
description: "The HD 202 MK II closed, dynamic hi-fi stereo headphones are the ideal partner for DJs and powerful modern music, providing great insulation against ambient noise and a vivid, crisp bass response. The rugged lightweight headphones have a secure fit and can be used for both mobile sources and home (mini) hi-fi systems. When out and about, a convenient cord take-up lets you adjust the headphone cable to the required length.",
image: "http://ecx.images-amazon.com/images/I/71OJzVOZ5HL._SL1500_.jpg",
price: 23.64
},
%{
title: "Bose QuietComfort 25 Acoustic Noise Cancelling Headphones",
description: "QuietComfort 25 headphones are engineered to sound better, be more comfortable and easier to take with you. Put them on, and suddenly everything changes. Your music is deep, powerful and balanced, and so quiet that every note sounds clearer. Even air travel becomes enjoyable, as engine roar gently fades away. No matter how noisy the world is, it’s just you and your music—or simply peace and quiet.",
image: "http://ecx.images-amazon.com/images/I/71%2BHRQB7YCL._SL1500_.jpg",
price: 299.00
},
%{
title: "Audio-Technica ATH-M50x Professional Studio Monitor Headphones",
description: "As the most critically acclaimed model in the M-Series line, the ATH-M50 is praised by top audio engineers and pro audio reviewers year after year. Now, the ATH-M50x professional monitor headphones feature the same coveted sonic signature, with the added feature of detachable cables. From the large aperture drivers, sound isolating earcups and robust construction, the M50x provides an unmatched experience for the most critical audio professionals.",
image: "http://ecx.images-amazon.com/images/I/815OQlSZfkL._SL1500_.jpg",
price: 128.00
}
] |> Enum.map(fn(product) ->
# Derive a stable id from the MD5 of the title, prefixed with "p" so it is a
# valid identifier-like string.
id = "p#{:crypto.hash(:md5, product.title) |> Base.encode16(case: :lower)}"
Map.put(product, :id, id)
end)
# Returns the list of all product ids (baked in at compile time).
def all do
unquote(Enum.map(products, fn(%{id: id}) ->
id
end))
end
# One get/1 clause is generated per product, matching its id literally.
for product <- products do
def get(unquote(product.id)) do
unquote(Macro.escape(product))
end
end
# NOTE(review): unknown ids `throw :not_found` rather than raising or returning
# a tagged tuple — callers are expected to `catch :not_found`.
def get(_) do
throw :not_found
end
end
|
lib/product_catalog/service/product.ex
| 0.571169 | 0.645036 |
product.ex
|
starcoder
|
defmodule Transformations do
@moduledoc """
Transformations is an Elixir library for translating, rotating, reflecting, scaling, shearing, projecting, orthogonalizing, and superimposing arrays of 3D homogeneous coordinates as well as for converting between rotation matrices, Euler angles, and quaternions. Also includes an Arcball control object and functions to decompose transformation matrices.
"""
@doc """
Create a new matrix.
## Examples
```elixir
iex(1)> point = Transformations.matrix([[1],[1],[1],[1]])
#Matrex[4×1]
┌ ┐
│ 1.0 │
│ 1.0 │
│ 1.0 │
│ 1.0 │
└ ┘
```
"""
# Thin wrapper over Matrex.new/1: builds a matrix from a nested list of rows.
def matrix(mtx) do
Matrex.new(mtx)
end
@doc """
Transition (shift) a shape by xt, yt and zt along the x, y and z axes.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.transition(1,2,3)
#Matrex[4×8]
┌ ┐
│ 1.0 2.0 2.0 1.0 1.0 2.0 2.0 1.0 │
│ 2.0 2.0 2.0 2.0 3.0 3.0 3.0 3.0 │
│ 3.0 3.0 4.0 4.0 3.0 3.0 4.0 4.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
# Applies a homogeneous translation of (xt, yt, zt) by left-multiplying `mtx`
# with the standard 4x4 translation matrix.
def transition(mtx, xt, yt, zt) do
  [
    [1, 0, 0, xt],
    [0, 1, 0, yt],
    [0, 0, 1, zt],
    [0, 0, 0, 1]
  ]
  |> Matrex.new()
  |> Matrex.dot(mtx)
end
@doc """
Scale a shape by xs, ys and zs along the x, y and z axes.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.scale(1,2,3)
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 0.0 0.0 0.0 2.0 2.0 2.0 2.0 │
│ 0.0 0.0 3.0 3.0 0.0 0.0 3.0 3.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
# Scales by (xs, ys, zs) along the x, y and z axes via a 4x4 scaling matrix.
def scale(mtx, xs, ys, zs) do
  [
    [xs, 0, 0, 0],
    [0, ys, 0, 0],
    [0, 0, zs, 0],
    [0, 0, 0, 1]
  ]
  |> Matrex.new()
  |> Matrex.dot(mtx)
end
@doc """
Scale a shape by s along each of x, y and z axes.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.scaleu(3)
#Matrex[4×8]
┌ ┐
│ 0.0 3.0 3.0 0.0 0.0 3.0 3.0 0.0 │
│ 0.0 0.0 0.0 0.0 3.0 3.0 3.0 3.0 │
│ 0.0 0.0 3.0 3.0 0.0 0.0 3.0 3.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
# Uniform scale: applies the same factor `s` along all three axes.
def scaleu(mtx, s) do
  [
    [s, 0, 0, 0],
    [0, s, 0, 0],
    [0, 0, s, 0],
    [0, 0, 0, 1]
  ]
  |> Matrex.new()
  |> Matrex.dot(mtx)
end
@doc """
Rotate a shape by angle about the x axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.rotatex(45)
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 0.0-0.70711-0.70711 0.70711 0.70711 0.0 0.0 │
│ 0.0 0.0 0.70711 0.70711 0.70711 0.70711 1.41421 1.41421 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def rotatex(mtx, degrees) do
angle = :math.pi() * degrees / 180.0
rx = Matrex.new([
[1,0,0,0],
[0,:math.cos(angle),-:math.sin(angle),0],
[0,:math.sin(angle), :math.cos(angle),0],
[0,0,0,1],
])
Matrex.dot(rx, mtx)
end
@doc """
Rotate a shape by angle about the y axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.rotatey(45)
#Matrex[4×8]
┌ ┐
│ 0.0 0.70711 1.41421 0.70711 0.0 0.70711 1.41421 0.70711 │
│ 0.0 0.0 0.0 0.0 1.0 1.0 1.0 1.0 │
│ 0.0-0.70711 0.0 0.70711 0.0-0.70711 0.0 0.70711 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def rotatey(mtx, degrees) do
angle = :math.pi() * degrees / 180.0
ry = Matrex.new([
[:math.cos(angle),0,:math.sin(angle),0],
[0,1,0,0],
[-:math.sin(angle), 0, :math.cos(angle),0],
[0,0,0,1],
])
Matrex.dot(ry, mtx)
end
@doc """
Rotate a shape by angle about the z axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.rotatez(45)
#Matrex[4×8]
┌ ┐
│ 0.0 0.70711 0.70711 0.0-0.70711 0.0 0.0-0.70711 │
│ 0.0 0.70711 0.70711 0.0 0.70711 1.41421 1.41421 0.70711 │
│ 0.0 0.0 1.0 1.0 0.0 0.0 1.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def rotatez(mtx, degrees) do
angle = :math.pi() * degrees / 180.0
rz = Matrex.new([
[:math.cos(angle),-:math.sin(angle),0,0],
[:math.sin(angle), :math.cos(angle),0,0],
[0,0,1,0],
[0,0,0,1],
])
Matrex.dot(rz, mtx)
end
@doc """
Rotate a shape about any arbitrary axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.rotate(1,2,3,4,5,6,45)
#Matrex[4×8]
┌ ┐
│-2.33413 -1.5294-2.03528-2.84001-2.64475-1.84001-2.34589-3.15063 │
│ 1.04721 1.55309 1.8637 1.35782 1.85194 2.35782 2.66844 2.16256 │
│-1.28693-0.97631-0.17157-0.48219-1.79281-1.48219-0.67745-0.98807 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
  # Rotates `mtx` about the arbitrary axis through (x, y, z) and (p, q, r)
  # using the classic composition: translate the axis point to the origin,
  # rotate the axis onto the z axis (Rx then Ry), rotate about z, then
  # invert each step (Ryi, Rxi, translate back).
  #
  # NOTE(review): `v = sqrt(b^2 + c^2)` is a divisor below; if the axis is
  # parallel to the x axis (q == y and r == z) this raises ArithmeticError.
  # Confirm callers never pass such an axis.
  # NOTE(review): the final pipeline multiplies `to` first
  # (to·rx·ry·rz·ryi·rxi·toi·mtx); the doctest above pins this exact
  # behavior, so it is preserved as-is.
  def rotate(mtx, x, y, z, p, q, r, degrees) do
    angle = :math.pi() * degrees / 180.0
    # translation taking (x, y, z) to the origin
    to = Matrex.new([
      [1,0,0,-x],
      [0,1,0,-y],
      [0,0,1,-z],
      [0,0,0,1],
    ])
    # direction vector of the rotation axis
    a = p - x
    b = q - y
    c = r - z
    l2 = :math.pow(a, 2) + :math.pow(b, 2) + :math.pow(c, 2)
    l = :math.sqrt(l2)
    v2 = :math.pow(b, 2) + :math.pow(c, 2)
    v = :math.sqrt(v2)
    # rotate the axis into the xz plane
    rx = Matrex.new([
      [1,0,0,0],
      [0,c/v,-b/v,0],
      [0,b/v, c/v,0],
      [0,0,0,1],
    ])
    # rotate the axis onto the z axis
    ry = Matrex.new([
      [v/l,0,-a/l,0],
      [0,1,0,0],
      [a/l, 0, v/l,0],
      [0,0,0,1],
    ])
    # the actual rotation, about z
    rz = Matrex.new([
      [:math.cos(angle),-:math.sin(angle),0,0],
      [:math.sin(angle), :math.cos(angle),0,0],
      [0,0,1,0],
      [0,0,0,1],
    ])
    # inverse of ry
    ryi = Matrex.new([
      [v/l,0,a/l,0],
      [0,1,0,0],
      [-a/l, 0, v/l,0],
      [0,0,0,1],
    ])
    # inverse of rx
    rxi = Matrex.new([
      [1,0,0,0],
      [0,c/v,b/v,0],
      [0,-b/v, c/v,0],
      [0,0,0,1],
    ])
    # translation back from the origin
    toi = Matrex.new([
      [1,0,0,x],
      [0,1,0,y],
      [0,0,1,z],
      [0,0,0,1],
    ])
    Matrex.dot(to, rx) |> Matrex.dot(ry) |> Matrex.dot(rz) |> Matrex.dot(ryi) |> Matrex.dot(rxi) |> Matrex.dot(toi) |> Matrex.dot(mtx)
  end
@doc """
Shear a shape by a factor of sy, sz along the y and z axes about the x axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.shearx(2,3)
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 2.0 2.0 0.0 1.0 3.0 3.0 1.0 │
│ 0.0 3.0 4.0 1.0 0.0 3.0 4.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def shearx(mtx, sy, sz) do
shx = Matrex.new([
[1,sy,sz,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1],
])
mtx
|> Matrex.transpose()
|> Matrex.dot(shx)
|> Matrex.transpose()
end
@doc """
Shear a shape by a factor of sx, sz along the x and z axes about the y axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.sheary(2,3)
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 2.0 3.0 3.0 2.0 │
│ 0.0 0.0 0.0 0.0 1.0 1.0 1.0 1.0 │
│ 0.0 0.0 1.0 1.0 3.0 3.0 4.0 4.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def sheary(mtx, sx, sz) do
shy = Matrex.new([
[1,0,0,0],
[sx,1,sz,0],
[0,0,1,0],
[0,0,0,1],
])
mtx
|> Matrex.transpose()
|> Matrex.dot(shy)
|> Matrex.transpose()
end
@doc """
Shear a shape by a factor of sx, sy along the x and y axes about the z axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.shearz(2,3)
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 3.0 2.0 0.0 1.0 3.0 2.0 │
│ 0.0 0.0 3.0 3.0 1.0 1.0 4.0 4.0 │
│ 0.0 0.0 1.0 1.0 0.0 0.0 1.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def shearz(mtx, sx, sy) do
shz = Matrex.new([
[1,0,0,0],
[0,1,0,0],
[sx,sy,1,0],
[0,0,0,1],
])
mtx
|> Matrex.transpose()
|> Matrex.dot(shz)
|> Matrex.transpose()
end
@doc """
Mirror a shape around the yz plane or the x-axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.mirrorx()
#Matrex[4×8]
┌ ┐
│ 0.0 -1.0 -1.0 0.0 0.0 -1.0 -1.0 0.0 │
│ 0.0 0.0 0.0 0.0 1.0 1.0 1.0 1.0 │
│ 0.0 0.0 1.0 1.0 0.0 0.0 1.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def mirrorx(mtx) do
m = Matrex.new([
[-1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1],
])
Matrex.dot(m, mtx)
end
@doc """
Mirror a shape around the xz plane or the y-axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.mirrory()
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 0.0 0.0 0.0 -1.0 -1.0 -1.0 -1.0 │
│ 0.0 0.0 1.0 1.0 0.0 0.0 1.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def mirrory(mtx) do
m = Matrex.new([
[1,0,0,0],
[0,-1,0,0],
[0,0,1,0],
[0,0,0,1],
])
Matrex.dot(m, mtx)
end
@doc """
Mirror a shape around the xy plane or the z-axis.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.mirrorz()
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 0.0 0.0 0.0 1.0 1.0 1.0 1.0 │
│ 0.0 0.0 -1.0 -1.0 0.0 0.0 -1.0 -1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def mirrorz(mtx) do
m = Matrex.new([
[1,0,0,0],
[0,1,0,0],
[0,0,-1,0],
[0,0,0,1],
])
Matrex.dot(m, mtx)
end
@doc """
Project a shape down to the yz plane.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.projectx()
#Matrex[4×8]
┌ ┐
│ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 │
│ 0.0 0.0 0.0 0.0 1.0 1.0 1.0 1.0 │
│ 0.0 0.0 1.0 1.0 0.0 0.0 1.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def projectx(mtx) do
m = Matrex.new([
[0,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1],
])
Matrex.dot(m, mtx)
end
@doc """
Project a shape down to the xz plane.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.projecty()
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 │
│ 0.0 0.0 1.0 1.0 0.0 0.0 1.0 1.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def projecty(mtx) do
m = Matrex.new([
[1,0,0,0],
[0,0,0,0],
[0,0,1,0],
[0,0,0,1],
])
Matrex.dot(m, mtx)
end
@doc """
Project a shape down to the xy plane.
## Examples
```elixir
iex(1)> shape = Transformations.matrix([
[0,1,1,0,0,1,1,0],
[0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1],
[1,1,1,1,1,1,1,1]
])
shape |> Transformations.projectz()
#Matrex[4×8]
┌ ┐
│ 0.0 1.0 1.0 0.0 0.0 1.0 1.0 0.0 │
│ 0.0 0.0 0.0 0.0 1.0 1.0 1.0 1.0 │
│ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 │
│ 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 │
└ ┘
```
"""
def projectz(mtx) do
m = Matrex.new([
[1,0,0,0],
[0,1,0,0],
[0,0,0,0],
[0,0,0,1],
])
Matrex.dot(m, mtx)
end
end
|
lib/transformations.ex
| 0.881104 | 0.88842 |
transformations.ex
|
starcoder
|
defmodule Aja.Vector.Builder do
  @moduledoc false
  # Internal helper for building persistent-vector tries from lists.
  #
  # A "builder" is a list of per-level accumulators (each a reversed list of
  # nodes); full leaves are appended level by level and finally collapsed
  # into a trie by to_trie/2. Many clause heads/bodies are generated at
  # compile time via Aja.Vector.CodeGen (aliased as C), so `unquote(C...)`
  # below expands to branch-factor-sized patterns and expressions.
  alias Aja.Vector.{Tail, Node}
  require Aja.Vector.CodeGen, as: C

  # Builds `{builder, tail_size, tail}` from a plain list.
  def from_list(list) when is_list(list) do
    do_concat_list([], list)
  end

  # Appends every element of `list` onto an existing builder.
  def concat_list(builder, list) when is_list(builder) and is_list(list) do
    do_concat_list(builder, list)
  end

  # Matches one full branch-factor worth of elements (compile-time pattern),
  # appends them as a leaf node, then recurses on the remainder.
  defp do_concat_list(builder, unquote(C.list_with_rest(C.var(rest)))) when rest != [] do
    leaf = unquote(C.array())
    builder
    |> append_node(leaf)
    |> do_concat_list(rest)
  end

  # Fewer than branch-factor elements remain: they become the vector tail.
  defp do_concat_list(builder, rest) do
    {builder, length(rest), Tail.partial_from_list(rest)}
  end

  # Like from_list/1, but maps `fun` over each element while building.
  def map_from_list(list, fun) when is_list(list) and is_function(fun, 1) do
    concat_map_list([], list, fun)
  end

  defp concat_map_list(builder, unquote(C.list_with_rest(C.var(rest))), fun) when rest != [] do
    leaf = unquote(C.arguments() |> Enum.map(C.apply_mapper(C.var(fun))) |> C.array())
    builder
    |> append_node(leaf)
    |> concat_map_list(rest, fun)
  end

  defp concat_map_list(builder, rest, fun) do
    {builder, length(rest), Enum.map(rest, fun) |> Tail.partial_from_list()}
  end

  # Appends a list of already-built nodes onto the builder.
  def append_nodes(builder, []), do: builder
  def append_nodes(builder, [node | nodes]) do
    builder
    |> append_node(node)
    |> append_nodes(nodes)
  end

  # Appends nodes whose elements are shifted by `offset` relative to node
  # boundaries; the last node is merged with the (shifted) tail, and the
  # leftover elements become the new tail.
  def append_nodes_with_offset(builder, [node], offset, tail, tail_size) do
    shifted_tail = Node.shift(tail, -tail_size)
    last_node = Node.from_offset_nodes(node, shifted_tail, offset)
    case tail_size - offset do
      new_tail_size when new_tail_size > 0 ->
        new_tail =
          shifted_tail
          |> Node.from_offset_nodes(unquote(C.fill_with([], nil) |> C.array()), offset)
          |> Node.shift(new_tail_size)
        builder
        |> append_node(last_node)
        |> append_tail(new_tail, new_tail_size)
      tail_size_complement ->
        # Everything fit inside `last_node`; it becomes the (shorter) tail.
        new_tail = Node.shift(last_node, tail_size_complement)
        append_tail(builder, new_tail, C.branch_factor() + tail_size_complement)
    end
  end
  def append_nodes_with_offset(builder, [node1 | nodes = [node2 | _]], offset, tail, tail_size) do
    node = Node.from_offset_nodes(node1, node2, offset)
    builder
    |> append_node(node)
    |> append_nodes_with_offset(nodes, offset, tail, tail_size)
  end

  def append_node(builder, node)
  # First node ever: start a single-level accumulator.
  def append_node([], node), do: [[node]]
  # Current level is full: collapse the accumulated nodes into a parent
  # node and carry it up one level, resetting this level to [].
  def append_node([unquote(C.arguments() |> tl()) | tries], unquote(C.arguments() |> hd())) do
    trie_node = unquote(C.reversed_arguments() |> C.array())
    [[] | append_node(tries, trie_node)]
  end
  def append_node([trie | tries], node) do
    [[node | trie] | tries]
  end

  # Terminates building: pairs the builder with its tail.
  def append_tail(builder, tail, tail_size) do
    {builder, tail_size, tail}
  end

  # Collapses the per-level accumulators into `{level, trie}`.
  def to_trie([[] | tries], level) do
    to_trie(tries, C.incr_level(level))
  end
  # A single fully-dense trie needs no sparse fill-up.
  def to_trie([[dense_trie]], level) do
    {level, dense_trie}
  end
  def to_trie(sparse_trie, level) do
    to_sparse_trie(sparse_trie, level)
  end

  # Fills incomplete levels bottom-up with partially-filled nodes.
  defp to_sparse_trie([children | rest], level) do
    node = Node.from_incomplete_reverse_list(children)
    case rest do
      [] -> {C.incr_level(level), node}
      [head | tail] -> to_sparse_trie([[node | head] | tail], C.incr_level(level))
    end
  end

  # Rebuilds a builder from an existing trie, keeping the first `index`
  # elements (used when truncating/extending vectors).
  def from_trie(trie, level, index) do
    case :erlang.bsr(index, level) do
      C.branch_factor() ->
        # The whole trie is kept: it becomes a single carried-up node.
        prepend_single_builder([[trie]], level)
      _ ->
        do_from_trie(trie, level, index, [])
    end
  end

  # Leaf level: collect the kept subtries for this level.
  defp do_from_trie(trie, level = C.bits(), index, acc) do
    current_index = C.radix_search(index, level)
    [subtries_list(trie, 1, current_index + 1, []) | acc]
  end
  defp do_from_trie(trie, level, index, acc) do
    current_index = C.radix_search(index, level)
    child = elem(trie, current_index)
    new_acc = [subtries_list(trie, 1, current_index + 1, []) | acc]
    do_from_trie(child, C.decr_level(level), index, new_acc)
  end

  defp prepend_single_builder(list, _level = 0), do: list
  defp prepend_single_builder(list, level) do
    prepend_single_builder([[] | list], C.decr_level(level))
  end

  # Collects subtries `index .. until - 1` of `trie`, in reverse order.
  defp subtries_list(_trie, _index = until, until, acc), do: acc
  defp subtries_list(trie, index, until, acc) do
    new_acc = [:erlang.element(index, trie) | acc]
    subtries_list(trie, index + 1, until, new_acc)
  end

  @compile {:inline, tail_offset: 3}
  # Number of elements stored in the trie part, i.e. the tail's start index.
  def tail_offset([], _level, acc), do: acc
  def tail_offset([trie | tries], level, acc) do
    trie_size = length(trie) |> Bitwise.bsl(level)
    tail_offset(tries, C.incr_level(level), acc + trie_size)
  end
end
|
lib/vector/builder.ex
| 0.689828 | 0.551755 |
builder.ex
|
starcoder
|
defmodule Recurrencex do
  @moduledoc """
  Simple date recurrences
  """
  require Logger

  @enforce_keys [
    :frequency,
    :repeat_on,
    :type
  ]
  defstruct [
    :frequency,
    :repeat_on,
    :type
  ]

  # type: :daily | :weekly | :monthly_day | :monthly_dow
  # frequency: every N days/weeks/months (depending on type)
  # repeat_on: weekday numbers, month days, or {day_of_week, nth} pairs
  @type t :: %__MODULE__{
    type: atom,
    frequency: integer,
    repeat_on: [integer] | [{integer, integer}]
  }
  @doc """
  A function that finds the date of the next occurence after 'date' with recurrence 'recurrencex'
  ## Examples
  ```elixir
  iex> date = Timex.to_datetime({{2018, 4, 20}, {0, 0, 0}}, "America/Toronto")
  ...> # repeat every 7 days
  ...> next = Recurrencex.next(date, %Recurrencex{type: :daily, frequency: 7, repeat_on: []})
  ...> next == Timex.to_datetime({{2018, 4, 27}, {0, 0, 0}}, "America/Toronto")
  true
  iex> date = Timex.to_datetime({{2018, 4, 20}, {0, 0, 0}}, "America/Toronto")
  ...> # repeat on Mondays, Wednesdays, Fridays every week
  ...> recurrencex = %Recurrencex{type: :weekly, frequency: 1, repeat_on: [1, 3, 5]}
  ...> next = Recurrencex.next(date, recurrencex)
  ...> # date was a Friday the 20th, the next recurrence would be Monday the 23rd
  ...> next == Timex.to_datetime({{2018, 4, 23}, {0, 0, 0}}, "America/Toronto")
  true
  iex> date = Timex.to_datetime({{2018, 4, 20}, {0, 0, 0}}, "America/Toronto")
  ...> # repeat on the 20th and 25th of every month
  ...> recurrencex = %Recurrencex{type: :monthly_day, frequency: 1, repeat_on: [20, 25]}
  ...> next = Recurrencex.next(date, recurrencex)
  ...> next == Timex.to_datetime({{2018, 4, 25}, {0, 0, 0}}, "America/Toronto")
  true
  iex> date = Timex.to_datetime({{2018, 4, 20}, {0, 0, 0}}, "America/Toronto")
  ...> # repeat on the first Thursday of every month
  ...> recurrencex = %Recurrencex{type: :monthly_dow, frequency: 1, repeat_on: [{4,1}]}
  ...> next = Recurrencex.next(date, recurrencex)
  ...> next == Timex.to_datetime({{2018, 5, 3}, {0, 0, 0}}, "America/Toronto")
  true
  iex> r = %Recurrencex{type: :monthly_day, frequency: 12, repeat_on: [20]}
  ...> next = Timex.to_datetime({{2018, 4, 20}, {0, 0, 0}}, "America/Toronto")
  ...> |> Recurrencex.next(r)
  ...> |> Recurrencex.next(r)
  ...> next == Timex.to_datetime({{2020, 4, 20}, {0, 0, 0}}, "America/Toronto")
  true
  ```
  """
  @spec next(%DateTime{}, %Recurrencex{}) :: %DateTime{}
  def next(date, recurrencex) do
    next_occurence(date, recurrencex)
  end

  # Daily: simply shift forward by `frequency` days.
  defp next_occurence(date, %Recurrencex{type: :daily} = recurrencex) do
    Timex.shift(date, [days: recurrencex.frequency])
  end

  # Weekly: jump to the next listed weekday; if we wrapped past the end of
  # the list (shift <= 0), also advance by `frequency` weeks.
  defp next_occurence(date, %Recurrencex{type: :weekly} = recurrencex) do
    dow = Timex.weekday(date)
    day_shift = next_in_sequence(dow, Enum.sort(recurrencex.repeat_on)) - dow
    cond do
      day_shift <= 0 ->
        Timex.shift(date, [days: day_shift, weeks: recurrencex.frequency])
      day_shift > 0 ->
        Timex.shift(date, [days: day_shift])
    end
  end

  # Monthly by day-of-month: jump to the next listed day; when wrapping
  # into the next period, correct for Timex month-shift landing in the
  # wrong month (short months) by clamping to end-of-month.
  defp next_occurence(date, %Recurrencex{type: :monthly_day} = recurrencex) do
    day_shift = next_in_sequence(date.day, Enum.sort(recurrencex.repeat_on)) - date.day
    cond do
      day_shift <= 0 ->
        shifted_date = Timex.shift(date, [days: day_shift, months: recurrencex.frequency])
        cond do
          shifted_date.month == 12 and rem(date.month + recurrencex.frequency, 12) == 0 ->
            shifted_date
          shifted_date.month == rem(date.month + recurrencex.frequency, 12) ->
            shifted_date
          true ->
            # Landed in the wrong month (e.g. Jan 31 + 1 month): back up a
            # few days and clamp to the end of the intended month, keeping
            # the original time-of-day.
            Timex.shift(shifted_date, [days: -3])
            |> Timex.end_of_month
            |> Timex.set([
              hour: date.hour,
              minute: date.minute,
              second: date.second,
              microsecond: date.microsecond
            ])
        end
      day_shift > 0 ->
        Timex.shift(date, [days: day_shift])
    end
  end

  # Monthly by nth-weekday (e.g. first Thursday): find the next {dow, nth}
  # pair; if it wrapped, move `frequency` months ahead first.
  defp next_occurence(date, %Recurrencex{type: :monthly_dow} = recurrencex) do
    base_pair = {Timex.weekday(date), nth_dow(date)}
    repeat_on = Enum.sort(recurrencex.repeat_on, fn {_a, b}, {_x, y} -> b < y end)
    {dow, n} = next_in_tuple_sequence(base_pair, repeat_on, Timex.beginning_of_month(date))
    cond do
      n >= nth_dow(date) ->
        date_of_nth_dow(Timex.beginning_of_month(date), dow, n)
      n < nth_dow(date) ->
        shifted_date = Timex.shift(date, [months: recurrencex.frequency])
        if shifted_date.month == date.month + recurrencex.frequency do
          date_of_nth_dow(Timex.beginning_of_month(shifted_date), dow, n)
        else
          # Offset by 3 to be safe we don't skip a month (February/30/31th) because of the way
          # Timex shifts by months
          start_date = shifted_date
          |> Timex.shift([days: -3])
          |> Timex.beginning_of_month
          |> Timex.set([
            hour: date.hour,
            minute: date.minute,
            second: date.second,
            microsecond: date.microsecond
          ])
          date_of_nth_dow(start_date, dow, n)
        end
    end
  end

  # Walks forward from `date` to the nth occurrence of weekday `dow`.
  defp date_of_nth_dow(date, dow, n) do
    cond do
      Timex.weekday(date) == dow and n == 1 -> date
      Timex.weekday(date) == dow -> date_of_nth_dow(Timex.shift(date, [weeks: 1]), dow, n - 1)
      Timex.weekday(date) != dow -> date_of_nth_dow(Timex.shift(date, [days: 1]), dow, n)
    end
  end

  # Which occurrence (1..5) of its weekday `date` is within its month.
  defp nth_dow(date) do
    Enum.filter(1..5, fn n -> nth_dow?(date, n) end)
    |> Enum.at(0)
  end

  defp nth_dow?(date, n) do
    date == date_of_nth_dow(Timex.beginning_of_month(date), Timex.weekday(date), n)
  end

  # Finds the next {dow, nth} pair after `base` in the month starting at
  # `date`; wraps around to the first pair when none is later.
  defp next_in_tuple_sequence(base, sequence, date) do
    {base_x, base_y} = base
    sequence
    |> Enum.find(Enum.at(sequence, 0), fn {x, y} ->
      cond do
        y == base_y ->
          Timex.compare(date_of_nth_dow(date, base_x, base_y), date_of_nth_dow(date, x, y)) == -1
        y > base_y -> true
        y < base_y -> false
      end
    end)
  end

  # Next element of the sorted sequence strictly greater than `base`,
  # wrapping around to the first element.
  defp next_in_sequence(base, sequence) do
    sequence
    |> Enum.find(Enum.at(sequence, 0), fn x -> x > base end)
  end
end
|
lib/recurrencex.ex
| 0.861188 | 0.756964 |
recurrencex.ex
|
starcoder
|
defmodule Mnemonix.Behaviour.Definition.Params do
  @moduledoc false

  # Helpers for analyzing and normalizing quoted parameter lists (ASTs)
  # of function heads.

  # Returns the range of arities the parameter list can be called with;
  # each `\\` default widens the upper bound by one.
  def arities(params) do
    {arity, defaults} =
      Enum.reduce(params, {0, 0}, fn
        {:\\, _, [_arg, _default]}, {arity, defaults} ->
          {arity, defaults + 1}

        _ast, {arity, defaults} ->
          {arity + 1, defaults}
      end)

    Range.new(arity, arity + defaults)
  end

  # Normalizes every parameter AST: bare `_` becomes `argN` (N = 1-based
  # position), `_name` becomes `name`, and defaults/nested structures are
  # normalized recursively. Returns a list of plain parameter ASTs.
  def normalize(params) do
    params
    |> Enum.with_index(1)
    |> Enum.map(&normalize_param/1)
  end

  # Defaulted parameter: normalize both sides and return the bare `\\` AST.
  # Bug fix: the result was previously re-wrapped in an `{ast, index}`
  # tuple — unlike every other clause — which produced invalid parameter
  # ASTs for defaulted arguments.
  defp normalize_param({{:\\, meta, [param, default]}, index}) do
    {:\\, meta, [normalize_param({param, index}), normalize_param({default, index})]}
  end

  # A bare underscore gets a positional name so it can be referenced.
  defp normalize_param({{:_, meta, context}, index}) do
    {String.to_atom("arg#{index}"), meta, context}
  end

  # `_name`-style variables lose their leading underscore.
  defp normalize_param({{name, meta, context}, _})
       when is_atom(name) and (is_atom(context) or context == nil) do
    string = Atom.to_string(name)

    if String.starts_with?(string, "_") do
      {String.to_atom(String.trim_leading(string, "_")), meta, context}
    else
      {name, meta, context}
    end
  end

  # Calls / constructors with argument lists: normalize each argument.
  # (An exact duplicate of this clause was removed; it was unreachable.)
  defp normalize_param({{call, meta, args}, index}) when is_list(args) do
    params = Enum.map(args, fn param -> normalize_param({param, index}) end)
    {call, meta, params}
  end

  # Two-element tuples are literal AST; normalize both elements.
  defp normalize_param({{two, tuple}, index}) do
    {normalize_param({two, index}), normalize_param({tuple, index})}
  end

  # Any other literal (numbers, atoms, strings, …) passes through unchanged.
  defp normalize_param({literal, _}) do
    literal
  end

  # Replaces `pattern = var` / `var = pattern` matches with the bare
  # variable so the params can be forwarded in a delegating call.
  def strip_matches(params) do
    Macro.prewalk(params, fn
      {:=, _, [arg1, arg2]} -> pick_match(arg1, arg2)
      ast -> ast
    end)
  end

  defp pick_match({name, meta, context}, _arg2)
       when is_atom(name) and (is_atom(context) or context == nil) do
    {name, meta, context}
  end

  defp pick_match(_arg1, {name, meta, context})
       when is_atom(name) and (is_atom(context) or context == nil) do
    {name, meta, context}
  end

  # Neither side is a simple variable: the match cannot be forwarded,
  # so fail loudly at compile time.
  defp pick_match(arg1, arg2) do
    description = """
    could not resolve match into variable name;
    either the left or right side of matches in
    callback implementations must be a simple variable.
    Got: `#{Macro.to_string(arg1)} = #{Macro.to_string(arg2)}`
    """

    raise CompileError, description: description
  end

  # Drops `\\ default` annotations, keeping just the parameter.
  def strip_defaults(params) do
    Macro.prewalk(params, fn
      {:\\, _, [arg, _default]} -> arg
      ast -> ast
    end)
  end
end
|
lib/mnemonix/behaviour/definition/params.ex
| 0.616359 | 0.434161 |
params.ex
|
starcoder
|
defmodule Membrane.Dashboard.Dagre.G6Marshaller do
  @moduledoc """
  This module is responsible for marshalling data regarding links between membrane elements
  to create a structure suitable for generating a dagre layout (directed graph) using a `G6` library
  whose documentation is available at [https://g6.antv.vision/en/docs/api/graphLayout/dagre].
  The resulting structure is a JSON document having 3 arrays, each for a different type:
  - nodes
  - edges
  - combos
  While what is a node and an edge is clear, combo is a group of nodes which can be again nested to become a group of combos.
  It gives a visual effect of a box with all declared nodes being inside of it. Nodes are firstly grouped into pipelines, then they can be
  again grouped but this time in bins which are nested inside pipelines ect.
  This module takes into account the elements' liveness and accordingly assigns the nodes with proper colors.
  Colors differ depending on what state the node currently is in. If it belongs to already existing elements' group, it will have a darker color. If it has
  just been created it will have a brighter color.
  One can distinguish 2 types of nodes:
  - element nodes
  - bin's nodes
  Bin's nodes have different colors and represent bin's internal linking points while element nodes represent simple elements (sources, filters, sinks).
  """
  require Logger

  @type link_t :: %{
    parent_path: String.t(),
    from: String.t(),
    to: String.t(),
    pad_from: String.t(),
    pad_to: String.t()
  }

  # Sentinel used by Membrane for a bin linking to/from itself.
  @bin_itself "{Membrane.Bin, :itself}"

  # Node fill colors keyed by liveness (new/dead/existing) and by whether
  # the node is a bin's internal linking point.
  @new_bin_node_style %{
    fill: "#ffb700"
  }
  @dead_bin_node_style %{
    fill: "#730000"
  }
  @existing_bin_node_style %{
    fill: "#ad8110"
  }
  @new_node_style %{
    fill: "#14fa14"
  }
  @dead_node_style %{
    fill: "#ff5559"
  }
  @existing_node_style %{
    fill: "#166e15"
  }
  @default_node_style %{}

  @spec run(
    [link_t()],
    elements_liveliness :: %{new: MapSet.t(), dead: MapSet.t(), existing: MapSet.t()}
  ) :: {:ok, any()} | {:error, any()}
  def run(links, elements_liveliness) do
    bin_nodes = collect_bin_nodes(links)
    # Deduplicate nodes/edges/combos via MapSets while folding over links.
    result =
      links
      |> Enum.map(fn link -> format_link(link, bin_nodes) end)
      |> Enum.reduce(
        %{nodes: MapSet.new(), edges: MapSet.new(), combos: MapSet.new()},
        &reduce_link/2
      )
    nodes = colorize_nodes(result.nodes, elements_liveliness)
    {:ok, %{result | nodes: nodes}}
  end

  # Enriches a raw link with display labels, stable node ids (hashes) and
  # full element paths for both endpoints.
  defp format_link(link, bin_nodes) do
    parents = link.parent_path |> String.split("/")
    last_parent = parents |> List.last()
    {from_is_bin, from_path} = element_path(link.parent_path, parents, link.from, bin_nodes)
    {to_is_bin, to_path} = element_path(link.parent_path, parents, link.to, bin_nodes)
    from = format_element(last_parent, link.from, link.pad_from, from_is_bin)
    to = format_element(last_parent, link.to, link.pad_to, to_is_bin)
    link
    |> Map.merge(%{
      from: from,
      from_node: generate_node(from_path, from),
      from_path: from_path,
      from_is_bin: from_is_bin,
      to: to,
      to_node: generate_node(to_path, to),
      to_path: to_path,
      to_is_bin: to_is_bin
    })
  end

  # Folds one formatted link into the accumulated nodes/edges/combos sets.
  defp reduce_link(link, %{nodes: nodes, edges: edges, combos: combos}) do
    {from_combo, to_combo} = link_combos(link)
    %{
      nodes:
        nodes
        # put 'from' node
        |> MapSet.put(%{
          id: link.from_node,
          label: link.from,
          comboId: from_combo.id,
          is_bin: link.from_is_bin,
          path: link.from_path ++ [link.from]
        })
        # put 'to' node
        |> MapSet.put(%{
          id: link.to_node,
          label: link.to,
          comboId: to_combo.id,
          is_bin: link.to_is_bin,
          path: link.to_path ++ [link.to]
        }),
      edges:
        edges
        |> MapSet.put(%{
          source: link.from_node,
          target: link.to_node
        }),
      combos: combos |> MapSet.put(from_combo) |> MapSet.put(to_combo)
    }
  end

  # Set of parent paths that end in " bin" (i.e. paths naming a bin).
  defp collect_bin_nodes(links) do
    links
    |> Enum.map(& &1.parent_path)
    |> Enum.filter(&String.ends_with?(&1, " bin"))
    |> MapSet.new()
  end

  # returns 'from' and 'to' elements combos
  defp link_combos(link) do
    from_combo = combo(link.from_path)
    to_combo = combo(link.to_path)
    {from_combo, to_combo}
  end

  # Builds a combo descriptor for a path; its id (and its parent's id) are
  # MD5 hashes of the joined path so they stay stable across runs.
  defp combo(path) do
    id = path |> Enum.join() |> hash_string()
    [label | parents] = path |> Enum.reverse()
    parent_id =
      if parents == [] do
        nil
      else
        parents |> Enum.reverse() |> Enum.join() |> hash_string()
      end
    %{
      id: id,
      label: label,
      parentId: parent_id,
      path: path
    }
  end

  # Attaches a :style (fill color) to every node based on its liveness.
  defp colorize_nodes(nodes, elements_liveliness) do
    nodes
    |> Enum.map(fn %{path: path, is_bin: is_bin} = node ->
      # Bin nodes carry an extra trailing path segment; drop it so the
      # path matches the liveness sets.
      path =
        if is_bin do
          path |> Enum.reverse() |> tl() |> Enum.reverse()
        else
          path
        end
      path_str = Enum.join(path, "/")
      style = select_path_style(path_str, is_bin, elements_liveliness)
      Map.put(node, :style, style)
    end)
  end

  defp select_path_style(path, is_bin, %{dead: dead, new: new, existing: existing}) do
    cond do
      MapSet.member?(dead, path) ->
        if is_bin, do: @dead_bin_node_style, else: @dead_node_style
      MapSet.member?(new, path) ->
        if is_bin, do: @new_bin_node_style, else: @new_node_style
      MapSet.member?(existing, path) ->
        if is_bin, do: @existing_bin_node_style, else: @existing_node_style
      true ->
        Logger.warn("#{path} has not been found among queried elements...")
        @default_node_style
    end
  end

  # Display label: bin-itself endpoints show the bin name + pad; bin
  # endpoints show element + pad; plain elements show just their name.
  defp format_element(last_parent, @bin_itself, pad, _is_bin),
    do: String.replace_suffix(last_parent, " bin", "") <> "\n" <> pad
  defp format_element(_last_parent, element, pad, true), do: element <> "\n" <> pad
  defp format_element(_last_parent, element, _pad, false), do: element

  defp generate_node(path, element),
    do: "#{path |> Enum.join()}#{element}" |> hash_string()

  defp hash_string(to_hash),
    do: to_hash |> :erlang.md5() |> Base.encode16()

  # element_path is responsible for retrieving element path
  # it has to be changed in case given element is a bin itself
  defp element_path(_parent_path, parents, @bin_itself, _bin_nodes) do
    {true, parents}
  end
  defp element_path(parent_path, parents, element, bin_nodes) do
    element_bin = "#{element} bin"
    bin_path = "#{parent_path}/#{element_bin}"
    if MapSet.member?(bin_nodes, bin_path) do
      {true, parents ++ [element_bin]}
    else
      {false, parents}
    end
  end
end
|
lib/membrane_dashboard/dagre/g6_marshaller.ex
| 0.85567 | 0.681356 |
g6_marshaller.ex
|
starcoder
|
defmodule DatabaseYamlConfigProvider do
  @moduledoc """
  A config provider that can load a Rails style database.yml file that has the
  following structure:
  ```yaml
  production:
    adapter: postgresql
    database: testdb
    username: testuser
    password: secret
    host: pgsqlhost
    port: 5432
  ```
  ## Usage
  You need to register this `DatabaseYamlConfigProvider` as config provider in
  the releases section of your mix.exs file.
      releases: [
        my_app: [
          config_providers: [
            {DatabaseYamlConfigProvider, path: "/production/shared/config/database.yml"}
          ],
          ...
        ]
      ]
  By default, this config provider expects an `ENV` environment variable that
  contains the current hosting environment name to be present when booting the
  application.
  Alternatively, you can set the environment directly when defining the config
  provider.
      {DatabaseYamlConfigProvider,
       path: "/production/shared/config",
       env: "production"}
  Or you can speficy another env var containing the particular hosting
  environment on application startup:
      {DatabaseYamlConfigProvider,
       path: "/production/shared/config",
       env: {:system, "RAILS_ENV"}}
  The same works for the location of the database file. You can specify an env
  var containing the path to a folder that contains the database.yml file:
      {DatabaseYamlConfigProvider, path: {:system, "RELEASE_CONFIG_PATH"}}
  When the filename deviates from database.yml you can customize it, too:
      {DatabaseYamlConfigProvider,
       path: {:system, "RELEASE_CONFIG_PATH", "my_custom_database.yml"}}
  """

  @behaviour Config.Provider

  alias DatabaseYamlConfigProvider.AdapterMismatchError
  alias DatabaseYamlConfigProvider.InvalidFileFormatError
  alias DatabaseYamlConfigProvider.UndefinedEnvironmentError

  # Maps the adapter name found in database.yml onto the Ecto adapter
  # modules it may legitimately correspond to.
  @adapters %{
    "mysql" => [Ecto.Adapters.MySQL, Ecto.Adapters.MyXQL],
    "postgresql" => [Ecto.Adapters.Postgres]
  }
  @default_filename "database.yml"

  @impl true
  def init(opts) do
    path = Keyword.fetch!(opts, :path)
    repo = Keyword.fetch!(opts, :repo)
    otp_app = Keyword.fetch!(repo.config(), :otp_app)
    env = Keyword.get(opts, :env, {:system, "ENV"})
    %{repo: repo, otp_app: otp_app, path: path, env: env}
  end

  @impl true
  def load(config, opts) do
    # yaml_elixir must be running before the release's own app starts.
    {:ok, _} = Application.ensure_all_started(:yaml_elixir)
    env = resolve_env(opts.env)
    path = resolve_path(opts.path)
    config_data = fetch_config!(path, env)
    validate_adapter!(opts.repo, config_data["adapter"])

    Config.Reader.merge(config, [
      {opts.otp_app,
       [
         {opts.repo,
          [
            database: config_data["database"],
            hostname: config_data["host"],
            # Fix: read the password from the parsed YAML like all other
            # keys (this line had been corrupted).
            password: config_data["password"],
            port: config_data["port"],
            username: config_data["username"]
          ]}
       ]}
    ])
  end

  # Reads and validates the YAML file. `path` is already resolved by
  # load/2, so it is no longer (redundantly) resolved a second time here.
  defp fetch_config!(path, env) do
    path
    |> read_config()
    |> validate_config!(env)
  end

  defp read_config(path) do
    path
    |> Path.expand()
    |> YamlElixir.read_from_file!()
  end

  # The parsed YAML must be a map containing a section for `env`.
  defp validate_config!(config_data, env) when is_map(config_data) do
    case Map.fetch(config_data, env) do
      {:ok, env_config_data} -> env_config_data
      :error -> raise UndefinedEnvironmentError, env: env
    end
  end

  defp validate_config!(config_data, _env) do
    raise InvalidFileFormatError, data: config_data
  end

  # The adapter named in the file must match the repo's compiled adapter.
  defp validate_adapter!(repo, configured_name) do
    adapter = repo.__adapter__()

    unless valid_adapter?(adapter, configured_name) do
      raise AdapterMismatchError,
        adapter: adapter,
        configured_name: configured_name,
        repo: repo
    end

    :ok
  end

  defp valid_adapter?(adapter, configured_name) do
    case Map.fetch(@adapters, configured_name) do
      {:ok, permitted_adapters} -> adapter in permitted_adapters
      _ -> false
    end
  end

  # {:system, var, file} -> join $var with the filename;
  # {:system, var} -> join $var with the default filename;
  # anything else passes through resolve_env/1 (plain strings unchanged).
  defp resolve_path({:system, varname, filename}) do
    varname
    |> System.fetch_env!()
    |> Path.join(filename)
  end

  defp resolve_path({:system, varname}) do
    resolve_path({:system, varname, @default_filename})
  end

  defp resolve_path(term), do: resolve_env(term)

  defp resolve_env({:system, varname}) do
    System.fetch_env!(varname)
  end

  defp resolve_env(term), do: term
end
|
lib/database_yaml_config_provider.ex
| 0.796094 | 0.589687 |
database_yaml_config_provider.ex
|
starcoder
|
defmodule Figlet.Parser.HeaderlineParser do
  @moduledoc """
  This module is dedicated to parsing the metadata from a Figlet file headerline.
  The [header line](http://www.jave.de/figlet/figfont.html#headerline) gives
  information about the FIGfont. Here is an example showing the names of all
  parameters:
  ```
  flf2a$ 6 5 20 15 3 0 143 229    NOTE: The first five characters in
    |  | | | |  |  | |  |   |     the entire file must be "flf2a".
   /  /  | | |  |  | |  |   \
  Signature  /  /  | |  |  | |   \  Codetag_Count
    Hardblank  /  /  |  | |    \   Full_Layout*
         Height  /   |  |  \    Print_Direction
         Baseline   /    \    Comment_Lines
          Max_Length      Old_Layout*
  ```
  * The two layout parameters are closely related and fairly complex.
      (See [INTERPRETATION OF LAYOUT PARAMETERS](http://www.jave.de/figlet/figfont.html#interpretlayout))
  ## See Also
  - http://www.jave.de/figlet/figfont.html
  - http://www.jave.de/docs/figfont.txt
  - https://github.com/Marak/asciimo/issues/3
  """
  alias Figlet.Meta

  # Provide default values as strings so they can be properly converted
  @defaults %{
    codetag_count: "0",
    full_layout: "0",
    print_direction: "0"
  }

  @doc """
  Parses the headerline (provided as a string binary).
  """
  @spec parse(header_line :: binary, opts :: keyword()) :: {:ok, Meta.t()} | {:error, binary()}
  def parse(header_line, opts \\ [])

  # Matches "flf2" + one arbitrary character + the hardblank character.
  # NOTE(review): this accepts any 5th character, not strictly the "a"
  # required by the spec — confirm whether that leniency is intentional.
  def parse("flf2" <> <<_::binary-size(1), hard_blank::binary-size(1)>> <> tail, _) do
    tail
    |> String.trim()
    |> String.split(" ")
    |> metadata()
    |> case do
      {:ok, metadata} -> {:ok, struct(Meta, Map.put(metadata, :hard_blank, hard_blank))}
      {:error, error} -> {:error, error}
    end
  end

  def parse(_, _) do
    {:error, "Invalid header line: missing flf2a"}
  end

  # Full 8-field header: convert every numeric field to an integer.
  defp metadata([
         height,
         baseline,
         max_length,
         old_layout,
         comment_lines,
         print_direction,
         full_layout,
         codetag_count
       ]) do
    {:ok,
     %{
       height: height |> String.to_integer(),
       baseline: baseline |> String.to_integer(),
       max_length: max_length |> String.to_integer(),
       old_layout: old_layout |> String.to_integer(),
       comment_lines: comment_lines |> String.to_integer(),
       print_direction: print_direction |> String.to_integer(),
       full_layout: full_layout |> String.to_integer(),
       codetag_count: codetag_count |> String.to_integer()
     }}
  end

  # If a headerline omits optional arguments, we pad it with defaults
  defp metadata([_, _, _, _, _, _, _] = data), do: metadata(data ++ [@defaults[:codetag_count]])
  defp metadata([_, _, _, _, _, _] = data),
    do: metadata(data ++ [@defaults[:full_layout], @defaults[:codetag_count]])
  defp metadata([_, _, _, _, _] = data),
    do:
      metadata(
        data ++ [@defaults[:print_direction], @defaults[:full_layout], @defaults[:codetag_count]]
      )

  # Fewer than the 5 mandatory fields: reject.
  defp metadata(_) do
    {:error, "Invalid metadata"}
  end
end
|
lib/figlet/parser/headerline_parser.ex
| 0.833257 | 0.790813 |
headerline_parser.ex
|
starcoder
|
defmodule Crux.Structs.Overwrite do
@moduledoc """
Represents a Discord [Overwrite Object](https://discord.com/developers/docs/resources/channel#overwrite-object).
"""
@moduledoc since: "0.1.0"
@behaviour Crux.Structs
alias Crux.Structs
alias Crux.Structs.{Overwrite, Permissions, Role, Snowflake, User, Util}
defstruct [
:id,
:type,
:allow,
:deny
]
@typedoc since: "0.1.0"
@type t :: %__MODULE__{
id: Snowflake.t(),
type: 0..1,
allow: Permissions.t(),
deny: Permissions.t()
}
@typedoc """
The target of an overwrite.
- `0` role
- `1` member
"""
@typedoc since: "0.3.0"
@type target_type :: 0..1
@target_role 0
@target_member 1
@typedoc """
All available types that can be resolved into a target for a permission overwrite
"""
@typedoc since: "0.2.1"
@type target_resolvable() :: Overwrite.t() | Role.t() | User.id_resolvable()
@doc """
Resolves a `t:target_resolvable/0` into an overwrite target.
> Note that an id or string of it returns `:unknown` as type.
## Examples
```elixir
iex> %Crux.Structs.Overwrite{type: #{@target_member}, id: 218348062828003328}
...> |> Crux.Structs.Overwrite.resolve_target()
{#{@target_member}, 218348062828003328}
iex> %Crux.Structs.Role{id: 376146940762783746}
...> |> Crux.Structs.Overwrite.resolve_target()
{#{@target_role}, 376146940762783746}
iex> %Crux.Structs.User{id: 218348062828003328}
...> |> Crux.Structs.Overwrite.resolve_target()
{#{@target_member}, 218348062828003328}
iex> %Crux.Structs.Member{user: 218348062828003328}
...> |> Crux.Structs.Overwrite.resolve_target()
{#{@target_member}, 218348062828003328}
iex> %Crux.Structs.Message{author: %Crux.Structs.User{id: 218348062828003328}}
...> |> Crux.Structs.Overwrite.resolve_target()
{#{@target_member}, 218348062828003328}
iex> %Crux.Structs.VoiceState{user_id: 218348062828003328}
...> |> Crux.Structs.Overwrite.resolve_target()
{#{@target_member}, 218348062828003328}
iex> 218348062828003328
...> |> Crux.Structs.Overwrite.resolve_target()
{:unknown, 218348062828003328}
iex> "218348062828003328"
...> |> Crux.Structs.Overwrite.resolve_target()
{:unknown, 218348062828003328}
iex> nil
...> |> Crux.Structs.Overwrite.resolve_target()
nil
```
"""
@doc since: "0.2.1"
@spec resolve_target(target_resolvable()) :: {target_type() | :unknown, Snowflake.t()}
def resolve_target(%Overwrite{id: id, type: type}), do: {type, id}
def resolve_target(%Role{id: id}), do: {@target_role, id}

def resolve_target(resolvable) do
  # A map (User, Member, Message, VoiceState, …) that resolves to an id is a
  # member target; a bare id / id-string resolves but has an unknown type.
  id = Structs.resolve_id(resolvable, User)

  cond do
    is_nil(id) -> nil
    is_map(resolvable) -> {@target_member, id}
    true -> {:unknown, id}
  end
end
@doc """
Creates a `t:Crux.Structs.Overwrite.t/0` struct from raw data.
> Automatically invoked by `Crux.Structs.create/2`.
"""
@doc since: "0.1.0"
@spec create(data :: map()) :: t()
def create(data) do
  # Normalize keys to atoms, then coerce the id and both permission sets.
  # Map.update!/3 (like the original) raises if any of these keys is absent.
  conversions = [
    id: &Snowflake.to_snowflake/1,
    allow: &Permissions.resolve/1,
    deny: &Permissions.resolve/1
  ]

  fields =
    Enum.reduce(conversions, Util.atomify(data), fn {key, fun}, acc ->
      Map.update!(acc, key, fun)
    end)

  struct(__MODULE__, fields)
end
end
|
lib/structs/overwrite.ex
| 0.82573 | 0.592224 |
overwrite.ex
|
starcoder
|
defmodule Pnum do
  @moduledoc """
  Concurrent collection enumeration.

  Wraps the stdlib `Enum` module which implements the `Enumerable` protocol.
  Implementation and documentation should mimic the `Enum` module.

  Each operation spawns one linked process per item, applies the function
  concurrently, then gathers the results back in input order.
  """

  @type t :: Enumerable.t
  @type item :: any

  @doc """
  Filters the collection, i.e. returns only those items for which `func`
  returns `true`.

  ## Examples

      iex> Pnum.filter([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
      [2]

  """
  @spec filter(t, (item -> as_boolean(term))) :: list
  def filter(collection, func) do
    collection
    |> process_many(&{func.(&1), &1})
    |> collect()
    |> filter_results()
  end

  @doc """
  Filter the collection and map values in one pass.

  ## Examples

      iex> Pnum.filter_map([1, 2, 3], fn(x) -> rem(x, 2) == 0 end, &(&1 * 2))
      [4]

  """
  @spec filter_map(t, (item -> as_boolean(term)), (item -> item)) :: list
  def filter_map(collection, filter, mapper) do
    collection
    |> process_many(&{filter.(&1), mapper.(&1)})
    |> collect()
    |> filter_results()
  end

  @doc """
  Returns a new collection, where each item is the result of invoking `func`
  on each corresponding item of `collection`.
  For dicts, the function expects a key-value tuple.

  ## Examples

      iex> Pnum.map([1, 2, 3], fn(x) -> x * 2 end)
      [2, 4, 6]

      iex> Pnum.map([a: 1, b: 2], fn({k, v}) -> { k, -v } end)
      [a: -1, b: -2]

  """
  @spec map(t, (item -> any)) :: list
  def map(collection, func) do
    collection |> process_many(func) |> collect()
  end

  @doc """
  Sends the result of invoking `func` with `item` to the parent PID in a
  `{child_pid, result}` tuple. Returns the sent message.
  Used internally to facilitate `Pnum` concurrent operations.
  """
  @spec process(item, (item -> any), pid) :: any
  def process(item, func, parent) do
    # `self()` is the spawned worker pid, letting the parent match replies
    # back to the originating worker. (Bare `self` is deprecated syntax.)
    send(parent, {self(), func.(item)})
  end

  # Await exactly one reply per worker pid, preserving input order.
  # (The original used `&receive do: ...`, capturing a special form, which
  # modern Elixir rejects; an explicit fn is both valid and clearer.)
  defp collect(pids) do
    Enum.map(pids, fn pid ->
      receive do
        {^pid, result} -> result
      end
    end)
  end

  # Keep the payload of `{true, value}` tuples, drop `{false, _}` tuples.
  defp filter_results(results) do
    Enum.flat_map(results, fn
      {true, value} -> [value]
      {false, _value} -> []
    end)
  end

  # Spawn one linked worker per item; returns the worker pids in input order.
  defp process_many(collection, func) do
    Enum.map(collection, &spawn_link(__MODULE__, :process, [&1, func, self()]))
  end
end
|
src/pnum.ex
| 0.875381 | 0.725454 |
pnum.ex
|
starcoder
|
defmodule Scenic.Driver.KeyMap do
  @moduledoc """
  Behaviour and support functions for mapping physical keys to characters.
  This module is meant to be implemented elsewhere and provided to a driver
  in order to localize key presses into the correct characters.
  The `:scenic_driver_local` driver comes with a USEnglish key map, which it
  uses by default. Look at that one as an example on how to make a custom
  key mapping.
  """

  # Map of current key state. A key with a value of 1 is pressed. A key with
  # a value of 0 or that is missing from the map is not pressed.
  # Some keys have multiple states and may be values higher than 1. You get
  # to interpret that as appropriate.
  @type keys :: %{atom => integer}

  @type mod_keys :: [
          :meta
          | :alt
          | :ctrl
          | :shift
          | :caps_lock
          | :num_lock
          | :scroll_lock
        ]

  @doc """
  Translate a key to a codepoint, which is really just a string.
  The first time this is called, state is nil. After that you can return
  any state that makes sense and it will be passed back on the next call.
  If the mapping is successful, i.e. the key press results in a valid character,
  Then this function should return `{ :ok, codepoint, state }`. The returned
  codepoint will be sent on to the ViewPort as a codepoint input event.
  If the key press does not map to a string (this is common), then the function
  should return `{ :ok, nil, state }`. This will not result in a codepoint input
  being sent to the ViewPort.
  If the data makes no sense at all, then you can return `{ :error, error_msg, state }`.
  This will not send a codepoint input, but will log the error message, which should
  be a string.
  """
  @callback map_key(key :: atom, value :: integer, keys :: keys(), state :: any) ::
              {:ok, nil, state :: any}
              | {:ok, codepoint :: String.t(), state :: any}
              | {:error, msg :: String.t(), state :: any}

  @doc """
  Is the caps lock enabled?
  Returns true if caps lock is pressed or active.
  """
  # Fixed doc: the old text claimed shift keys were also checked, which the
  # implementation never did — it only looks at :virt_caps_lock.
  @spec caps_lock?(keys :: keys) :: boolean
  def caps_lock?(keys), do: any_pressed?(keys, [:virt_caps_lock])

  @doc """
  Is the num lock enabled?
  Returns true if num lock is pressed or active.
  """
  @spec num_lock?(keys :: keys) :: boolean
  def num_lock?(keys), do: any_pressed?(keys, [:virt_num_lock])

  @doc """
  Is the scroll lock enabled?
  Returns true if scroll lock is pressed or active.
  """
  @spec scroll_lock?(keys :: keys) :: boolean
  def scroll_lock?(keys), do: any_pressed?(keys, [:virt_scroll_lock])

  @doc """
  Is the current set of keys shifted?
  Returns true if any shift key is pressed or active.
  """
  @spec shift?(keys :: keys) :: boolean
  def shift?(keys) do
    any_pressed?(keys, [:key_shift, :key_leftshift, :key_rightshift])
  end

  @doc """
  Is any alt key pressed?
  """
  @spec alt?(keys :: keys) :: boolean
  def alt?(keys) do
    any_pressed?(keys, [:key_alt, :key_leftalt, :key_rightalt])
  end

  @doc """
  Is any ctrl key pressed?
  """
  @spec ctrl?(keys :: keys) :: boolean
  def ctrl?(keys) do
    any_pressed?(keys, [:key_ctrl, :key_leftctrl, :key_rightctrl])
  end

  @doc """
  Is any meta key pressed? This is usually the command button.
  """
  @spec meta?(keys :: keys) :: boolean
  def meta?(keys) do
    any_pressed?(keys, [:key_meta, :key_leftmeta, :key_rightmeta])
  end

  @doc """
  Generate the list of pressed modifier keys
  """
  @spec mods(keys :: keys) :: mod_keys
  def mods(keys) do
    []
    |> add_if_set(:meta, meta?(keys))
    |> add_if_set(:alt, alt?(keys))
    |> add_if_set(:ctrl, ctrl?(keys))
    |> add_if_set(:shift, shift?(keys))
    |> add_if_set(:caps_lock, caps_lock?(keys))
    |> add_if_set(:num_lock, num_lock?(keys))
    |> add_if_set(:scroll_lock, scroll_lock?(keys))
  end

  # True when any of the named keys has a non-zero value in the state map.
  defp any_pressed?(keys, names), do: Enum.any?(names, &is_pressed?(keys[&1]))

  # nil (absent) and 0 mean "not pressed"; anything else counts as pressed.
  defp is_pressed?(nil), do: false
  defp is_pressed?(0), do: false
  defp is_pressed?(_), do: true

  defp add_if_set(list, value, true), do: [value | list]
  defp add_if_set(list, _value, false), do: list
end
|
lib/scenic/driver/key_map.ex
| 0.833562 | 0.533944 |
key_map.ex
|
starcoder
|
defmodule AssertIdentity do
import ExUnit.Assertions, only: [assert: 2]
@typedoc """
Value that can be compared by identity.
"""
@type comparable :: list | {list, any} | %{id: any} | {map, any}
@doc """
Asserts that `a` and `b` have the same identity.
Checks that the `id` keys of all provided structs are equal. Also compares
any lists.
This is useful to assert that Ecto structs are equal without doing a
comparison on the direct structs, which may not be strictly equivalent due
to e.g. association preloading.
Raises `ExUnit.AssertionError` if identities can't be compared.
## Options
* `sorted` - If `true`, indicates that the given lists are already sorted and
should not be sorted by the function
## Examples
iex> AssertIdentity.assert_ids_match([%{id: 1}], [%{id: 1}])
true
iex> AssertIdentity.assert_ids_match({[%{"id" => 1}], "id"}, {[%{"id" => 1}], "id"})
true
iex> AssertIdentity.assert_ids_match(%{id: 1}, %{id: 1})
true
iex> AssertIdentity.assert_ids_match({%{"id" => 1}, "id"}, {%{"id" => 1}, "id"})
true
iex> AssertIdentity.assert_ids_match([%{id: 2}, %{id: 1}], [%{id: 1}, %{id: 2}])
true
"""
@spec assert_ids_match(comparable(), comparable(), list) :: boolean()
def assert_ids_match(a, b, opts \\ [])
# Two plain lists: compare ids found under the `:key` option (default `:id`).
def assert_ids_match(list1, list2, opts)
    when is_list(list1) and is_list(list2) do
  key = Keyword.get(opts, :key, :id)
  # `sorted: true` promises the caller pre-sorted both lists, so order is
  # significant and pluck_ids/3 must NOT re-sort them.
  sort = Keyword.get(opts, :sorted, false)
  list1_ids = pluck_ids(list1, key, sort)
  list2_ids = pluck_ids(list2, key, sort)
  match_lists(list1_ids, list2_ids)
end
# `{list, key}` tuples: each list names its own id key (e.g. the "id" string
# for plain decoded-JSON maps).
def assert_ids_match({list1, id1}, {list2, id2}, opts)
    when is_list(list1) and is_list(list2) do
  sort = Keyword.get(opts, :sorted, false)
  list1_ids = pluck_ids(list1, id1, sort)
  list2_ids = pluck_ids(list2, id2, sort)
  match_lists(list1_ids, list2_ids)
end
# Two maps/structs carrying an `:id` field.
def assert_ids_match(%{id: id1}, %{id: id2}, _opts) do
  match_structs(id1, id2)
end
# `{map, key}` tuples: ids live under an arbitrary key in each map.
def assert_ids_match({struct1, id1}, {struct2, id2}, _opts)
    when is_map(struct1) and is_map(struct2) do
  match_structs({struct1, id1}, {struct2, id2})
end
# Nothing comparable: fail loudly rather than silently passing.
def assert_ids_match(a, b, _opts) do
  raise ExUnit.AssertionError,
    left: a,
    right: b,
    message: "No `id` key found to compare"
end
# Third argument is the `sorted:` flag: `true` means the caller declared the
# lists already sorted, so extract ids in the given order. `nil` entries pass
# through unchanged.
defp pluck_ids(list, key, true) do
  Enum.map(list, fn
    nil -> nil
    %{^key => id} -> id
  end)
end

# Not pre-sorted: extract the ids, then sort so ordering differences between
# the two lists are ignored.
defp pluck_ids(list, key, false) do
  list
  |> pluck_ids(key, true)
  |> Enum.sort()
end
# Assert two id lists are equal, with a list-specific failure message.
defp match_lists(ids_a, ids_b) do
  assert ids_a == ids_b,
    left: ids_a,
    right: ids_b,
    message: "List `id` keys do not match"
end

# Tuple form: look the ids up under each map's own key, then compare them.
defp match_structs({struct_a, key_a}, {struct_b, key_b}) do
  match_structs(Map.get(struct_a, key_a), Map.get(struct_b, key_b))
end

defp match_structs(id_a, id_b) do
  assert id_a == id_b,
    left: id_a,
    right: id_b,
    message: "Struct `id` keys do not match"
end
end
|
lib/assert_identity.ex
| 0.902177 | 0.691276 |
assert_identity.ex
|
starcoder
|
defmodule Network do
alias Cumatrix, as: CM
@moduledoc """
defnetwork is macros to describe network
argument must have under bar to avoid warning message
```
defnetwork name(_x) do
_x |> element of network |> ...
end
```
element
- w(r,c) weight matrix row-size is r col-size is c. initial val is random * 0.1, default learning rate 0.1
- w(r,c,ir,lr) ir is initial rate to multiple randam, lr is learning rate.
- w(r,c,ir,lr,dr) dr is dropout rate.
- b(n) bias row vector size n. initial val is random * 0.1, default learning rate 0.1
- b(n,ir,lr) ir is initial rate to multiple randam, lr is learning rate.
- b(n,ir,lr,dp) dr is dropout rate.
- activate function leru sigmoid tanh softmax
- f(r,c) filter matrix row-size is r col-size is c. input and output channel is 1, initial val random * 0.1, default learning late 0.1
- f(r,c,i) filter matrix. i input channel.
- f(r,c,i,o) filter matrix. o output channel
- f(r,c,i,o,{st_h,st_w}) filter matrix. st_h and st_w are stride sizes of height and width.
- f(r,c,i,o,{st_h,st_w},pad) filter matrix. pad is padding size.
- f(r,c,i,o,{st_h,st_w},pad,{:xcavier,dim},lr) filter matrix. generate initial element by Xavier method. Dim is dimension of input, lr is learning rate.
- f(r,c,i,o,{st_h,st_w},pad,{:he,dim},lr) filter matrix. generate initial element by He method. Dim is dimension of input, lr is learning rate.
- f(r,c,i,o,{st_h,st_w},pad,ir,lr) filter matrix. ir is rate for initial val, lr is learning rate.
- f(r,c,i,o,{st_h,st_w},pad,ir,lr,dr) filter matrix. dr is dropout rate.
- pooling(st_h,st_w) st_h and st_w are pooling size.
- full convert from image of CNN to matrix for DNN.
for debug
- analizer(n) calculate max min average of data and display n max min average
- visualizer(n,c) display a data(n th, c channel) as graphics
data structure
```
network
[{:weight,w,ir,lr,dr,v},{:bias,b,ir,lr,dr,v},{:function,name},{:filter,w,{st_h,st_w},pad,ir,lr,dr,v} ...]
weight
{:weight,w,ir,lr,dp,v,mask} w is matrix, ir is rate for initial random number,
lr is learning rate, dp is dropout rate.
bias
{:bias,b,ir,lr,dp,v,mask} b is row vector
function
{:function,name} name is function name within sigmoid tanh relu softmax
filter
{:filter,w,{st_h,st_w},pad,ir,lr,dr,v,mask}
pooling
{:pooling,st_,st_w}
```
"""
@doc """
Defines a network-building function from a pipeline description.

The single argument must start with an underscore to avoid unused-variable
warnings; the pipeline body is translated at compile time by `parse/2` into
a list of layer tuples.
"""
defmacro defnetwork(name, do: body) do
  # `name` is the call AST, e.g. `init_network(_x)`; extract the argument
  # name so pipeline stages that reference it can be recognized in parse/2.
  {_, _, [{arg, _, _}]} = name
  body1 = parse(body, arg)

  quote do
    def unquote(name) do
      unquote(body1)
    end
  end
end
# weight
# cw mean constant weight for gradient check
def parse({:cw, _, [m]}, _) do
quote do
[{:weight, CM.new(unquote(m)), 0.1, 0.1, 0.0, CM.new(1, 1)}]
end
end
def parse({:w, _, [x, y]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(0.1), 0.1, 0.1, 0.0,
CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, ir]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(unquote(ir)), unquote(ir), 0.1, 0.0,
CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, {:xavier, dim}, lr]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(:math.sqrt(1 / unquote(dim))),
{:xavier, unquote(dim)}, unquote(lr), 0.0, CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, {:he, dim}, lr]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(:math.sqrt(2 / unquote(dim))),
{:he, unquote(dim)}, unquote(lr), 0.0, CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, ir, lr]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(unquote(ir)), unquote(ir),
unquote(lr), 0.0, CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, {:xavier, dim}, lr, dr]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(:math.sqrt(1 / unquote(dim))),
{:xavier, unquote(dim)}, unquote(lr), unquote(dr), CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, {:he, dim}, lr, dr]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(:math.sqrt(2 / unquote(dim))),
{:he, unquote(dim)}, unquote(lr), unquote(dr), CM.new(unquote(x), unquote(y))}
]
end
end
def parse({:w, _, [x, y, ir, lr, dr]}, _) do
quote do
[
{:weight, CM.rand(unquote(x), unquote(y)) |> CM.mult(unquote(ir)), unquote(ir),
unquote(lr), unquote(dr), CM.new(unquote(x), unquote(y))}
]
end
end
# bias
# cb means constant bias for gradient check
def parse({:cb, _, [m]}, _) do
quote do
[{:bias, CM.new(unquote(m)), 0.1, 0.1, 0.0, CM.new(1, 1)}]
end
end
def parse({:b, _, [x]}, _) do
quote do
[{:bias, CM.new(1, unquote(x)) |> CM.mult(0.1), 0.1, 0.1, 0.0, CM.new(1, unquote(x))}]
end
end
def parse({:b, _, [x, ir]}, _) do
quote do
[
{:bias, CM.rand(1, unquote(x)) |> CM.mult(unquote(ir)), unquote(ir), 0.1, 0.0,
CM.new(1, unquote(x))}
]
end
end
def parse({:b, _, [x, ir, lr]}, _) do
quote do
[
{:bias, CM.rand(1, unquote(x)) |> CM.mult(unquote(ir)), unquote(ir), unquote(lr), 0.0,
CM.new(1, unquote(x))}
]
end
end
def parse({:b, _, [x, ir, lr, dr]}, _) do
quote do
[
{:bias, CM.rand(1, unquote(x)) |> CM.mult(unquote(ir)), unquote(ir), unquote(lr),
unquote(dr), CM.new(1, unquote(x))}
]
end
end
# sigmoid
def parse({:sigmoid, _, nil}, _) do
quote do
[{:function, :sigmoid}]
end
end
# identity
def parse({:tanh, _, nil}, _) do
quote do
[{:function, :tanh}]
end
end
# relu
def parse({:relu, _, nil}, _) do
quote do
[{:function, :relu}]
end
end
# softmax
def parse({:softmax, _, nil}, _) do
quote do
[{:function, :softmax}]
end
end
# filter
# cf means constant filter for gradient check
def parse({:cf, _, [m]}, _) do
quote do
[{:filter, CM.new(unquote(m)), 1, 0, 0.1, 0.1, CM.new(1, 3, 3)}]
end
end
# {:filter,filter-matrix,stride,padding,init_rate,learning_rate,dropout_rate,v}
def parse({:f, _, [x, y]}, _) do
quote do
[
{:filter, CM.rand(1, 1, unquote(x), unquote(y)) |> CM.mult(0.1), 1, 0, 0.1, 0.1, 0.0,
CM.new(1, 1, unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c]}, _) do
quote do
[
{:filter, CM.rand(1, unquote(c), unquote(x), unquote(y)) |> CM.mult(0.1), 1, 0, 0.1, 0.1,
0.0, CM.new(1, unquote(c), unquote(x), unquote(y))}
]
end
end
# Filter with explicit input (c) and output (n) channel counts.
# Element order follows {:filter, w, stride, pad, ir, lr, dr, v}. The
# previous code emitted `1, 0, 0.0, 0.1, 0.1`, which shifted the init,
# learning and dropout rates out of position relative to every sibling
# clause (they all emit ir=0.1, lr=0.1, dr=0.0).
def parse({:f, _, [x, y, c, n]}, _) do
  quote do
    [
      {:filter, CM.rand(unquote(n), unquote(c), unquote(x), unquote(y)) |> CM.mult(0.1), 1, 0,
       0.1, 0.1, 0.0, CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
    ]
  end
end
def parse({:f, _, [x, y, c, n, {h, w}]}, _) do
quote do
[
{:filter, CM.rand(unquote(n), unquote(c), unquote(x), unquote(y)) |> CM.mult(0.1),
{unquote(h), unquote(w)}, 0, 0.1, 0.1, 0.0,
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad]}, _) do
quote do
[
{:filter, CM.rand(unquote(n), unquote(c), unquote(x), unquote(y)) |> CM.mult(0.1),
{unquote(h), unquote(w)}, unquote(pad), 0.1, 0.1, 0.0,
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, {:xavier, dim}]}, _) do
quote do
[
{:filter,
CM.rand(unquote(n), unquote(c), unquote(x), unquote(y))
|> CM.mult(:math.sqrt(1 / unquote(dim))), {unquote(h), unquote(w)}, unquote(pad),
{:xavier, unquote(dim)}, 0.1, 0.0,
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, {:he, dim}]}, _) do
quote do
[
{:filter,
CM.rand(unquote(n), unquote(c), unquote(x), unquote(y))
|> CM.mult(:math.sqrt(2 / unquote(dim))), {unquote(h), unquote(w)}, unquote(pad),
{:he, unquote(dim)}, 0.1, 0.0, CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, {:xavier, dim}, lr]}, _) do
quote do
[
{:filter,
CM.rand(unquote(n), unquote(c), unquote(x), unquote(y))
|> CM.mult(:math.sqrt(1 / unquote(dim))), {unquote(h), unquote(w)}, unquote(pad),
{:xavier, unquote(dim)}, unquote(lr), 0.0,
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, {:he, dim}, lr]}, _) do
quote do
[
{:filter,
CM.rand(unquote(n), unquote(c), unquote(x), unquote(y))
|> CM.mult(:math.sqrt(2 / unquote(dim))), {unquote(h), unquote(w)}, unquote(pad),
{:he, unquote(dim)}, unquote(lr), 0.0,
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, ir, lr]}, _) do
quote do
[
{:filter, CM.rand(unquote(n), unquote(c), unquote(x), unquote(y)) |> CM.mult(unquote(ir)),
{unquote(h), unquote(w)}, unquote(pad), unquote(ir), unquote(lr), 0.0,
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, {:xavier, dim}, lr, dr]}, _) do
quote do
[
{:filter,
CM.rand(unquote(n), unquote(c), unquote(x), unquote(y))
|> CM.mult(:math.sqrt(1 / unquote(dim))), {unquote(h), unquote(w)}, unquote(pad),
{:xavier, unquote(dim)}, unquote(lr), unquote(dr),
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, {:he, dim}, lr, dr]}, _) do
quote do
[
{:filter,
CM.rand(unquote(n), unquote(c), unquote(x), unquote(y))
|> CM.mult(:math.sqrt(2 / unquote(dim))), {unquote(h), unquote(w)}, unquote(pad),
{:he, unquote(dim)}, unquote(lr), unquote(dr),
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
def parse({:f, _, [x, y, c, n, {h, w}, pad, ir, lr, dr]}, _) do
quote do
[
{:filter, CM.rand(unquote(n), unquote(c), unquote(x), unquote(y)) |> CM.mult(unquote(ir)),
{unquote(h), unquote(w)}, unquote(pad), unquote(ir), unquote(lr), unquote(dr),
CM.new(unquote(n), unquote(c), unquote(x), unquote(y))}
]
end
end
# pooling
def parse({:pooling, _, [h, w]}, _) do
quote do
[{:pooling, unquote(h), unquote(w)}]
end
end
# flll connection
def parse({:full, _, nil}, _) do
quote do
[{:full}]
end
end
# analizer for debug
def parse({:analizer, _, [x]}, _) do
quote do
[{:analizer, unquote(x)}]
end
end
# visualizer for debug
def parse({:visualizer, _, [n, c]}, _) do
quote do
[{:visualizer, unquote(n), unquote(c)}]
end
end
# RNN
def parse({:rnn, _, [x, n]}, _) do
rnn(x, 1, n, 0.1, 0.1, 0.0)
end
def parse({:rnn, _, [x, n, ir]}, _) do
rnn(x, 1, n, ir, 0.1, 0.0)
end
def parse({:rnn, _, [x, n, ir, lr]}, _) do
rnn(x, 1, n, ir, lr, 0.0)
end
def parse({:rnn, _, [x, n, ir, lr, dr]}, _) do
rnn(x, 1, n, ir, lr, dr)
end
# LSTM
def parse({:lstm, _, [x, n]}, _) do
lstm(x, 1, n, 0.1, 0.1, 0.0)
end
def parse({:lstm, _, [x, n, ir]}, _) do
lstm(x, 1, n, ir, 0.1, 0.0)
end
def parse({:lstm, _, [x, n, ir, lr]}, _) do
lstm(x, 1, n, ir, lr, 0.0)
end
def parse({:lstm, _, [x, n, ir, lr, dr]}, _) do
lstm(x, 1, n, ir, lr, dr)
end
# Bare variable (the network argument itself): passes through unchanged.
def parse({x, _, nil}, _) do
  x
end
# Pipe operator: parse its [left, right] argument pair.
def parse({:|>, _, exp}, arg) do
  parse(exp, arg)
end
# Pipe whose left side is the network argument: only the right side
# contributes layers.
def parse([{arg, _, nil}, exp], arg) do
  parse(exp, arg)
end
# General pipe: combine the layer lists of both sides.
# NOTE(review): `Enum.reverse(B ++ Enum.reverse(A))` equals `A ++ Enum.reverse(B)`.
# For the common single-layer right-hand stage this is simply `A ++ B`, but a
# multi-layer stage (rnn/lstm) appearing on the right comes out reversed —
# verify this is intended for recurrent pipelines.
def parse([exp1, exp2], arg) do
  Enum.reverse(parse(exp2, arg) ++ Enum.reverse(parse(exp1, arg)))
end
# Anything else is a syntax error in the defnetwork DSL.
def parse(x, _) do
  IO.write("Syntax error in defnetwork ")
  IO.inspect(x)
  raise ""
end
# Recursion terminator: all `n` RNN layers have been emitted.
def rnn(_, m, n, _, _, _) when m > n do
  []
end
# data structure RNN = {:rnn, nth, n, Wx, Wh, b, ir, lr, dr, v}
# nth means nth of recursive
# n means times of recursive
# Wx is weight matrix for input x
# Wh is weight matrix for h (output t-1)
# b is bias
# ir is initial rate
# lr is learning rate
# dr is dropout rate
# v = matrix for learning momentum
def rnn(x, m, n, ir, lr, dr) do
  [
    quote do
      {:rnn, unquote(m), unquote(n), CM.rand(unquote(x), unquote(x)),
       CM.rand(unquote(x), unquote(x)), CM.rand(1, unquote(x)), unquote(ir), unquote(lr),
       unquote(dr), CM.rand(unquote(x), unquote(x))}
    end
    | rnn(x, m + 1, n, ir, lr, dr)
  ]
end
# Recursion terminator: all `n` LSTM layers have been emitted.
def lstm(_, m, n, _, _, _) when m > n do
  []
end
# data structure LSTM = {:lstm, nth, n, Wx, Wh, b, ir, lr, dr, v}
# nth means nth of recursive
# n means times of recursive
# Wx is weight matrix for input x. Wx = Wxf+Wxg+Wxi+Wxo (hence the 4*x width)
# Wh is weight matrix for h. Wh = Whf+Whg+Whi+Who
# B is bias. B = Bf+Bg+Bi+Bo
# ir is initial rate
# lr is learning rate
# dr is dropout rate
# v = matrix for learning momentum
def lstm(x, m, n, ir, lr, dr) do
  [
    quote do
      {:lstm, unquote(m), unquote(n), CM.rand(unquote(x), unquote(4 * x)),
       CM.rand(unquote(x), unquote(4 * x)), CM.rand(1, unquote(4 * x)), unquote(ir),
       unquote(lr), unquote(dr), CM.rand(unquote(x), unquote(4 * x))}
    end
    | lstm(x, m + 1, n, ir, lr, dr)
  ]
end
end
|
lib/macro.ex
| 0.746139 | 0.850655 |
macro.ex
|
starcoder
|
defmodule Plug.LoggerJSON do
@moduledoc """
A plug for logging basic request information in the format:
```json
{
"api_version": "N/A"
"client_ip": "192.168.3.11"
"client_version": "ios/1.6.7",
"date_time": "2016-05-31T18:00:13Z",
"duration": 4.670,
"handler": "fronts#index"
"log_type": "http",
"method": "POST",
"params": {
"user":"jkelly",
"password":"[<PASSWORD>]"
},
"path": "/",
"request_id": "d90jcl66vp09r8tke3utjsd1pjrg4ln8",
"status": "200"
}
```
To use it, just plug it into the desired module.
plug Plug.LoggerJSON, log: :debug
## Options
* `:log` - The log level at which this plug should log its request info.
Default is `:info`.
* `:extra_attributes_fn` - Function to call with `conn` to add additional
fields to the requests. Default is `nil`. Please see "Extra Fields" section
for more information.
## Extra Fields
Additional data can be logged alongside the request by specifying a function
to call which returns a map:
def extra_attributes(conn) do
map = %{
"user_id" => get_in(conn.assigns, [:user, :user_id]),
"other_id" => get_in(conn.private, [:private_resource, :id]),
"should_not_appear" => conn.private[:does_not_exist]
}
map
|> Enum.reject(fn {_key, value} -> is_nil(value) end)
|> Enum.into(%{})
end
plug Plug.LoggerJSON, log: Logger.level,
extra_attributes_fn: &MyPlug.extra_attributes/1
In this example, the `:user_id` is retrieved from `conn.assigns.user.user_id`
and added to the log if it exists. In the example, any values that are `nil`
are filtered from the map. It is a requirement that the value is
serializable as JSON by the Poison library, otherwise an error will be raised
when attempting to encode the value.
"""
alias Plug.Conn
@behaviour Plug
require Logger
@typedoc """
Type for a plug option
"""
@type opts :: binary | tuple | atom | integer | float | [opts] | %{opts => opts}
@typedoc """
Type for time
"""
@type time :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}
@spec init(opts) :: opts
def init(opts), do: opts

@spec call(Plug.Conn.t(), opts) :: Plug.Conn.t()
# Backwards-compatible form: `plug Plug.LoggerJSON, :debug` (bare level atom).
# Fixed: this used to wrap the atom as `level:`, but the clause below reads
# the `:log` key, so the configured level was silently ignored and the
# default `:info` was always used.
def call(conn, level_or_opts) when is_atom(level_or_opts) do
  call(conn, log: level_or_opts)
end

def call(conn, opts) do
  level = Keyword.get(opts, :log, :info)
  # Capture the start time now; the entry is emitted in a before_send
  # callback so the logged duration covers the whole downstream pipeline.
  start = :os.timestamp()

  Conn.register_before_send(conn, fn conn ->
    :ok = log(conn, level, start, opts)
    conn
  end)
end
@spec log(Plug.Conn.t(), atom(), time(), opts) :: atom() | no_return()
def log(conn, level, start, opts \\ [])
# NOTE(review): the level remapping below is surprising but looks deliberate:
# `:error` logs like `:info` (no debug fields), while `:warning`/`:warn`
# route through `:debug`, which logs at `:info` level WITH the extra debug
# fields. Confirm against the plug's documented `:log` option semantics
# before changing.
def log(conn, :error, start, opts), do: log(conn, :info, start, opts)
def log(conn, :info, start, opts), do: log_message(conn, :info, start, opts)
def log(conn, :warning, start, opts), do: log(conn, :debug, start, opts)
@deprecated "use :warning instead"
def log(conn, :warn, start, opts), do: log(conn, :debug, start, opts)
# `:debug` forces inclusion of client ip/version and request params.
def log(conn, :debug, start, opts) do
  log_message(conn, :info, start, Keyword.put_new(opts, :include_debug_logging, true))
end
@spec log_error(atom(), map(), list()) :: atom()
def log_error(kind, reason, stacktrace) do
_ =
Logger.log(:error, fn ->
%{
"log_type" => "error",
"message" => Exception.format(kind, reason, stacktrace),
"request_id" => Logger.metadata()[:request_id]
}
|> Poison.encode!()
end)
end
@spec log_message(Plug.Conn.t(), atom(), time(), opts) :: atom()
defp log_message(conn, level, start, opts) do
Logger.log(level, fn ->
conn
|> basic_logging(start)
|> Map.merge(debug_logging(conn, opts))
|> Map.merge(phoenix_attributes(conn))
|> Map.merge(extra_attributes(conn, opts))
|> Poison.encode!()
end)
end
# Core request fields present at every log level. Keys listed in the
# :suppressed_keys app env are dropped from the entry.
defp basic_logging(conn, start) do
  # Take a single end-of-request timestamp and reuse it for both the
  # duration and the logged date_time (previously a second `:os.timestamp()`
  # call could produce a slightly later time than the one measured against).
  stop = :os.timestamp()
  duration = :timer.now_diff(stop, start)
  req_id = Logger.metadata()[:request_id]
  req_headers = format_map_list(conn.req_headers)

  log_json = %{
    "api_version" => Map.get(req_headers, "accept", "N/A"),
    "date_time" => iso8601(:calendar.now_to_datetime(stop)),
    "duration" => Float.round(duration / 1000, 3),
    "log_type" => "http",
    "method" => conn.method,
    "path" => conn.request_path,
    "request_id" => req_id,
    "status" => conn.status
  }

  Map.drop(log_json, Application.get_env(:plug_logger_json, :suppressed_keys, []))
end
defp extra_attributes(conn, opts) do
case Keyword.get(opts, :extra_attributes_fn) do
fun when is_function(fun) -> fun.(conn)
_ -> %{}
end
end
@spec client_version(%{String.t() => String.t()}) :: String.t()
# Prefer the explicit "x-client-version" header; fall back to "user-agent",
# then to "N/A" when neither is present.
defp client_version(headers) do
  case Map.get(headers, "x-client-version", "N/A") do
    "N/A" -> Map.get(headers, "user-agent", "N/A")
    version -> version
  end
end
@spec debug_logging(Plug.Conn.t(), opts) :: map()
defp debug_logging(conn, opts) do
case Keyword.get(opts, :include_debug_logging) do
true ->
req_headers = format_map_list(conn.req_headers)
%{
"client_ip" => format_ip(Map.get(req_headers, "x-forwarded-for", "N/A")),
"client_version" => client_version(req_headers),
"params" => format_map_list(conn.params)
}
_ ->
%{}
end
end
@spec filter_values(struct(), [binary()]) :: binary()
# Structs can't be enumerated directly; convert to a plain map first.
defp filter_values(%{__struct__: mod} = struct, filters) when is_atom(mod) do
  struct
  |> Map.from_struct()
  |> filter_values(filters)
end
# NOTE(review): multiple @spec attributes on one private function act as
# alternative contracts for Dialyzer; the declared return types here do not
# all match the actual returns (maps/lists/scalars) — worth consolidating.
@spec filter_values(map(), [binary()]) :: [{binary(), any()}]
defp filter_values(%{} = map, filters) do
  # Redact the value of any filtered (string) key; recurse into the rest.
  Enum.into(map, %{}, fn {k, v} ->
    if is_binary(k) and k in filters do
      {k, "[FILTERED]"}
    else
      {k, filter_values(v, filters)}
    end
  end)
end
@spec filter_values([{binary(), any()}], [binary()]) :: [{binary(), any()}]
defp filter_values(list, filters) when is_list(list) do
  Enum.map(list, &filter_values(&1, filters))
end
# Scalars: just length-cap binaries via format_value/1.
defp filter_values(other, _filters), do: format_value(other)
@spec format_ip(String.t()) :: String.t()
defp format_ip("N/A"), do: "N/A"

# "x-forwarded-for" may carry a comma-separated proxy chain; the first entry
# is the originating client.
defp format_ip(x_forwarded_for) do
  x_forwarded_for
  |> String.split(", ")
  |> List.first()
end
@spec format_map_list(Enumerable.t()) :: map()
defp format_map_list(enumerable) do
enumerable
|> filter_values(Application.get_env(:plug_logger_json, :filtered_keys, []))
|> Enum.into(%{})
end
# Cap binaries at 501 graphemes (slice 0..500) so huge values don't bloat
# the log entry; every other term passes through untouched.
defp format_value(value) when is_binary(value), do: String.slice(value, 0, 501)
defp format_value(value), do: value
# Render an Erlang calendar datetime tuple as an ISO-8601 UTC string,
# zero-padding each component to its fixed width.
defp iso8601({{year, month, day}, {hour, minute, second}}) do
  pad = fn val, width ->
    val |> Integer.to_string() |> String.pad_leading(width, "0")
  end

  "#{pad.(year, 4)}-#{pad.(month, 2)}-#{pad.(day, 2)}" <>
    "T#{pad.(hour, 2)}:#{pad.(minute, 2)}:#{pad.(second, 2)}Z"
end
@spec phoenix_attributes(map()) :: map()
# When Phoenix has put controller/action into conn.private, report them as
# "Controller#action"; otherwise fall back to "N/A".
defp phoenix_attributes(%{private: %{phoenix_controller: controller, phoenix_action: action}}) do
  %{"handler" => Enum.join([controller, action], "#")}
end

defp phoenix_attributes(_conn), do: %{"handler" => "N/A"}
@spec zero_pad(1..3_000, non_neg_integer()) :: String.t()
# Left-pad `val` with zeros to exactly `count` digits. Like the original,
# this raises when `val` already has more digits than `count`.
defp zero_pad(val, count) do
  digits = Integer.to_string(val)
  String.duplicate("0", count - byte_size(digits)) <> digits
end
end
|
lib/plug/logger_json.ex
| 0.848549 | 0.674412 |
logger_json.ex
|
starcoder
|
defmodule Ecto.Model do
  @moduledoc """
  Convenience functions for defining and working with models.

  ## Using

  `use Ecto.Model` acts as an "umbrella" that pulls common model
  functionality into the calling module:

    * `use Ecto.Schema` - provides the API necessary to define schemas
    * `import Ecto.Changeset` - functions for building and manipulating changesets
    * `import Ecto.Model` - functions for working with models and their associations
    * `import Ecto.Query` - functions for generating and manipulating queries

  It also brings in every `Ecto.Model.*` module:

    * `use Ecto.Model.Callbacks` - provides lifecycle callbacks
    * `use Ecto.Model.Timestamps` - automatically set `inserted_at` and
      `updated_at` fields declared via `Ecto.Schema.timestamps/1`
    * `use Ecto.Model.OptimisticLock` - makes the `optimistic_lock/1` macro
      available

  Each of the above can be used directly instead of `Ecto.Model` if you
  prefer to cherry-pick functionality.

  ## Importing

  Import this module wherever you work with many different models - for
  example in web-application plugs that need generic helpers for building
  and accessing model information.
  """

  @doc false
  defmacro __using__(_opts) do
    # Order matters: the schema API must be in place before the
    # Ecto.Model.* extensions are applied.
    quote do
      use Ecto.Schema
      import Ecto.Changeset
      import Ecto.Query
      import Ecto.Model
      use Ecto.Model.OptimisticLock
      use Ecto.Model.Timestamps
      use Ecto.Model.Callbacks
    end
  end

  @type t :: %{__struct__: atom}

  @doc """
  Returns the model primary key value.

  Raises `Ecto.NoPrimaryKeyError` if model has no primary key field.
  """
  @spec primary_key(t) :: any
  def primary_key(struct) do
    field = primary_key_field(struct)
    Map.fetch!(struct, field)
  end

  # Looks up the schema's primary key field, raising when none is declared.
  defp primary_key_field(%{__struct__: model}) do
    if field = model.__schema__(:primary_key) do
      field
    else
      raise Ecto.NoPrimaryKeyError, model: model
    end
  end

  @doc """
  Builds a struct from the given `assoc` in `model`.

  ## Examples

  For `has_one`/`has_many` associations, when the owner key is set in
  the given model, it is automatically carried into the built struct:

      iex> post = Repo.get(Post, 13)
      %Post{id: 13}
      iex> build(post, :comments)
      %Comment{id: nil, post_id: 13}

  This does not happen for `belongs_to`, since the key there is usually
  the primary key and is generated dynamically:

      iex> comment = Repo.get(Post, 13)
      %Comment{id: 13, post_id: 25}
      iex> build(comment, :post)
      %Post{id: nil}
  """
  def build(%{__struct__: model} = struct, assoc) do
    refl = Ecto.Association.association_from_model!(model, assoc)
    refl.__struct__.build(refl, struct)
  end

  @doc """
  Builds a query for the association in the given model or models.

  ## Examples

  Fetch all comments associated to the given post:

      post = Repo.get Post, 1
      Repo.all assoc(post, :comments)

  `assoc/2` also accepts a list of models, as long as it is non-empty:

      posts = Repo.all from p in Post, where: is_nil(p.published_at)
      Repo.all assoc(posts, :comments)
  """
  def assoc(model_or_models, assoc) do
    case List.wrap(model_or_models) do
      [] ->
        raise ArgumentError, "cannot retrieve association #{inspect assoc} for empty list"

      [first | _] = structs ->
        model = first.__struct__

        refl = %{owner_key: owner_key} =
          Ecto.Association.association_from_model!(model, assoc)

        # `key = ...` acts as a comprehension filter here: structs whose
        # owner key is nil are deliberately skipped.
        values =
          for struct <- structs,
              assert_struct!(model, struct),
              key = Map.fetch!(struct, owner_key),
              do: key

        refl.__struct__.assoc_query(refl, values)
    end
  end

  # Ensures every struct in the list is of type `model`; returns true or raises.
  defp assert_struct!(model, %{__struct__: struct}) do
    struct == model ||
      raise ArgumentError, "expected a homogeneous list containing the same struct, " <>
                           "got: #{inspect model} and #{inspect struct}"
  end
end
|
lib/ecto/model.ex
| 0.870446 | 0.579252 |
model.ex
|
starcoder
|
defmodule Ockam.Hub.Service.Provider do
  @moduledoc """
  Behaviour module and entrypoint to start Ockam.Hub services

  Provider behaviour implementations should provide a list of service names and be able to
  start service workers given names and arguments

  Provider can start all services configured in :ockam_hub => :services application environment
  with :ockam_hub => :providers provider implementations
  """

  require Logger

  @type child_spec :: Supervisor.child_spec() | {module(), term()} | module()
  @type service_config :: atom() | {atom(), list()}

  ## TODO: maybe we need more than just a name here?
  @callback services() :: [atom()]
  @callback child_spec(name :: atom(), args :: Keyword.t()) :: child_spec()

  # Child spec for running this provider supervisor under a parent supervisor.
  def child_spec(args) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, args}
    }
  end

  def start_link(providers \\ nil, services \\ nil)

  # No explicit service list: fall back to the configured services.
  def start_link(providers, nil) do
    services = get_configured_services()
    start_link(providers, services)
  end

  # Starts a supervisor over all resolvable services; refuses to start if
  # any configured service cannot be resolved to a provider.
  def start_link(providers, services) do
    {child_specs, errors} = get_services_child_specs(services, providers)

    case errors do
      [] ->
        Supervisor.start_link(child_specs, name: __MODULE__, strategy: :one_for_one)

      errors ->
        {:error, errors}
    end
  end

  # Builds child specs for all service configs, partitioning failures
  # ({:error, _}) from successes.
  @spec get_services_child_specs(Enum.t(), nil | list()) :: {[child_spec()], [{:error, any()}]}
  def get_services_child_specs(services_config, providers \\ nil) do
    service_providers_map = get_service_providers_map(providers)

    spec_results =
      Enum.map(services_config, fn service_config ->
        get_service_child_spec(service_config, service_providers_map)
      end)

    {ok_results, errors} =
      Enum.split_with(spec_results, fn
        {:ok, _} -> true
        {:error, _} -> false
      end)

    child_specs = Enum.map(ok_results, fn {:ok, spec} -> spec end)

    {child_specs, errors}
  end

  @spec get_service_child_spec(service_config(), nil | list()) ::
          {:ok, child_spec()} | {:error, any()}
  def get_service_child_spec(service_config, providers \\ nil)

  def get_service_child_spec(service_name, providers) when is_atom(service_name) do
    get_service_child_spec({service_name, []}, providers)
  end

  # Resolves the service name to its provider module and asks the provider
  # for a child spec; the spec id is normalized to the service name.
  def get_service_child_spec({service_name, service_args}, providers) do
    service_providers_map = get_service_providers_map(providers)

    case Map.get(service_providers_map, service_name) do
      nil ->
        {:error, {:unknown_service, service_name}}

      provider_mod ->
        child_spec =
          Supervisor.child_spec(provider_mod.child_spec(service_name, service_args),
            id: service_name
          )

        {:ok, child_spec}
    end
  end

  @spec start_service(service_config(), atom(), nil | list()) ::
          {:ok, pid()} | {:ok, pid(), any()} | {:error, any()}
  def start_service(service_config, supervisor, providers \\ nil) do
    case get_service_child_spec(service_config, providers) do
      {:ok, child_spec} ->
        Supervisor.start_child(supervisor, child_spec)

      {:error, reason} ->
        {:error, reason}
    end
  end

  ## FIX: this spec was previously written as `@spec start_service(...)`,
  ## duplicating the spec above and leaving start_configured_service/3
  ## without one.
  @spec start_configured_service(atom(), atom(), list()) ::
          {:ok, pid()} | {:ok, pid(), any()} | {:error, any()}
  def start_configured_service(service_name, supervisor, extra_args \\ []) do
    services = get_configured_services()

    case Keyword.get(services, service_name) do
      nil ->
        {:error, :service_not_configured}

      default_args ->
        ## FIX: arguments were previously passed as (config, nil, supervisor),
        ## which handed `nil` to Supervisor.start_child/2 as the supervisor
        ## and the supervisor as the providers list.
        start_service({service_name, Keyword.merge(default_args, extra_args)}, supervisor)
    end
  end

  # Maps each service name to the provider module that declares it.
  # Accepts an already-built map (pass-through) for internal reuse.
  @spec get_service_providers_map(nil | list()) :: map()
  def get_service_providers_map(providers) when is_list(providers) or providers == nil do
    providers
    |> get_providers()
    |> Enum.flat_map(fn provider_mod ->
      Enum.map(provider_mod.services(), fn service -> {service, provider_mod} end)
    end)
    |> Map.new()
  end

  def get_service_providers_map(providers_map) when is_map(providers_map) do
    providers_map
  end

  ## FIX: this spec was previously named get_service_providers_map/1,
  ## conflicting with the spec above; it belongs to get_providers/1.
  @spec get_providers(nil | list()) :: list()
  def get_providers(providers \\ nil)
  def get_providers(nil), do: Application.get_env(:ockam_hub, :service_providers)
  def get_providers(providers) when is_list(providers), do: providers

  # Reads the service list from the configured source (JSON string, file,
  # comma-separated list, or plain application env).
  @spec get_configured_services() :: [service_config()]
  def get_configured_services() do
    case Application.get_env(:ockam_hub, :services_config_source) do
      "json" ->
        parse_services_json(Application.get_env(:ockam_hub, :services_json))

      "file" ->
        parse_services_file(Application.get_env(:ockam_hub, :services_file))

      "list" ->
        parse_services_list(Application.get_env(:ockam_hub, :services_list, []))

      _other ->
        parse_services_config(Application.get_env(:ockam_hub, :services, []))
    end
  end

  # Normalizes each entry to the {name, args_keyword_list} shape.
  @spec parse_services_config(Enum.t()) :: [service_config()]
  def parse_services_config(services) do
    Enum.map(
      services,
      fn
        atom when is_atom(atom) -> {atom, []}
        {atom, args_map} when is_map(args_map) -> {atom, Map.to_list(args_map)}
        {_atom, _args} = config -> config
      end
    )
  end

  @doc false
  def parse_services_list(nil) do
    []
  end

  def parse_services_list(services) do
    # NOTE: String.to_atom/1 on config input is acceptable here because the
    # list comes from operator-controlled application environment.
    services
    |> String.split(",")
    |> Enum.map(fn service_name -> service_name |> String.trim() |> String.to_atom() end)
    |> parse_services_config()
  end

  @doc false
  def parse_services_json(nil) do
    []
  end

  def parse_services_json("") do
    []
  end

  def parse_services_json(json) do
    case Poison.decode(json, keys: :atoms) do
      {:ok, services} ->
        ## TODO: validate services
        services
        |> Enum.map(fn {service, args} -> {service, Enum.to_list(args)} end)
        |> Enum.to_list()

      {:error, err} ->
        raise("Unable to parse json services config: #{inspect(err)}")
    end
  end

  @doc false
  def parse_services_file(nil) do
    raise("Services config file is not defined")
  end

  def parse_services_file(filename) do
    with true <- File.exists?(filename),
         {:ok, contents} <- File.read(filename),
         data <- String.trim(contents) do
      parse_services_json(data)
    else
      _other ->
        raise("Services file is not found: #{inspect(filename)}")
    end
  end
end
|
implementations/elixir/ockam/ockam_hub/lib/hub/service/provider.ex
| 0.537284 | 0.428592 |
provider.ex
|
starcoder
|
defmodule Expdf do
  @moduledoc """
  Entry point for parsing PDF binaries into an `Expdf.Document`.
  """

  alias Expdf.{
    Document,
    Parser,
    Header,
    Object,
    ElementHexa,
    Element,
  }

  @doc """
  Parse given string data
  ## Parameters
  - `data` - file content as binary string
  ## Example
  iex> File.read!("./test/test_data/test.pdf") |> Expdf.parse
  true
  """
  def parse(data) do
    # Happy path: lex raw bytes, reject unsupported files, resolve objects,
    # then assemble the Document. Any step's {:error, reason} is passed through.
    with {:ok, parsed_data} <- Parser.parse(data),
         :ok <- check_encrypt(parsed_data),
         :ok <- check_objects(parsed_data),
         {:ok, parsed_data} <- parse_objects(parsed_data) do
      create_document(parsed_data)
    else
      {:error, reason} -> {:error, reason}
    end
  end

  # A PDF with no objects is malformed; downstream code assumes at least one.
  defp check_objects(%Parser{objects: objects}) do
    if Enum.empty?(objects), do: {:error, "Object list not found."}, else: :ok
  end

  # Encrypted PDFs are rejected up front.
  defp check_encrypt(%Parser{xref: xref}) do
    if xref.trailer.encrypt, do: {:error, "Secured pdf file are currently not supported."}, else: :ok
  end

  # Groups parsed objects by their /Type entry, then delegates detail
  # extraction to Document.parse_details/2.
  # NOTE(review): `header` is rebound but unused in both case branches
  # below, which produces compiler warnings.
  def create_document(%Parser{elements: elements} = parser) do
    dictionary = elements
    |> Enum.map(fn {id, object} ->
      {type, header, _} = object
      case Header.get(parser, header, "Type") do
        {:ok, header, nil} -> nil
        {:ok, header, obj} -> {Element.content(obj), id, object}
      end
    end)
    |> Enum.filter(fn val -> !is_nil(val) end)
    |> Enum.group_by(fn {type, id, object} -> type end)
    document = %Document{dictionary: dictionary, trailer: parse_trailer(parser.xref.trailer)}
    Document.parse_details(parser, document)
  end

  # Trailer given as a list: positional indexes become the keyword names.
  defp parse_trailer(structure) when is_list(structure) do
    trailer = structure
    |> Enum.filter(&(!is_nil(&1)))
    |> Enum.with_index
    |> Enum.reduce(Keyword.new(), fn {values, i}, acc ->
      name = i |> to_string |> String.to_atom
      # NOTE(review): the String.match? branch assumes `values` is a binary;
      # a non-number, non-list, non-binary value would raise here — confirm
      # upstream invariants.
      value = cond do
        is_number(values) -> {:numeric, values}
        is_list(values) -> {:array, parse_trailer(values)}
        String.match?(values, ~r/_/) -> {:xref, values}
        true -> parse_header_element("(", values)
      end
      Keyword.put(acc, name, value)
    end)
    %Header{elements: trailer}
  end

  # Trailer given as a map: keys are capitalized into keyword names;
  # nil values are dropped first.
  defp parse_trailer(structure) when is_map(structure) do
    trailer = structure
    |> Enum.filter(fn val ->
      case val do
        {name, values} -> !is_nil(values)
        _ -> !is_nil(val)
      end
    end)
    |> Enum.reduce(Keyword.new(), fn {name, values}, acc ->
      name = name |> to_string |> String.capitalize |> String.to_atom
      value = cond do
        is_number(values) -> {:numeric, values}
        is_list(values) -> {:array, parse_trailer(values)}
        String.match?(values, ~r/_/) -> {:xref, values}
        true -> parse_header_element("(", values)
      end
      Keyword.put(acc, name, value)
    end)
    %Header{elements: trailer}
  end

  # Resolves every raw object into an Object struct, expanding object
  # streams (/ObjStm) into the objects they embed, then re-sorts the
  # elements by numeric object id ("<num>_<gen>" keys).
  defp parse_objects(%Parser{objects: objects} = parser) do
    elements = objects
    |> Enum.reduce(%{}, fn {id, structure}, acc ->
      # Walk each token of the object; reduce_while lets the ObjStm branch
      # halt early once embedded objects have been extracted.
      {header, content, new_objects} = structure
      |> Enum.with_index
      |> Enum.reduce_while({%Header{}, "", []}, fn {part, i}, {header, content, new_objects} ->
        {obj_type, obj_val, obj_offset, obj_content} = part
        {new_header, new_content, new_objects, break} = case obj_type do
          "[" ->
            # Array token: parse each sub-element, preserving order.
            elements = Enum.reduce(obj_val, [], fn sub_element, elements ->
              {sub_type, sub_val, sub_offset, sub_content} = sub_element
              [parse_header_element(sub_type, sub_val) | elements]
            end)
            |> Enum.reverse()
            {%Header{elements: elements}, content, [], false}
          "<<" ->
            # Dictionary token becomes the object's header; content resets.
            {parse_header(obj_val), "", [], false}
          "stream" ->
            obj_content = Enum.at(obj_content, 0, obj_val)
            # NOTE(review): the first clause matches a 3-tuple while the
            # second matches a 2-tuple from Header.get/3 — confirm which
            # shapes Header.get can actually return here.
            case Header.get(parser, header, "Type") do
              {:ok, header, nil} ->
                {header, content, [], false}
              {:ok, obj} ->
                if obj.val == "ObjStm" do
                  # ObjStm payload = "<id> <offset> ... <serialized objects>".
                  matches = Regex.run(~r/^((\d+\s+\d+\s*)*)(.*)$/s, content)
                  new_content = matches |> Enum.at(3)
                  # Extract xrefs
                  # NOTE(review): Regex.split/3 expects keyword options
                  # (trim: true, include_captures: true); the bare atom list
                  # here is not a valid options list — verify this code path.
                  table = Regex.split(~r/(\d+\s+\d+\s*)/s, Enum.at(matches, 1), [:trim, :include_captures])
                  |> Enum.into(%{}, fn xref ->
                    [id, position] = String.split(String.trim(xref), " ")
                    {position, id}
                  end)
                  positions = Map.keys(table) |> Enum.sort
                  # NOTE(review): positions are string keys sorted
                  # lexicographically and then used as String.slice/3
                  # offsets — presumably they should be integers; confirm.
                  new_objects = positions
                  |> Enum.with_index
                  |> Enum.map(fn {position, i} ->
                    id = "#{Map.get(table, position) |> to_string}_0"
                    next_position = Enum.at(positions, i + 1, byte_size(content))
                    sub_content = String.slice(content, position, next_position - position)
                    sub_header = Header.parse(sub_content, parser)
                    Object.new(parser, sub_header, "")
                  end)
                  {header, obj_content, new_objects, true}
                else
                  {header, obj_content, [], false}
                end
              _ ->
                {header, content, [], false}
            end
          _ ->
            # Any other token: parse it as a single-element header if possible.
            element = parse_header_element(obj_type, obj_val)
            if element do
              {%Header{elements: [element]}, content, [], false}
            else
              {header, content, [], false}
            end
        end
        if break, do: {:halt, {new_header, new_content, new_objects}}, else: {:cont, {new_header, new_content, new_objects}}
      end)
      if Enum.empty?(new_objects) do
        # First writer for an id wins; later duplicates are ignored.
        case Map.has_key?(acc, id) do
          true -> acc
          false ->
            obj = Object.new(parser, header, content)
            Map.put(acc, id, obj)
        end
      else
        # NOTE(review): Enum.map starts each Map.put from the same `acc`,
        # so all but the last put are discarded and a list (not a map) is
        # returned — a reduce is probably intended. Verify upstream.
        new_objects
        |> Enum.map(fn {id, obj} ->
          Map.put(acc, id, obj)
        end)
      end
    end)
    # Sort ids numerically by the part before "_" and rebuild as a keyed list.
    elements = elements
    |> Map.keys
    |> Enum.sort(fn id1, id2 ->
      [i1, _] = String.split(to_string(id1), "_")
      [i2, _] = String.split(to_string(id2), "_")
      String.to_integer(i1) < String.to_integer(i2)
    end)
    |> Enum.map(fn id ->
      {id, Map.get(elements, id)}
    end)
    {:ok, %{parser | elements: elements}}
  end

  # A dictionary arrives as a flat [name, value, name, value, ...] token
  # list; walk it two entries at a time (odd indexes are names).
  defp parse_header(structure) do
    count = Enum.count(structure)
    acc = -1..count - 1
    |> Enum.drop_every(2)
    |> Enum.reduce(Keyword.new(), fn i, acc ->
      {_, name, _, _} = Enum.at(structure, i)
      {type, val, _, _} = Enum.at(structure, i + 1)
      Keyword.put(acc, String.to_atom(name), parse_header_element(type, val))
    end)
    %Header{elements: acc}
  end

  # Converts a single lexed token into its tagged element representation.
  # Returns nil for structural tokens that carry no value.
  defp parse_header_element(type, val) do
    case type do
      "<<" -> parse_header(val)
      "numeric" -> {:numeric, float_val(val)}
      "boolean" -> {:boolean, String.downcase(val) == "true"}
      "null" -> nil
      "(" ->
        # Literal string: try a date first, fall back to plain string.
        val = "(#{val})"
        case Element.parse(:date, val) do
          false -> Element.parse(:string, val)
          date -> date
        end
      "<" ->
        # Hex string: decode then re-parse as a literal string.
        parse_header_element("(", ElementHexa.decode(val))
      "/" ->
        Element.parse(:name, "/#{val}")
      "[" ->
        values = Enum.reduce(val, [], fn {sub_type, sub_val, _, _}, acc ->
          [parse_header_element(sub_type, sub_val) | acc]
        end)
        {:array, Enum.reverse(values)}
      "objref" -> {:xref, val}
      "endstream" -> nil
      "obj" -> nil
      "" -> nil
    end
  end

  # Extracts the leading numeric prefix of `value` as a float; 0.0 when
  # the string does not start with digits.
  defp float_val(value) do
    case Regex.run(~r/^[0-9.]+/, value) do
      [float] ->
        {float, _} = Float.parse(float)
        float
      _ -> 0.0
    end
  end
end
|
lib/expdf.ex
| 0.715821 | 0.416915 |
expdf.ex
|
starcoder
|
defmodule Ecto.Adapter do
  @moduledoc """
  This module specifies the adapter API that an adapter is required to
  implement.
  """

  @type t :: module

  @typedoc "Ecto.Query metadata fields (stored in cache)"
  @type query_meta :: %{prefix: binary | nil, sources: tuple, assocs: term,
                        preloads: term, select: term, fields: [term]}

  @typedoc "Ecto.Schema metadata fields"
  @type schema_meta :: %{source: source, schema: atom, context: term, autogenerate_id: {atom, :id | :binary_id}}

  @type source :: {prefix :: binary | nil, table :: binary}
  @type fields :: Keyword.t
  @type filters :: Keyword.t
  @type constraints :: Keyword.t
  @type returning :: [atom]
  @type prepared :: term
  @type cached :: term
  @type process :: (field :: Macro.t, value :: term, context :: term -> term)
  @type autogenerate_id :: {field :: atom, type :: :id | :binary_id, value :: term} | nil

  # Private types shared by the callback signatures below.
  @typep repo :: Ecto.Repo.t
  @typep options :: Keyword.t

  @doc """
  The callback invoked in case the adapter needs to inject code.
  """
  @macrocallback __before_compile__(env :: Macro.Env.t) :: Macro.t

  @doc """
  Ensure all applications necessary to run the adapter are started.
  """
  @callback ensure_all_started(repo, type :: :application.restart_type) ::
            {:ok, [atom]} | {:error, atom}

  @doc """
  Returns the childspec that starts the adapter process.
  """
  @callback child_spec(repo, options) :: Supervisor.Spec.spec

  ## Types

  @doc """
  Returns the loaders for a given type.
  It receives the primitive type and the Ecto type (which may be
  primitive as well). It returns a list of loaders with the given
  type usually at the end.
  This allows developers to properly translate values coming from
  the adapters into Ecto ones. For example, if the database does not
  support booleans but instead returns 0 and 1 for them, you could
  add:
      def loaders(:boolean, type), do: [&bool_decode/1, type]
      def loaders(_primitive, type), do: [type]
      defp bool_decode(0), do: {:ok, false}
      defp bool_decode(1), do: {:ok, true}
  All adapters are required to implement a clause for `:binary_id` types,
  since they are adapter specific. If your adapter does not provide binary
  ids, you may simply use Ecto.UUID:
      def loaders(:binary_id, type), do: [Ecto.UUID, type]
      def loaders(_primitive, type), do: [type]
  """
  @callback loaders(primitive_type :: Ecto.Type.primitive, ecto_type :: Ecto.Type.t) ::
            [(term -> {:ok, term} | :error) | Ecto.Type.t]

  @doc """
  Returns the dumpers for a given type.
  It receives the primitive type and the Ecto type (which may be
  primitive as well). It returns a list of dumpers with the given
  type usually at the beginning.
  This allows developers to properly translate values coming from
  the Ecto into adapter ones. For example, if the database does not
  support booleans but instead returns 0 and 1 for them, you could
  add:
      def dumpers(:boolean, type), do: [type, &bool_encode/1]
      def dumpers(_primitive, type), do: [type]
      defp bool_encode(false), do: {:ok, 0}
      defp bool_encode(true), do: {:ok, 1}
  All adapters are required to implement a clause for `:binary_id` types,
  since they are adapter specific. If your adapter does not provide
  binary ids, you may simply use Ecto.UUID:
      def dumpers(:binary_id, type), do: [type, Ecto.UUID]
      def dumpers(_primitive, type), do: [type]
  """
  @callback dumpers(primitive_type :: Ecto.Type.primitive, ecto_type :: Ecto.Type.t) ::
            [(term -> {:ok, term} | :error) | Ecto.Type.t]

  @doc """
  Called to autogenerate a value for id/embed_id/binary_id.
  Returns the autogenerated value, or nil if it must be
  autogenerated inside the storage or raise if not supported.
  """
  @callback autogenerate(field_type :: :id | :binary_id | :embed_id) :: term | nil | no_return

  @doc """
  Commands invoked to prepare a query for `all`, `update_all` and `delete_all`.
  The returned result is given to `execute/6`.
  """
  @callback prepare(atom :: :all | :update_all | :delete_all, query :: Ecto.Query.t) ::
            {:cache, prepared} | {:nocache, prepared}

  @doc """
  Executes a previously prepared query.
  It must return a tuple containing the number of entries and
  the result set as a list of lists. The result set may also be
  `nil` if a particular operation does not support them.
  The `meta` field is a map containing some of the fields found
  in the `Ecto.Query` struct.
  It receives a process function that should be invoked for each
  selected field in the query result in order to convert them to the
  expected Ecto type. The `process` function will be nil if no
  result set is expected from the query.
  """
  @callback execute(repo, query_meta, query, params :: list(), process | nil, options) :: result when
            result: {integer, [[term]] | nil} | no_return,
            query: {:nocache, prepared} |
                   {:cached, cached} |
                   {:cache, (cached -> :ok), prepared}

  @doc """
  Inserts multiple entries into the data store.
  """
  @callback insert_all(repo, schema_meta, header :: [atom], [fields], returning, options) ::
            {integer, [[term]] | nil} | no_return

  @doc """
  Inserts a single new struct in the data store.
  ## Autogenerate
  The primary key will be automatically included in `returning` if the
  field has type `:id` or `:binary_id` and no value was set by the
  developer or none was autogenerated by the adapter.
  """
  @callback insert(repo, schema_meta, fields, returning, options) ::
            {:ok, fields} | {:invalid, constraints} | no_return

  @doc """
  Updates a single struct with the given filters.
  While `filters` can be any record column, it is expected that
  at least the primary key (or any other key that uniquely
  identifies an existing record) be given as a filter. Therefore,
  in case there is no record matching the given filters,
  `{:error, :stale}` is returned.
  """
  @callback update(repo, schema_meta, fields, filters, returning, options) ::
            {:ok, fields} | {:invalid, constraints} |
            {:error, :stale} | no_return

  @doc """
  Deletes a single struct with the given filters.
  While `filters` can be any record column, it is expected that
  at least the primary key (or any other key that uniquely
  identifies an existing record) be given as a filter. Therefore,
  in case there is no record matching the given filters,
  `{:error, :stale}` is returned.
  """
  @callback delete(repo, schema_meta, filters, options) ::
            {:ok, fields} | {:invalid, constraints} |
            {:error, :stale} | no_return
end
|
lib/ecto/adapter.ex
| 0.905003 | 0.436892 |
adapter.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.