code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
defmodule Pummpcomm.Monitor.BloodGlucoseMonitor do
@moduledoc """
This module provides high-level continuous glucose monitor functions, such as the ability to retrieve a specific
number of historical minutes of cgm data. It manages the process of pulling the right number of cgm pages from the
insulin pump, along with handling Medtronic cgm-specific details like writing reference timestamps for accurate cgm
decoding.
"""
require Logger
alias Pummpcomm.Cgm
alias Pummpcomm.Cgm.Timestamper
alias Pummpcomm.Monitor.BloodGlucoseMonitor
@enforce_keys [:cgm]
defstruct [:cgm]
def get_sensor_values(%BloodGlucoseMonitor{cgm: cgm}, minutes_back, timezone) do
oldest_allowed = oldest_entry_allowed(minutes_back, timezone)
Logger.info(fn ->
"Searching until we find a cgm entry older than #{inspect(oldest_allowed)}"
end)
{:ok, %{page_number: page_number}} = cgm.get_current_cgm_page()
{:ok, fetch_and_filter_page(cgm, page_number, [], oldest_allowed, page_number - 5)}
end
defp fetch_and_filter_page(_, -1, sensor_values, _, _), do: Enum.reverse(sensor_values)
defp fetch_and_filter_page(_, page_number, sensor_values, oldest_allowed, lowest_page_allowed)
when page_number < lowest_page_allowed do
Logger.warn(fn ->
"Reached max page fetches before finding an entry older than #{inspect(oldest_allowed)}"
end)
Enum.reverse(sensor_values)
end
defp fetch_and_filter_page(cgm, page_number, sensor_values, oldest_allowed, lowest_page_allowed) do
{:ok, values} = cgm.read_cgm_page(page_number)
case Cgm.needs_timestamp?(values) do
true ->
Logger.info(fn -> "Writing cgm timestamp on page #{page_number}" end)
:ok = cgm.write_cgm_timestamp()
fetch_and_filter_page(
cgm,
page_number,
sensor_values,
oldest_allowed,
lowest_page_allowed
)
false ->
newest_first_values = Enum.reverse(values)
{oldest_reached, sensor_values} =
newest_first_values
|> Enum.filter(&filter_glucose_value/1)
|> filter_by_date(sensor_values, oldest_allowed)
case oldest_reached do
true ->
Enum.reverse(sensor_values)
false ->
fetch_and_filter_page(
cgm,
page_number - 1,
sensor_values,
oldest_allowed,
lowest_page_allowed
)
end
end
end
defp filter_by_date([], allowed_entries, _), do: {false, allowed_entries}
defp filter_by_date([head | tail], allowed_entries, oldest_allowed) do
{_, event_data} = head
case Timex.before?(event_data.timestamp, oldest_allowed) do
true -> {true, allowed_entries}
false -> filter_by_date(tail, [head | allowed_entries], oldest_allowed)
end
end
defp filter_glucose_value({event_key, _}) do
event_key in Timestamper.relative_events()
end
defp oldest_entry_allowed(minutes_back, timezone) do
timezone |> Timex.now() |> Timex.shift(minutes: -minutes_back) |> DateTime.to_naive()
end
end
|
lib/pummpcomm/monitor/blood_glucose_monitor.ex
| 0.768473 | 0.622273 |
blood_glucose_monitor.ex
|
starcoder
|
defmodule Gettext.Interpolation do
@moduledoc false
@interpolation_regex ~r/
(?<left>) # Start, available through :left
%{ # Literal '%{'
[^}]+ # One or more non-} characters
} # Literal '}'
(?<right>) # End, available through :right
/x
@doc """
Extracts interpolations from a given string.
This function extracts all interpolations in the form `%{interpolation}`
contained inside `str`, converts them to atoms and then returns a list of
string and interpolation keys.
## Examples
iex> Gettext.Interpolation.to_interpolatable("Hello %{name}, you have %{count} unread messages")
["Hello ", :name, ", you have ", :count, " unread messages"]
"""
@spec to_interpolatable(binary) :: [binary | atom]
def to_interpolatable(str) do
split = Regex.split(@interpolation_regex, str, on: [:left, :right], trim: true)
Enum.map split, fn
"%{" <> rest -> rest |> String.rstrip(?}) |> String.to_atom
segment -> segment
end
end
@doc """
Interpolate an interpolatable with the given bindings.
This function takes an interpolatable list returned from `to_interpolatable/1` and bindings
and returns the interpolated string. If it encounters an atom that should be interpolated
but is missing from the bindings, it will call the provided `handle_missing_binding` function.
The callback will be called with the missing binding, the original string and the locale.
See also the default implementation in `Gettext`.
## Examples
iex> msgid = "Hello %{name}, you have %{count} unread messages"
iex> interpolatable = Gettext.Interpolation.to_interpolatable(msgid)
iex> good_bindings = %{name: "José", count: 3}
iex> Gettext.Interpolation.interpolate(interpolatable, :ok, good_bindings)
{:ok, "Hello José, you have 3 unread messages"}
iex> bad_bindings = %{name: "José"}
iex> Gettext.Interpolation.interpolate(interpolatable, :ok, bad_bindings)
{:missing_bindings, "Hello José, you have %{count} unread messages", [:count]}
"""
def interpolate(interpolatable, key, bindings) do
interpolate(interpolatable, key, bindings, [], [])
end
defp interpolate([string | segments], key, bindings, strings, missing) when is_binary(string) do
interpolate(segments, key, bindings, [string | strings], missing)
end
defp interpolate([atom | segments], key, bindings, strings, missing) when is_atom(atom) do
case bindings do
%{^atom => value} ->
interpolate(segments, key, bindings, [to_string(value) | strings], missing)
%{} ->
interpolate(segments, key, bindings, ["%{" <> Atom.to_string(atom) <> "}" | strings], [atom | missing])
end
end
defp interpolate([], key, _bindings, strings, []) do
{key, IO.iodata_to_binary(Enum.reverse(strings))}
end
defp interpolate([], _key, _bindings, strings, missing) do
{:missing_bindings, IO.iodata_to_binary(Enum.reverse(strings)), missing}
end
@doc """
Returns all the interpolation keys contained in the given string or list of
segments.
This function returns a list of all the interpolation keys (patterns in the
form `%{interpolation}`) contained in its argument.
If the argument is a segment list, i.e., a list of strings and atoms where
atoms represent interpolation keys, then only the atoms in the list are
returned.
## Examples
iex> Gettext.Interpolation.keys("Hey %{name}, I'm %{other_name}")
[:name, :other_name]
iex> Gettext.Interpolation.keys(["Hello ", :name, "!"])
[:name]
iex> Gettext.Interpolation.keys(["Hello ", :name, "! Goodbye", :name])
[:name]
"""
@spec keys(binary | [atom]) :: [atom]
def keys(str) when is_binary(str),
do: str |> to_interpolatable |> keys
def keys(segments) when is_list(segments),
do: Enum.filter(segments, &is_atom/1) |> Enum.uniq
end
|
data/web/deps/gettext/lib/gettext/interpolation.ex
| 0.881168 | 0.527986 |
interpolation.ex
|
starcoder
|
defmodule Tesla.Middleware.Cacher do
@behaviour Tesla.Middleware
@moduledoc """
Cache the result in redis.
### Example
```elixir
defmodule MyClient do
use Tesla
plug Tesla.Middleware.Cacher,
redix: :redix,
expiry: :timer.seconds(2),
timeout: :timer.seconds(5),
prefix: :tesla_cacher
end
```
"""
require Logger
@redix_timeout 5000
@compression_level 6
@impl true
def call(env, next, opts) do
opts = [
redix: Keyword.fetch!(opts, :redix),
expiry: Keyword.get(opts, :expiry, :infinity),
timeout: Keyword.get(opts, :timeout, @redix_timeout),
prefix: Keyword.get(opts, :prefix, :tesla_cacher)
]
env
|> lookup(opts)
|> run(next)
|> insert(opts)
end
def lookup(%Tesla.Env{method: :get} = env, opts) do
key = make_key(env, opts)
{redix_lookup(key, opts), env}
end
def lookup(env, _), do: {:miss, env}
def run({{:hit, env}, _}, _next) when not is_nil(env) do
{:hit, env}
end
def run({_, env}, next) do
Tesla.run(env, next)
|> handle_run()
end
def insert({:miss, %Tesla.Env{method: :get, status: status} = env}, opts) when status == 200 do
key = make_key(env, opts)
value = env |> :erlang.term_to_binary([compressed: @compression_level])
status = redix_insert(key, value, opts)
{status, env}
end
def insert({_, %Tesla.Env{} = env}, _opts), do: {:ok, env}
def insert(result, _opts), do: result
# private
defguardp is_conn(value) when is_atom(value) or is_pid(value)
defp make_key(%Tesla.Env{url: url, query: query},
redix: _,
expiry: _,
timeout: _,
prefix: prefix
) do
fqurl = Tesla.build_url(url, query)
"#{prefix}|#{fqurl}"
end
defp redix_lookup(key, redix: conn, expiry: _, timeout: timeout, prefix: _)
when is_conn(conn) do
Redix.command(conn, ["GET", key], timeout: timeout)
|> handle_redix_lookup()
end
defp handle_redix_lookup({_, nil}) do
{:miss, nil}
end
defp handle_redix_lookup({:ok, result}) do
{:hit, :erlang.binary_to_term(result, [:safe])}
end
defp handle_redix_lookup({_, msg}) do
Logger.warn("TeslaCacher: unexpected cache miss: #{inspect msg}")
{:miss, msg}
end
defp handle_run({:error, :timeout} = result), do: result
defp handle_run({:ok, env}), do: {:miss, env}
defp redix_insert(key, value, redix: conn, expiry: ttl, timeout: timeout, prefix: _)
when is_conn(conn) and is_integer(ttl) do
Redix.command(conn, ["SET", key, value, "PX", ttl], timeout: timeout)
|> handle_redix_insert()
end
defp redix_insert(key, value, redix: conn, expiry: _, timeout: timeout, prefix: _)
when is_conn(conn) do
Redix.command(conn, ["SET", key, value], timeout: timeout)
|> handle_redix_insert()
end
defp handle_redix_insert({:ok, _}), do: :ok
defp handle_redix_insert(result) do
Logger.warn("TeslaCacher: unable to insert, got: #{inspect result}")
:ok
end
end
|
lib/tesla_cacher.ex
| 0.851506 | 0.703957 |
tesla_cacher.ex
|
starcoder
|
defmodule Feedistiller.Feeder do
@moduledoc """
Elixir encapsulation over the `feeder` module, allowing full usage
of the stream API. This is a very straightforward encapsulation with
direct mapping to `feeder` functions and minimal sugar over `feeder`
records to map them to structs.
"""
alias Feedistiller.Feeder
@type ws :: String.t | nil
@type wd :: DateTime.t | nil
@type wl :: integer | nil
defmodule Feed do
@moduledoc "Mapping to `feeder` `feed` record"
defstruct author: nil, id: nil, image: nil, language: nil, link: nil,
subtitle: nil, summary: nil, title: nil, updated: nil
@type t :: %__MODULE__{author: Feeder.ws, id: Feeder.ws, image: Feeder.ws,
language: Feeder.ws, link: Feeder.ws, subtitle: Feeder.ws,
summary: Feeder.ws, title: Feeder.ws, updated: Feeder.wd}
end
defmodule Entry do
@moduledoc "Mapping to `feeder` `entry` record"
defstruct author: nil, duration: nil, enclosure: nil, id: nil, image: nil,
link: nil, subtitle: nil, summary: nil, title: nil, updated: nil
@type t :: %__MODULE__{author: Feeder.ws, duration: Feeder.wl, enclosure: nil | Feeder.Enclosure.t,
id: Feeder.ws, image: Feeder.ws, link: Feeder.ws,
subtitle: Feeder.ws, summary: Feeder.ws, title: Feeder.ws,
updated: Feeder.wd}
end
defmodule Enclosure do
@moduledoc "Mapping to `feeder` `enclosure` record"
defstruct url: nil, size: nil, type: nil
@type t :: %__MODULE__{url: Feeder.ws, size: Feeder.wl, type: Feeder.ws}
end
defmodule Channel do
@moduledoc """
Holds feed + entries informations. This is used as a default container
to return feed information when `Feeder.parse/1` is used.
"""
defstruct feed: %Feeder.Feed{}, entries: []
@type t :: %__MODULE__{feed: Feeder.Feed.t, entries: list(Feeder.Entry.t)}
end
@doc """
Parse a file. Mapping to `:feeder.file/2` with default options.
See `xml_sax_parser` documentation for full result type (in case of error, an
incomplete `Channel` is returned as the last item of the error tuple).
"""
@spec file(String.t) :: {:ok, Channel.t, String.t } | {term, term, term, term, Channel.t}
def file(filename) do
file(filename, default_opts())
end
@doc """
Parse a file. Mapping to `:feeder.file/2`.
See `xml_sax_parser` documentation for full result type (in case of error, an
incomplete accumulator is returned as the last item of the error tuple).
"""
@spec file(String.t, list) :: {:ok, term, String.t } | {term, term, term, term, term}
def file(filename, opts) do
:feeder.file(filename, transform_opts(opts))
end
@doc """
Parse some data.
If the input parameter is a string, it will map to `:feeder.stream/2` with default options.
If it's a prop list, it will map to `:feeder.stream/2` after calling your continuation function
once to bootstrap the data (curiously `xml_sax_parser` does not do that automatically).
See `xml_sax_parser` documentation for full result type (in case of error, an
incomplete `Channel` is returned as the last item of the error tuple).
"""
@spec stream(String.t | Keyword.t ) :: {:ok, Channel.t, String.t } | {term, term, term, term, Channel.t}
def stream(opts = [_|_]) do
{data, state} = opts[:continuation_fun].(opts[:continuation_state])
stream(data, Keyword.put(opts, :continuation_state, state))
end
def stream(<<data :: binary>>) do
stream(data, default_opts())
end
@doc """
Parse a file. Mapping to `:feeder.stream/2`.
See `xml_sax_parser` documentation for full result type (in case of error, an
incomplete accumulator is returned as the last item of the error tuple).
"""
@spec stream(String.t, Keyword.t) :: {:ok, term, String.t } | {term, term, term, term, term}
def stream(data, opts) do
:feeder.stream(data, transform_opts(opts))
end
defp transform_opts(opts) do
if opts[:event_fun] do
Keyword.put(opts, :event_fun, fn e, acc -> opts[:event_fun].(event(e), acc) end)
else
transform_opts(Keyword.merge(default_opts(), opts))
end
end
defp default_opts do
[
event_state: %Channel{},
event_fun: &efun/2
]
end
defp efun(:endFeed, channel), do: %{channel | entries: :lists.reverse(channel.entries)}
defp efun(f = %Feed{}, channel), do: %{channel | feed: f}
defp efun(e = %Entry{}, channel), do: %{channel | entries: [e | channel.entries]}
defp is_valid_utf8?(<<_ :: utf8, rest :: binary>>), do: is_valid_utf8?(rest)
defp is_valid_utf8?(<<>>), do: :true
defp is_valid_utf8?(<<_ :: binary>>), do: :false
defp to_utf8(<<s :: binary>>) do
if is_valid_utf8?(s) do
s
else
# Try latin1 and if invalid, truncate
case :unicode.characters_to_binary(s, :latin1) do
<<s :: binary>> -> s
{_, <<s :: binary>>, _} -> s <> "-TRUNCATED"
end
end
end
defp ws(:undefined), do: nil
defp ws(any), do: to_utf8(any)
defp wd(:undefined), do: nil
defp wd(any) do
case Timex.parse(any, "{RFC1123}") do
{:error, _} -> nil
{:ok, date} -> date
end
end
defp wl(:undefined), do: nil
defp wl(any) do
try do
String.to_integer(any)
rescue
_ -> nil
end
end
defp event(:endFeed), do: :endFeed
defp event({:feed, {:feed, author, id, image, language, link, subtitle, summary, title, updated, _url}}) do
%Feeder.Feed{author: ws(author), id: ws(id), image: ws(image), language: ws(language),
link: ws(link), subtitle: ws(subtitle), summary: ws(summary),
title: ws(title), updated: wd(updated)}
end
defp event({:entry, {:entry, author, _category, duration, encl, id, image, link, subtitle, summary, title, updated}}) do
%Feeder.Entry{author: ws(author), duration: ws(duration), enclosure: enclosure(encl), id: ws(id),
image: ws(image), link: ws(link), subtitle: ws(subtitle), summary: ws(summary),
title: ws(title), updated: wd(updated)}
end
defp enclosure(:undefined), do: nil
defp enclosure({:enclosure, url, size, type}) do
%Feeder.Enclosure{url: ws(url), size: wl(size), type: ws(type)}
end
end
|
lib/feedistiller/feeder.ex
| 0.834811 | 0.499817 |
feeder.ex
|
starcoder
|
defmodule Scribe.Formatter.Index do
@moduledoc false
defstruct row: 0, row_max: 0, col: 0, col_max: 0
end
defmodule Scribe.Formatter.Line do
@moduledoc false
defstruct data: [], widths: [], style: nil, opts: [], index: nil
alias Scribe.Formatter.{Index, Line}
def format(%Line{index: %Index{row: 0}} = line) do
top(line) <> data(line) <> bottom(line)
end
def format(%Line{} = line) do
data(line) <> bottom(line)
end
def data(%Line{} = line) do
%Line{
data: row,
widths: widths,
style: style,
opts: opts,
index: index
} = line
border = style.border_at(index.row, 0, index.row_max, index.col_max)
left_edge = border.left_edge
line =
Enum.reduce(0..(index.col_max - 1), "", fn x, acc ->
b = style.border_at(index.row, x, index.row_max, index.col_max)
width = Enum.at(widths, x)
value = Enum.at(row, x)
cell_value =
case opts[:colorize] do
false -> value |> cell(width)
_ -> value |> cell(width) |> colorize(style.color(value))
end
acc <> cell_value <> b.right_edge
end)
left_edge <> line <> "\n"
end
def cell(x, width) do
len = min(String.length(" #{inspect(x)} "), width)
padding = String.duplicate(" ", width - len)
truncate(" #{inspect(x)}#{padding}", width - 2) <> " "
end
def cell_value(x, padding, max_width) when padding >= 0 do
truncate(" #{inspect(x)}#{String.duplicate(" ", padding)} ", max_width)
end
defp truncate(elem, width) do
String.slice(elem, 0..width)
end
def colorize(string, color) do
"#{color}#{string}#{IO.ANSI.reset()}"
end
def top(%Line{widths: widths, style: style, index: index, opts: opts}) do
border = style.border_at(index.row, 0, index.row_max, index.col_max)
top_left = border.top_left_corner
line =
Enum.reduce(0..(index.col_max - 1), "", fn x, acc ->
b = style.border_at(index.row, x, index.row_max, index.col_max)
width = Enum.at(widths, x)
acc <> String.duplicate(b.top_edge, width) <> b.top_right_corner
end)
color_prefix =
if Keyword.get(opts, :colorize, true) do
style.default_color()
else
""
end
color_prefix <> top_left <> add_newline(line)
end
def bottom(%Line{widths: widths, style: style, index: index}) do
border = style.border_at(index.row, 0, index.row_max, index.col_max)
bottom_left = border.bottom_left_corner
line =
Enum.reduce(0..(index.col_max - 1), "", fn x, acc ->
b = style.border_at(index.row, x, index.row_max, index.col_max)
width = Enum.at(widths, x)
acc <> String.duplicate(b.bottom_edge, width) <> b.bottom_right_corner
end)
bottom_left <> add_newline(line)
end
def add_newline(""), do: ""
def add_newline(line), do: line <> "\n"
end
|
lib/formatter.ex
| 0.691289 | 0.590336 |
formatter.ex
|
starcoder
|
defmodule Quarto do
@external_resource "./README.md"
@moduledoc """
#{File.read!(@external_resource) |> String.split("---", parts: 2) |> List.last()}
"""
defmacro __using__(opts) do
quote do
@defaults unquote(opts)
def paginate(queryable, opts \\ [], repo_opts \\ []) do
opts = Keyword.merge(@defaults, opts)
Quarto.paginate(queryable, opts, __MODULE__, repo_opts)
end
end
end
@spec paginate(Ecto.Query.t(), nil | maybe_improper_list, Ecto.Repo.t(), Keyword.t()) ::
Quarto.Page.t()
@doc """
Paginate/4
Fetches all the results matching the query within the cursors.
Options
* `:after` - Fetch the records after this cursor.
* `:before` - Fetch the records before this cursor.
* `:coalesce` - Function that receives the field
* `:cursor` - Module to use for encoding/decoding the cursor
* `:cursor_fields` - Module to use for building the cursor from a record
* `:include_total_count` - Set this to true to return the total number of records matching the query. Note that this number will be capped by :total_count_limit. Defaults to false.
* `:total_count_primary_key_field` - Running count queries on specified column of the table
* `:limit` - Limits the number of records returned per page. Note that this number will be capped by :maximum_limit. Defaults to `50`.
* `:maximum_limit` - Sets a maximum cap for :limit. This option can be useful when :limit is set dynamically (e.g from a URL param set by a user) but you still want to enfore a maximum. Defaults to 500.
* `:total_count_limit` - Running count queries on tables with a large number of records is expensive so it is capped by default. Can be set to `:infinity` in order to count all the records. Defaults to 10,000.
Repo options
This will be passed directly to Ecto.Repo.all/2, as such any option supported by this function can be used here.
"""
def paginate(queryable, opts, repo, repo_opts \\ []) do
config = Quarto.Config.new([queryable: queryable] ++ opts)
sorted_entries = entries(queryable, config, repo, repo_opts)
paginated_entries = paginate_entries(sorted_entries, config)
{total_count, total_count_cap_exceeded} =
Quarto.Ecto.QueryTotal.total_count(queryable, config, repo, repo_opts)
%Quarto.Page{
entries: paginated_entries,
metadata: %Quarto.Page.Metadata{
after: after_cursor(paginated_entries, sorted_entries, config),
before: before_cursor(paginated_entries, sorted_entries, config),
limit: config.limit,
total_count: total_count,
total_count_cap_exceeded: total_count_cap_exceeded
}
}
end
@doc """
Build the cursor for a given entry
It's a helper function for taking the (nested) fields used for ordering and constructing the
list that can be passed to the cursor encoder.
In addition to this it's also possible to pass in the queryable used to generate the original query and cursors
and derive the (nested) fields from that.
Building the opaque cursor from the list `cursor_for_entry/3` generates can be done by e.g. the `Quarto.Cursor.Base64` module
or any other module that implements the `Quarto.Cursor` behaviour.
iex> Quarto.cursor_for_entry(%User{id: 1}, :id)
[1]
iex> Quarto.cursor_for_entry(%User{id: 1}, [:id, :name])
[1, nil]
iex> Quarto.cursor_for_entry(%User{id: 1, profile: %Profile{title: "A profile"}}, {:profile, :title})
["A profile"]
iex> Quarto.cursor_for_entry(%User{id: 1, profile: %Profile{title: "A profile"}}, [[:profile, :title], :id])
["A profile", 1]
iex> Quarto.cursor_for_entry(%User{id: 1, profile: %Profile{title: "A profile"}}, [:id, {:profile, :title}])
[1, "A profile"]
iex> cursor = Quarto.cursor_for_entry(%Post{id: 2, user: %User{id: 1, profile: %Profile{title: "A profile"}}}, {:user, {:profile, :title}})
["A profile"]
iex> Quarto.Cursor.Base64.encode!(cursor)
"g2wAAAABbQAAAAlBIHByb2ZpbGVq"
iex> queryable = Post |> order_by({:desc, :position})
iex> Quarto.cursor_for_entry(%Post{id: 2, position: 3}, queryable)
[3]
iex> queryable = Quarto.Post
...> |> join(:left, [p], u in assoc(p, :user), as: :user)
...> |> preload([p, u], user: u)
...> |> order_by([p, u], desc: u.name)
...> |> order_by({:desc, :position})
iex> cursor = Quarto.cursor_for_entry(%Post{id: 2, position: 3, user: %User{name: "<NAME>"}}, queryable)
["<NAME>", 3]
iex> Quarto.Cursor.Base64.encode!(cursor)
"g2wAAAACbQAAAAtBLiBIYW1pbHRvbmEDag=="
"""
def cursor_for_entry(entry, queryable, opts \\ [])
def cursor_for_entry(entry, %Ecto.Query{} = queryable, opts) do
%{cursor_builder: {m, f, _}} = config = Quarto.Config.new([queryable: queryable] ++ opts)
Kernel.apply(m, f, [entry, config])
end
def cursor_for_entry(entry, cursor_fields, _opts)
when is_list(cursor_fields) do
Enum.map(cursor_fields, &cursor_for_entry_path(entry, &1))
end
def cursor_for_entry(entry, cursor_fields, opts) do
cursor_for_entry(entry, [cursor_fields], opts)
end
defp cursor_for_entry_path(entry, field) when is_atom(field) do
Map.get(entry, field)
end
defp cursor_for_entry_path(entry, {field, key}) do
path = to_list({field, key})
cursor_for_entry_path(entry, path)
end
defp cursor_for_entry_path(entry, path) when is_list(path) do
path = Enum.map(path, &Access.key/1)
get_in(entry, path)
end
defp to_list(nest) when is_tuple(nest) do
case Tuple.to_list(nest) do
[field, {a, b}] -> [field | to_list({a, b})]
[field, value] -> [field, value]
end
end
defp build_cursor_value(entry, %{cursor_builder: {m, f, _}} = config) do
Kernel.apply(m, f, [entry, config]) |> config.cursor.encode!(config)
end
defp after_cursor([], [], _config), do: nil
defp after_cursor(paginated_entries, _sorted_entries, %{before: c_before} = config)
when not is_nil(c_before) do
last_or_nil(paginated_entries, config)
end
defp after_cursor(paginated_entries, sorted_entries, config) do
if last_page?(sorted_entries, config) do
nil
else
last_or_nil(paginated_entries, config)
end
end
defp before_cursor([], [], _config), do: nil
defp before_cursor(_paginated_entries, _sorted_entries, %{after: nil, before: nil}),
do: nil
defp before_cursor(paginated_entries, _sorted_entries, %{after: c_after} = config)
when not is_nil(c_after) do
first_or_nil(paginated_entries, config)
end
defp before_cursor(paginated_entries, sorted_entries, config) do
if first_page?(sorted_entries, config) do
nil
else
first_or_nil(paginated_entries, config)
end
end
defp first_or_nil(entries, config) do
if first = List.first(entries) do
build_cursor_value(first, config)
else
nil
end
end
defp last_or_nil(entries, config) do
if last = List.last(entries) do
build_cursor_value(last, config)
else
nil
end
end
defp first_page?(sorted_entries, %{limit: limit}) do
Enum.count(sorted_entries) <= limit
end
defp last_page?(sorted_entries, %{limit: limit}) do
Enum.count(sorted_entries) <= limit
end
defp entries(queryable, config, repo, repo_opts) do
queryable
|> Quarto.Ecto.Query.paginate(config)
|> repo.all(repo_opts)
end
defp paginate_entries(sorted_entries, %{after: nil, before: before, limit: limit})
when not is_nil(before) do
sorted_entries
|> Enum.take(limit)
|> Enum.reverse()
end
defp paginate_entries(sorted_entries, %{limit: limit}) do
Enum.take(sorted_entries, limit)
end
end
|
lib/quarto.ex
| 0.757794 | 0.492005 |
quarto.ex
|
starcoder
|
defmodule Dispenser.Demands do
@moduledoc """
Tracks the demands of subscribers.
Keeps a constant-time `total/1` of the overall demand.
Used by `Buffer` to keep track of demand for events.
Used by implementations of `Dispenser.AssignmentStrategy`
to determine which subscribers to send to.
"""
@typedoc """
The current demand for one subscriber.
"""
@type demand :: pos_integer()
@typedoc """
A map of all subscribers that have demand > 0, with their demands.
"""
@type subscribers(subscriber) :: %{subscriber => demand()}
@typedoc """
The opaque internal state of the `Demands`.
"""
@opaque t(subscriber) :: %__MODULE__{
subscribers: subscribers(subscriber),
total: non_neg_integer()
}
@enforce_keys [:subscribers, :total]
defstruct [:subscribers, :total]
@doc """
Create a new `Demands`.
"""
@spec new() :: t(subscriber) when subscriber: any()
def new() do
%__MODULE__{subscribers: %{}, total: 0}
end
@doc """
Add some demand for one subscriber. A subscriber can demand as much as it wants.
"""
@spec add(t(subscriber), subscriber, non_neg_integer()) :: t(subscriber)
when subscriber: any()
def add(%__MODULE__{} = state, _subscriber, 0) do
state
end
def add(%__MODULE__{} = state, subscriber, amount) when amount > 0 do
subscribers =
Map.update(state.subscribers, subscriber, amount, fn demand -> demand + amount end)
%__MODULE__{subscribers: subscribers, total: state.total + amount}
end
@doc """
Remove some demand for one subscriber.
Once a subscriber reaches 0 demand, it is no longer tracked by `Demands`.
"""
@spec subtract(t(subscriber), subscriber, non_neg_integer()) :: t(subscriber)
when subscriber: any()
def subtract(%__MODULE__{} = state, _subscriber, 0) do
state
end
def subtract(%__MODULE__{} = state, subscriber, amount) when amount > 0 do
case Map.fetch(state.subscribers, subscriber) do
{:ok, demand} ->
if amount >= demand do
delete(state, subscriber)
else
subscribers = Map.put(state.subscribers, subscriber, demand - amount)
%__MODULE__{subscribers: subscribers, total: state.total - amount}
end
:error ->
state
end
end
@doc """
Get the current demand for one subscriber.
"""
@spec get(t(subscriber), subscriber) :: non_neg_integer() when subscriber: any()
def get(%__MODULE__{} = state, subscriber) do
Map.get(state.subscribers, subscriber, 0)
end
@doc """
Get all subscribers that have demand > 0.
"""
@spec subscribers(t(subscriber)) :: subscribers(subscriber) when subscriber: any()
def subscribers(%__MODULE__{} = state) do
state.subscribers
end
@doc """
Remove the demand of one subscriber.
"""
@spec delete(t(subscriber), subscriber) :: t(subscriber) when subscriber: any()
def delete(%__MODULE__{} = state, subscriber) do
case Map.pop(state.subscribers, subscriber) do
{nil, _subscribers} ->
state
{amount, subscribers} ->
%__MODULE__{subscribers: subscribers, total: state.total - amount}
end
end
@doc """
The total demand of all subscribers.
"""
@spec total(t(subscriber)) :: non_neg_integer() when subscriber: any()
def total(%__MODULE__{} = state) do
state.total
end
@doc """
The total number of subscribers that have demand > 0.
"""
@spec size(t(subscriber)) :: non_neg_integer() when subscriber: any()
def size(%__MODULE__{} = state) do
map_size(state.subscribers)
end
end
|
lib/dispenser/demands.ex
| 0.885102 | 0.612368 |
demands.ex
|
starcoder
|
defmodule Scenic.Primitive.Text do
@moduledoc """
Draw text on the screen.
## Data
`text`
The data for a Text primitive is a bitstring
* `text` - the text to draw
## Styles
This primitive recognizes the following styles
* [`hidden`](Scenic.Primitive.Style.Hidden.html) - show or hide the primitive
* [`fill`](Scenic.Primitive.Style.Fill.html) - fill in the area of the text. Only solid colors!
* [`font`](Scenic.Primitive.Style.Font.html) - name (or key) of font to use
* [`font_size`](Scenic.Primitive.Style.FontSize.html) - point size of the font
* [`font_blur`](Scenic.Primitive.Style.FontBlur.html) - option to blur the characters
* [`text_align`](Scenic.Primitive.Style.TextAlign.html) - alignment of lines of text
* [`text_height`](Scenic.Primitive.Style.TextHeight.html) - spacing between lines of text
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#text/3)
```elixir
graph
|> text( "Some example text", fill: :green, font: :roboto_mono, font_size: 64 )
```
"""
use Scenic.Primitive
alias Scenic.Script
alias Scenic.Primitive
alias Scenic.Primitive.Style
@type t :: String.t()
@type styles_t :: [
:hidden
| :scissor
| :font
| :font_size
| :line_height
| :text_align
| :text_base
| :line_height
]
@styles [
:hidden,
:scissor,
:font,
:font_size,
:line_height,
:text_align,
:text_base,
:line_height
]
@impl Primitive
@spec validate(text :: t()) :: {:ok, t()} | {:error, String.t()}
def validate(text) when is_bitstring(text) do
{:ok, text}
end
def validate(data) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Text specification
Received: #{inspect(data)}
#{IO.ANSI.yellow()}
The data for Text must be a String#{IO.ANSI.default_color()}
"""
}
end
# --------------------------------------------------------
@doc """
Returns a list of styles recognized by this primitive.
"""
@impl Primitive
@spec valid_styles() :: styles_t()
def valid_styles(), do: @styles
# --------------------------------------------------------
# compiling Text is a special case and is handled in Scenic.ViewPort.GraphCompiler
@doc false
@impl Primitive
@spec compile(primitive :: Primitive.t(), styles :: Style.t()) :: Script.t()
def compile(%Primitive{module: __MODULE__}, _styles) do
raise "compiling Text is a special case and is handled in Scenic.ViewPort.GraphCompiler"
end
end
|
lib/scenic/primitive/text.ex
| 0.930561 | 0.755412 |
text.ex
|
starcoder
|
defmodule Bricks.Connector.Unix do
@moduledoc """
Connector for unix domain sockets, using `:gen_tcp`
## Create Options
### All
Ordering: Required first, then alphabetical
Option | Type(s) | Default | Raw `gen_tcp` option
:--------------------- | :---------------- | :------------- | :-----------------------
`:connect_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:bam_window` | `Socket.window` | `10` | `(NONE)`
`:active` | `Socket.active` | `true` | `:active`
`:bind_to_device` | `binary` | `(NONE)` | `:bind_to_device`
`:buffer` | `non_neg_integer` | `(UNKNOWN)` | `:buffer`
`:delay_send?` | `boolean` | `false` | `:delay_send`
`:deliver` | `:port`, `:term` | `(UNKNOWN)` | `:deliver`
`:exit_on_close?` | `boolean` | `true` | `:exit_on_close`
`:header_size` | `non_neg_integer` | `(NONE)` | `:header`
`:high_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:high_msgq_watermark`
`:high_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:high_watermark`
`:line_delimiter` | `char` | `?\\n` | `:line_delimiter`
`:low_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:low_msgq_watermark`
`:low_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:low_watermark`
`:packet_type` | `packet_type` | `:raw` | `:packet`
`:packet_size` | `pos_integer` | `0` (no limit) | `:packet_size`
`:priority` | `non_neg_integer` | `(NONE)` | `:priority`
`:raw_fd` | `non_neg_integer` | `(NONE)` | `:fd`
`:receive_timeout` | `timeout` | `5000` | `(NONE)`
`:send_timeout` | `timeout` | `5000` | `:send_timeout`
`:send_timeout_close?` | `boolean` | `true` | `:send_timeout_close`
`:send_buffer` | `non_neg_integer` | `(NONE)` | `:sndbuf`
`:tcp_module` | `atom` | `(SEE DOCS)` | `:tcp_module`
`:tcp_opts` | `proplist` | `[]` | `(ANY)`
### Timeouts
Option | Type | Default | Raw `gen_tcp` option
:----------------- | :-------- | :------ | :-------------------
`:connect_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:receive_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:send_timeout` | `timeout` | `5000` | `:send_timeout`
These toggle how long you are prepared to wait for an operation to
complete before a timeout error is returned. They are standard
erlang `timeout` values: non-negative integers or `:infinity`.
### Activity Control
Option | Type | Default | Raw `gen_tcp` option
:------------ | :-------------- | :------ | :-------------------
`:active` | `Socket.active` | `true` | `:active`
`:bam_window` | `Socket.window` | `10` | `(NONE)`
See discussion on socket activity modes in the `Bricks.Socket`
module documentation for more information.
### Erlang Options
Option | Type | Default | Raw `gen_tcp` option
:--------------------- | :---------------- | :----------- | :---------------------
`:buffer` | `non_neg_integer` | `(UNKNOWN)` | `:buffer`
`:delay_send?` | `boolean` | `false` | `:delay_send`
`:deliver` | `:port`, `:term` | `(UNKNOWN)` | `:deliver`
`:exit_on_close?` | `boolean` | `true` | `:exit_on_close`
`:header_size` | `non_neg_integer` | `(NONE)` | `:header`
`:high_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:high_msgq_watermark`
`:high_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:high_watermark`
`:line_delimiter` | `char` | `?\\n` | `:line_delimiter`
`:low_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:low_msgq_watermark`
`:low_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:low_watermark`
`:packet_type` | `packet_type` | `:raw` | `:packet`
`:send_timeout_close?` | `boolean` | `true` | `:send_timeout_close`
`:tcp_module` | `atom` | `(SEE DOCS)` | `:tcp_module`
`:tcp_opts` | `proplist` | `[]` | `(ANY)`
#### `:buffer`
The size of the user-level buffer used by the driver. Not to be
confused with options `:send_buffer` and `:receive_buffer`, which
correspond to the Kernel socket buffers. For TCP it is recommended
to have val(buffer) >= val(recbuf) to avoid performance issues
because of unnecessary copying. However, as the size set for recbuf
usually become larger, you are encouraged to use getopts/2 to
analyze the behavior of your operating system.
Note that this is also the maximum amount of data that can be
received from a single `recv` call. If you are using higher than
normal MTU consider setting buffer higher.
#### `:delay_send?`
Normally, when an Erlang process sends to a socket, the driver tries
to send the data immediately. If that fails, the driver uses any
means available to queue up the message to be sent whenever the
operating system says it can handle it. Setting `delay_send: true`
makes all messages queue up. The messages sent to the network are
then larger but fewer. The option affects the scheduling of send
requests versus Erlang processes instead of changing any real
property of the socket. The option is implementation-specific.
#### `:deliver`
When `active: true`, data is delivered on the form `port` :
`{socket_handle, {:data, [h1,..hsz | data]}}` or `term` : `{:tcp,
socket_handle, [h1..hsz | data]}`
#### `:exit_on_close?`
The only reason to set it to false is if you want to continue
sending data to the socket after a close is detected, for example,
if the peer uses `:gen_tcp.shutdown/2` to shut down the write side.
#### `:header_size`
This option is only meaningful if option binary was specified when
the socket was created. If option header is specified, the first
Size number bytes of data received from the socket are elements of a
list, and the remaining data is a binary specified as the tail of
the same list. For example, if set to `2`, the data received matches
`[byte1,byte2|Binary]`
#### `:high_msgq_watermark`
The socket message queue is set to a busy state when the amount of
data on the message queue reaches this limit. Notice that this limit
only concerns data that has not yet reached the ERTS internal socket
implementation. Defaults to `8 kB`.
Senders of data to the socket are suspended if either the socket
message queue is busy or the socket itself is busy.
For more information, see options `:low_msgq_watermark`,
`:high_watermark`, and `:low_watermark`.
Notice that distribution sockets disable the use of
`:high_msgq_watermark` and `:low_msgq_watermark`. Instead use the
distribution buffer busy limit, which is a similar feature.
#### `:high_watermark`
The socket is set to a busy state when the amount of data queued
internally by the ERTS socket implementation reaches this
limit. Defaults to `8 kB`.
Senders of data to the socket are suspended if either the socket
message queue is busy or the socket itself is busy.
For more information, see options low_watermark,
high_msgq_watermark, and low_msqg_watermark.
#### `:line_delimiter`
Sets the line delimiting character for line-oriented protocols
(`:line`). Defaults to `?\n`.
#### `:low_msgq_watermark`
If the socket message queue is in a busy state, the socket message
queue is set in a not busy state when the amount of data queued in
the message queue falls below this limit. Notice that this limit
only concerns data that has not yet reached the ERTS internal socket
implementation. Defaults to `4 kB`.
Senders that are suspended because of either a busy message queue or
a busy socket are resumed when the socket message queue and the
socket are not busy.
For more information, see options `:high_msgq_watermark`,
`:high_watermark`, and `:low_watermark`.
Notice that distribution sockets disable the use of
`:high_msgq_watermark` and `:low_msgq_watermark`. Instead they use
the distribution buffer busy limit, which is a similar feature.
#### `:low_watermark`
If the socket is in a busy state, the socket is set in a not busy
state when the amount of data queued internally by the ERTS socket
implementation falls below this limit. Defaults to `4 kB`.
Senders that are suspended because of a busy message queue or a busy
socket are resumed when the socket message queue and the socket are
not busy.
For more information, see options `:high_watermark`,
`:high_msgq_watermark`, and `:low_msgq_watermark`.
#### `:packet_type`
Defines the type of packets to use for a socket. Possible values:
`:raw` | `0`
: No packaging is done.
`1` | `2` | `4`
: Packets consist of a header specifying the number of bytes in the
packet, followed by that number of bytes. The header length can be
one, two, or four bytes, and containing an unsigned integer in
big-endian byte order. Each send operation generates the header,
and the header is stripped off on each receive operation. The
4-byte header is limited to 2Gb.
`:asn1` | `:cdr` | `:sunrm` | `:fcgi` | `:tpkt` | `:line`
: These packet types only have effect on receiving. When sending a
packet, it is the responsibility of the application to supply a
correct header. On receiving, however, one message is sent to the
controlling process for each complete packet received, and,
similarly, each call to `:gen_tcp.recv/2,3` returns one complete
packet. The header is not stripped off.
The meanings of the packet types are as follows:
- `:asn1` - ASN.1 BER
- `:sunrm` - Sun's RPC encoding
- `:cdr` - CORBA (GIOP 1.1)
- `:fcgi` - Fast CGI
- `:tpkt` - TPKT format [RFC1006]
- `:line` - Line mode, a packet is a line-terminated with newline,
lines longer than the receive buffer are truncated
##### `:http` | `:http_bin`
The Hypertext Transfer Protocol. The packets are returned with the
format according to HttpPacket described in
`:erlang.decode_packet/3` in ERTS. A socket in passive mode returns
`{:ok, packet}` from `:gen_tcp.recv` while an active socket sends
messages like `{http, socket_handle, packet}`.
##### `:httph` | `:httph_bin`
These two types are often not needed, as the socket automatically
switches from `:http`/`:http_bin` to `:httph`/`:httph_bin`
internally after the first line is read. However, there can be
occasions when they are useful, such as parsing trailers from
chunked encoding.
#### `:send_timeout_close?`
Used together with `:send_timeout` to specify whether the socket is to
be automatically closed when the send operation returns
`{:error,:timeout}`. The recommended setting is `true`, which
automatically closes the socket.
#### `:tcp_module`
Overrides which callback module is used. Defaults to `:inet_tcp` for
IPv4 and `:inet6_tcp` for IPv6.
#### `:tcp_opts`
Raw `gen_tcp`/`inet` options proplist. *Appended* to options.
### OS options
Option | Type | Default | Raw `gen_tcp` option
:---------------- | :---------------- | :------------- | :---------------------
`:bind_to_device` | `binary` | `(NONE)` | `:bind_to_device`
`:packet_size` | `pos_integer` | `0` (no limit) | `:packet_size`
`:priority` | `non_neg_integer` | `(NONE)` | `:priority`
`:raw_fd` | `non_neg_integer` | `(NONE)` | `:fd`
`:send_buffer` | `non_neg_integer` | `(NONE)` | `:sndbuf`
#### `:bind_to_device`
Binds a socket to a specific network interface. This option must be
used in a function call that creates a socket, that is,
`:gen_tcp.connect/3,4` or `:gen_tcp.listen/2`
Unlike `getifaddrs/0`, Ifname is encoded a binary. In the unlikely
case that a system is using non-7-bit-ASCII characters in network
device names, special care has to be taken when encoding this
argument.
This option uses the Linux-specific socket option `SO_BINDTODEVICE`,
such as in Linux kernel 2.0.30 or later, and therefore only exists
when the runtime system is compiled for such an operating system.
Before Linux 3.8, this socket option could be set, but could not
retrieved with getopts/2. Since Linux 3.8, it is readable.
The virtual machine also needs elevated privileges, either running
as superuser or (for Linux) having capability `CAP_NET_RAW`.
The primary use case for this option is to bind sockets into Linux VRF instances.
#### `:packet_size`
Sets the maximum allowed length of the packet body. If the packet
header indicates that the length of the packet is longer than the
maximum allowed length, the packet is considered invalid. The same
occurs if the packet header is too large for the socket receive
buffer.
For line-oriented protocols (`line`, `http*`), option `packet_size`
also guarantees that lines up to the indicated length are accepted
and not considered invalid because of internal buffer limitations.
#### `:priority`
Sets the `SO_PRIORITY` socket level option on platforms where this is
implemented. The behavior and allowed range varies between different
systems. The option is ignored on platforms where it is not
implemented. Use with caution.
#### `:raw_fd`
If a socket has somehow been connected without using gen_tcp, use
this option to pass the file descriptor for it. If
`:network_interface` and/or `:port` options are combined with this
option, the fd is bound to the specified interface and port before
connecting. If these options are not specified, it is assumed that
the fd is already bound appropriately.
#### `:send_buffer`
The minimum size of the send buffer to use for the socket. You are
encouraged to use `getopts/2`, to retrieve the size set by your
operating system.
"""
@enforce_keys [
:path,
:tcp_opts,
:connect_timeout,
:receive_timeout,
:bam_window,
:active
]
defstruct @enforce_keys
alias Bricks.{Connector, Options, Socket}
alias Bricks.Connector.{Tcp, Unix}
alias Bricks.Error.{BadOption, Connect}
import Bricks.Guards
@default_tcp_opts []
@default_connect_timeout 3000
@default_send_timeout 3000
@default_receive_timeout 3000
@default_bam_window 10
@default_active false
@typedoc "A file path to a unix domain socket"
@type path :: binary()
@typedoc "A unix domain socket Connector"
@type t :: %Unix{
path: path(),
tcp_opts: [term()],
receive_timeout: timeout(),
connect_timeout: timeout(),
bam_window: Socket.window(),
active: Socket.active()
}
@typedoc "Options passed to `create/2`"
@type create_opts :: %{
# Optional Socket members
optional(:connect_timeout) => timeout(),
optional(:receive_timeout) => timeout(),
optional(:bam_window) => Socket.window(),
optional(:active) => Socket.active(),
# Optional non-Socket member `:gen_tcp`/`:inet` socket options
optional(:bind_to_device) => binary(),
optional(:buffer) => non_neg_integer(),
optional(:delay_send?) => boolean(),
optional(:deliver) => :port | :term,
optional(:exit_on_close?) => boolean(),
optional(:header_size) => non_neg_integer(),
optional(:high_msgq_watermark) => pos_integer(),
optional(:high_watermark) => non_neg_integer(),
optional(:line_delimiter) => char(),
optional(:low_msgq_watermark) => pos_integer(),
optional(:low_watermark) => non_neg_integer(),
optional(:packet_type) => :raw | 1 | 2 | 4,
optional(:packet_size) => pos_integer(),
optional(:priority) => non_neg_integer(),
optional(:raw_fd) => non_neg_integer(),
optional(:send_buffer) => non_neg_integer(),
optional(:send_timeout) => timeout(),
optional(:send_timeout_close?) => boolean(),
optional(:tcp_module) => atom(),
optional(:tcp_opts) => [term()]
}
@spec create(path(), create_opts()) :: {:ok, Connector.t()} | {:error, BadOption.t()}
@doc """
Creates a `Bricks.Connector` which uses this module as a callback
and the provided path and options to open and configure the socket
"""
def create(path, opts \\ %{})
def create(path, %{} = opts) do
with {:ok, tcp_opts} <- tcp_options(opts) do
create_connector(path, opts, tcp_opts)
end
end
## behaviour impl: Connector
@spec connect(t()) :: {:ok, Socket.t()} | {:error, term()}
@doc false
def connect(%Unix{path: path, tcp_opts: opts, connect_timeout: timeout} = tcp) do
case :gen_tcp.connect({:local, path}, 0, opts, timeout) do
{:error, reason} -> {:error, Connect.new(reason)}
{:ok, socket} -> socket(socket, tcp)
end
end
## Internal helpers
@tcp_table_options [
bind_to_device: {:bind_to_device, &is_binary/1, [:binary]},
buffer: {:buffer, &non_neg_int?/1, [:non_neg_int]},
delay_send?: {:delay_send, &is_boolean/1, [:bool]},
deliver: {:deliver, &deliver?/1, [:port, :term]},
exit_on_close?: {:exit_on_close, &is_boolean/1, [:bool]},
header_size: {:header, &non_neg_int?/1, [:non_neg_int]},
high_msgq_watermark: {:high_msgq_watermark, &pos_int?/1, [:pos_int]},
high_watermark: {:high_watermark, &non_neg_int?/1, [:non_neg_int]},
line_delimiter: {:line_delimiter, &char?/1, [:char]},
local_port: {:port, &port?/1, [:non_neg_int]},
low_msgq_watermark: {:low_msgq_watermark, &pos_int?/1, [:pos_int]},
low_watermark: {:low_watermark, &non_neg_int?/1, [:non_neg_int]},
packet_type: {:packet, &packet_type?/1, [:raw, 1, 2, 4]},
packet_size: {:packet_size, &pos_int?/1, [:pos_int]},
priority: {:priority, &non_neg_int?/1, [:non_neg_int]},
raw_fd: {:fd, &non_neg_int?/1, [:non_neg_int]},
send_buffer: {:sndbuf, &non_neg_int?/1, [:non_neg_int]},
send_timeout: {:send_timeout, &timeout?/1, [:infinity, :non_neg_int]},
send_timeout_close?: {:send_timeout_close, &is_boolean/1, [:bool]},
tcp_module: {:tcp_module, &is_atom/1, [:atom]}
]
@tcp_custom_options [
:active,
:bam_window,
:binary?,
:raw,
:connect_timeout,
:receive_timeout,
:send_timeout,
:tcp_opts
]
@tcp_option_keys @tcp_custom_options ++ Keyword.keys(@tcp_table_options)
defp create_connector(path, opts, tcp_opts) do
with {:ok, conn_timeout} <-
Options.default_timeout(opts, :connect_timeout, @default_connect_timeout),
{:ok, receive_timeout} <-
Options.default_timeout(opts, :receive_timeout, @default_receive_timeout),
{:ok, bam_window} <-
Options.default(opts, :bam_window, @default_bam_window, &window?/1, [:once, :pos_int]),
{:ok, active} <-
Options.default(opts, :active, @default_active, &active?/1, [:bool, :integer, :once]) do
case is_binary(path) do
true ->
unix = %Unix{
path: path,
tcp_opts: tcp_opts,
receive_timeout: receive_timeout,
connect_timeout: conn_timeout,
bam_window: bam_window,
active: active
}
{:ok, Connector.new(__MODULE__, unix)}
false ->
{:error, BadOption.new(:path, path, [:binary])}
end
end
end
@doc false
defp tcp_options(opts) do
with {:ok, active} <-
Options.default(opts, :active, @default_active, &active?/1, [:bool, :integer, :once]),
{:ok, mode} <- Tcp.mode(opts),
{:ok, send_timeout} <-
Options.default_timeout(opts, :send_timeout, @default_send_timeout),
{:ok, tcp_opts} <-
Options.default(opts, :tcp_opts, @default_tcp_opts, &is_list/1, [:proplist]),
:ok <- Options.check_extra_keys(opts, @tcp_option_keys),
{:ok, table} <- Options.table_options(opts, @tcp_table_options),
{:ok, raw} <- Tcp.raw_opts(opts) do
synthetic = [active: active, mode: mode, send_timeout: send_timeout]
{:ok, table ++ raw ++ synthetic ++ tcp_opts}
end
end
defp socket_opts(%Unix{path: host} = unix) do
Map.take(unix, [:active, :receive_timeout, :bam_window])
|> Map.put(:host, host)
|> Map.put(:port, :local)
end
defp socket(socket, %Unix{} = unix) do
try do
with {:error, reason} <- Socket.Tcp.create(socket, socket_opts(unix)) do
:gen_tcp.close(socket)
{:error, reason}
end
rescue
e ->
:gen_tcp.close(socket)
{:error, e}
end
end
end
|
bricks/lib/connectors/unix.ex
| 0.879768 | 0.797911 |
unix.ex
|
starcoder
|
defmodule Liquor do
@moduledoc """
Liquor is a search filter helper, it provides:
* whitelisting
* transformation
* filtering
"""
@type op ::
:match |
:unmatch |
:== |
:!= |
:>= |
:<= |
:> |
:<
@type search_spec :: %{
whitelist: Liquor.Whitelist.filter(),
transform: Liquor.Transformer.type_spec(),
filter: Liquor.Filter.filter(),
}
@spec parse_string(String.t) :: {:ok, list} | {:error, term}
def parse_string(string), do: Liquor.Parser.parse(string, parse_operators: true)
@spec whitelist_terms(list, search_spec) :: list
def whitelist_terms(terms, search_spec), do: Liquor.Whitelist.whitelist(terms, search_spec.whitelist)
@spec transform_terms(list, search_spec) :: list
def transform_terms(terms, search_spec), do: Liquor.Transformer.transform(terms, search_spec.transform)
@spec filter_terms(Ecto.Query.t, list, search_spec) :: Ecto.Query.t
def filter_terms(query, terms, search_spec), do: Liquor.Filter.filter(query, terms, search_spec.filter)
@spec prepare_terms(String.t, search_spec) :: {:ok, list} | {:error, term}
def prepare_terms(string, spec) when is_binary(string) do
case parse_string(string) do
{:ok, terms} -> prepare_terms(terms, spec)
{:error, _} = err -> err
end
end
def prepare_terms(terms, spec) when is_map(terms) do
prepare_terms(Map.to_list(terms), spec)
end
def prepare_terms(terms, spec) when is_list(terms) do
terms = whitelist_terms(terms, spec)
terms = transform_terms(terms, spec)
{:ok, terms}
end
@spec apply_search(Ecto.Query.t, String.t | list, search_spec) :: Ecto.Query.t
def apply_search(query, string, spec) when is_binary(string) do
{:ok, terms} = parse_string(string)
apply_search(query, terms, spec)
end
def apply_search(query, terms, spec) when is_map(terms) do
apply_search(query, Map.to_list(terms), spec)
end
def apply_search(query, terms, spec) when is_list(terms) do
{:ok, terms} = prepare_terms(terms, spec)
query
|> filter_terms(terms, spec)
end
@spec binary_op(op) :: :match | :unmatch
def binary_op(:match), do: :match
def binary_op(:==), do: :match
def binary_op(:>=), do: :match
def binary_op(:<=), do: :match
def binary_op(:unmatch), do: :unmatch
def binary_op(:!=), do: :unmatch
def binary_op(:>), do: :unmatch
def binary_op(:<), do: :unmatch
end
|
lib/liquor.ex
| 0.55266 | 0.654967 |
liquor.ex
|
starcoder
|
defmodule Csp.Searcher do
@moduledoc """
Search strategies for CSP.
"""
require Logger
@doc """
Performs a brute force search on `csp`.
**NOTE:** don't use it for real stuff. This is provided only for comparison with backtracking.
Use backtracking instead!
If solution is found, returned `{:solved, assignment | assignments}`, otherwise returns `:no_solution`.
## Options
- `all`, boolean: if all solutions should be found. By default is set to `false`,
so only the first found solution is returned. If `all` is true, all solutions are found,
and instead of returning a single `assignment`, returns a list of `assignments`.
"""
@spec brute_force(Csp.t(), Keyword.t()) :: Csp.solve_result()
def brute_force(%Csp{} = csp, opts \\ []) do
all = Keyword.get(opts, :all, false)
candidates = generate_candidates(csp)
solution_or_solutions =
if all do
Enum.filter(candidates, &Csp.solved?(csp, &1))
else
Enum.find(candidates, &Csp.solved?(csp, &1))
end
if is_nil(solution_or_solutions) do
:no_solution
else
{:solved, solution_or_solutions}
end
end
## Helpers
@spec generate_candidates(Csp.t()) :: [Csp.assignment()]
defp generate_candidates(csp) do
generate_candidates(csp, csp.variables, [])
end
@spec generate_candidates(Csp.t(), [Csp.variable()], [Csp.assignment()]) :: [Csp.assignment()]
defp generate_candidates(csp, variables_to_consider, candidates)
defp generate_candidates(_csp, [], candidates), do: candidates
defp generate_candidates(csp, [variable | rest], candidates) do
domain = Map.fetch!(csp.domains, variable)
case candidates do
[] ->
candidates = Enum.map(domain, fn value -> %{variable => value} end)
generate_candidates(csp, rest, candidates)
_ ->
candidates_with_variable =
Enum.reduce(domain, [], fn value, candidates_with_variable ->
candidates_with_variable ++ Enum.map(candidates, &Map.put(&1, variable, value))
end)
generate_candidates(csp, rest, candidates_with_variable)
end
end
end
|
lib/csp/searcher.ex
| 0.824037 | 0.496216 |
searcher.ex
|
starcoder
|
defmodule Votr.Blt do
@doc """
Parses a BLT file `stream`.
The BLT file format is described here: https://www.opavote.com/help/overview#blt-file-format
Returns a map containing:
* `seats`: the number of seats to be elected
* `ballots`: a list of ballots that can be passed to `eval/3`
* `candidates`: a list of candidate names
* `withdrawn`: a list of candidate ids that should be filtered from the ballots (optional)
"""
def parse(stream) do
# file consists of the following lines
# :initial 1 line <number of candidates c> <number of seats s>
# :ballot 0~1 line <the candidates that have withdrawn>+
# :ballot 1~n lines a ballot (see format below)
# :ballot 1 line 0 (end of ballots marker)
# :candidate c lines "<name of candidate>"
# :candidate 1 line "<name of election>"
# each ballot has the format
# <weight> <candidate> <candidate> ...0
# weight can be used to group identical ballots, or to give each voter different weight
# candidate is the integer id of the candidate (i.e. 1,2,3)
# candidate may be a - to indicate a skipped vote
# two candidates may be joined with = to indicate the have equal rank
Enum.reduce(
stream,
%{state: :initial},
fn line, a ->
[data | _] = String.split(line, "#", parts: 2)
data = String.trim(data)
cond do
# comment only line
data == "" ->
a
# first line
a.state == :initial ->
[c, s] = String.split(data, " ")
{candidates, _} = Integer.parse(c)
{seats, _} = Integer.parse(s)
a
|> Map.put(:remaining, candidates)
|> Map.put(:seats, seats)
|> Map.put(:state, :ballot)
|> Map.put(:ballots, [])
|> Map.put(:candidates, [])
# end of ballots marker line
a.state == :ballot && data == "0" ->
Map.put(a, :state, :candidate)
# withdrawn candidates line
a.state == :ballot && String.starts_with?(data, "-") ->
withdrawn =
Regex.scan(~r/(-\d+)+/, data)
|> Enum.map(
fn [match, _] ->
{c, _} = Integer.parse(match)
-c
end
)
Map.put(a, :withdrawn, withdrawn)
# ballot line
a.state == :ballot ->
[weight | candidates] = String.split(data, " ")
{weight, _} = Float.parse(weight)
vote = Enum.reduce(
candidates,
{1, %{}},
fn term, {rank, ballot} ->
case term do
"0" ->
# end of ballot marker
ballot
"-" ->
# undervote marker
{rank + 1, ballot}
_ ->
{
rank + 1,
Enum.reduce(
String.split(term, "="),
ballot,
fn c, a ->
{c, _} = Integer.parse(c)
Map.put(a, c, rank)
end
)
}
end
end
)
ballot = %{ weight: weight, candidates: vote }
Map.update!(
a,
:ballots,
fn ballots ->
[ballot] ++ ballots
end
)
a.state == :candidate && a.remaining == 0 ->
a
|> Map.put(:title, String.replace(String.trim(data, "\""), "\\", ""))
|> Map.delete(:remaining)
|> Map.delete(:state)
a.state == :candidate ->
a
|> Map.update(
:candidates,
[],
fn candidates ->
candidates ++ [String.replace(String.trim(data, "\""), "\\", "")]
end
)
|> Map.update!(:remaining, &(&1 - 1))
true ->
a
end
end
)
end
def rekey(result, candidates) when is_list(result) do
Enum.map(result, fn round -> rekey(round, candidates) end)
end
@doc """
Takes `results` with numeric candidate keys and returns results
with the candidate keys from `candidates`.
"""
def rekey(result, candidates) do
Enum.reduce(
result,
%{},
fn {i, v}, a ->
if i == :exhausted do
Map.put(a, :exhausted, v)
else
Map.put(a, Enum.at(candidates, i - 1), v)
end
end
)
end
end
|
lib/votr/blt.ex
| 0.764848 | 0.670156 |
blt.ex
|
starcoder
|
defmodule Cldr.Calendar.Sigils do
@moduledoc """
Implements the `~d` sigils to produce
dates, datetimes and naive datetimes.
"""
alias Cldr.Config
@doc """
Implements a ~d sigil for expressing dates.
Dates can be expressed in the following formats:
* `~d[yyyy-mm-dd]` which produces a date in the `Cldr.Calendar.Gregorian` calendar
* `~d[yyyy-Wmm-dd]` which produces a date in the `Cldr.Calendar.IsoWeek` calendar
* `~d[yyyy-mm-dd calendar]` which produces a date in the given month-based calendar
* `~d[yyyy-Wmm-dd calendar]` which produces a date in the given week-based calendar
* `~d[yyyy-mm-dd C.E. Julian]` which produces a date in the Cldr.Calendar.Julian calendar
* `~d[yyyy-mm-dd B.C.E. Julian]` which produces a date in the Cldr.Calendar.Julian calendar
## Examples
iex> import Cldr.Calendar.Sigils
iex> ~d[2019-01-01 Gregorian]
~d[2019-01-01 Gregorian]
iex> ~d[2019-W01-01]
~d[2019-W01-1 ISOWeek]
"""
defmacro sigil_d({:<<>>, _, [string]}, modifiers) do
do_sigil_d(string, modifiers)
|> Macro.escape()
end
def do_sigil_d(<<year::bytes-4, "-", month::bytes-2, "-", day::bytes-2>>, _) do
to_date(year, month, day, Cldr.Calendar.Gregorian)
end
def do_sigil_d(<<year::bytes-4, "-", month::bytes-2, "-", day::bytes-2, " C.E. Julian">>, _) do
to_date(year, month, day, Cldr.Calendar.Julian)
end
def do_sigil_d(<<year::bytes-4, "-", month::bytes-2, "-", day::bytes-2, " B.C.E. Julian">>, _) do
to_date("-" <> year, month, day, Cldr.Calendar.Julian)
end
def do_sigil_d(
<<year::bytes-4, "-", month::bytes-2, "-", day::bytes-2, " ", calendar::binary>>,
_
) do
to_date(year, month, day, calendar)
end
def do_sigil_d(
<<"-", year::bytes-4, "-", month::bytes-2, "-", day::bytes-2, " ", calendar::binary>>,
_
) do
to_date("-" <> year, month, day, calendar)
end
def do_sigil_d(<<year::bytes-4, "-W", month::bytes-2, "-", day::bytes-2>>, _) do
to_date(year, month, day, Cldr.Calendar.ISOWeek)
end
def do_sigil_d(<<year::bytes-4, "-W", month::bytes-2, "-", day::bytes-1>>, _) do
to_date(year, month, day, Cldr.Calendar.ISOWeek)
end
def do_sigil_d(
<<year::bytes-4, "-W", month::bytes-2, "-", day::bytes-2, " ", calendar::binary>>,
_
) do
to_date(year, month, day, calendar)
end
def do_sigil_d(
<<year::bytes-4, "-W", month::bytes-2, "-", day::bytes-1, " ", calendar::binary>>,
_
) do
to_date(year, month, day, calendar)
end
defp to_date(year, month, day, calendar) do
[year, month, day] = Enum.map([year, month, day], &String.to_integer/1)
with {:ok, calendar} <- calendar_from_binary(calendar),
{:ok, date} <- Date.new(year, month, day, calendar) do
date
end
end
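# Calendar names resolve in order: an inbuilt Cldr.Calendar.<name>, a fiscal
# Cldr.Calendar.FiscalYear.<name>, a user-defined <name> module, and finally
# an {:error, {Cldr.UnknownCalendarError, name}} tuple.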
defp calendar_from_binary(calendar) do
inbuilt_calendar(calendar) ||
fiscal_calendar(calendar) ||
user_calendar(calendar) ||
calendar_error(calendar)
end
defp inbuilt_calendar(calendar) do
calendar = Module.concat(Cldr.Calendar, calendar)
get_calendar(calendar)
end
defp fiscal_calendar(calendar) do
calendar = Module.concat(Cldr.Calendar.FiscalYear, calendar)
get_calendar(calendar)
end
defp user_calendar(calendar) do
Module.concat("Elixir", calendar)
|> get_calendar
end
defp get_calendar(calendar) do
if Config.ensure_compiled?(calendar) and function_exported?(calendar, :cldr_calendar_type, 0) do
{:ok, calendar}
else
nil
end
end
defp calendar_error(calendar) do
{:error, {Cldr.UnknownCalendarError, calendar}}
end
end
|
lib/cldr/calendar/sigils.ex
| 0.869327 | 0.658321 |
sigils.ex
|
starcoder
|
defmodule Herald.Pipeline do
@moduledoc """
Pipeline is where messages are processed.
All message processing is started by function `run/2`.
When `run/2` receives a message, it runs the following steps:
* **pre_processor** - Will convert the message to a struct, using
the schema defined in the router for the given queue;
* **processor_caller** - Will call the `processor` defined in the router for
the given queue.
"""
alias Herald.Errors.MissingRouter
defstruct [
:schema,
:message,
:perform,
:processor,
result: :uncalled,
]
@typedoc """
Indicates to upper layers what to do with the message.
Possible values:
* `:ack` - When the message is successfully processed;
* `:delete` - When message must be deleted from broker
after a processing error;
* `:requeue` - When message must be reprocessed in
future after a processing error.
"""
@type to_perform ::
:ack |
:delete |
:requeue
@type t :: %__MODULE__{
schema: atom(),
message: map(),
result: any(),
processor: fun(),
perform: to_perform(),
}
@doc """
Process a given message.
Receives the following arguments:
* `queue` - The queue where the message was received from;
* `message` - The received message in its raw
state (i.e. the string as received from the broker)
This function must be called by plugins integrating
Herald with Brokers.
*Warning:* When you call this function without
[configuring a router](Herald.Router.html) for your
application, it raises the exception
`Herald.Errors.MissingRouter`
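
## Example

A sketch, assuming `MyApp.Router` routes the `"user.created"` queue to a
schema and processor:

    Herald.Pipeline.run("user.created", ~s({"name": "Jane"}))
    #=> %Herald.Pipeline{perform: :ack, ...}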
"""
@spec run(String.t(), String.t()) :: t() | {:error, atom()}
def run(queue, message) do
Application.get_env(:herald, :router)
|> case do
router when is_nil(router) ->
raise MissingRouter, message: """
Router not found.
You need to set a router for your application, as below:
config :herald,
router: MyApp.Router
See the document below for more details:
- https://hexdocs.pm/herald/Herald.Router.html
"""
router ->
message
|> pre_processor(queue, router)
|> processor_caller()
end
end
defp pre_processor(message, queue, router) do
case router.get_queue_route(queue) do
{:ok, {schema, processor}} ->
%__MODULE__{}
|> Map.put(:schema, schema)
|> Map.put(:processor, processor)
|> Map.put(:message, schema.from_string(queue, message))
{:error, reason} ->
{:error, reason}
end
end
# pre_processor returns a bare {:error, reason} tuple when the queue has no route
defp processor_caller({:error, _reason} = error), do: error
defp processor_caller(%{message: {:error, _reason}} = pipeline),
do: Map.put(pipeline, :perform, :requeue)
defp processor_caller(%{message: message, processor: processor} = pipeline) do
case processor.(message) do
{:ok, _} = result ->
pipeline
|> Map.put(:perform, :ack)
|> Map.put(:result, result)
{:error, :delete, _} = result ->
pipeline
|> Map.put(:result, result)
|> Map.put(:perform, :delete)
{:error, _} = result ->
pipeline
|> Map.put(:result, result)
|> Map.put(:perform, :requeue)
end
end
end
|
lib/herald/pipeline.ex
| 0.836605 | 0.474936 |
pipeline.ex
|
starcoder
|
defmodule Clickhousex.Codec.RowBinary do
alias Clickhousex.{Codec, Codec.Binary, Type}
@behaviour Codec
@impl Codec
def response_format do
"RowBinaryWithNamesAndTypes"
end
@impl Codec
def request_format do
"Values"
end
@impl Codec
def encode(query, replacements, params) do
params =
Enum.map(params, fn
%DateTime{} = dt -> DateTime.to_unix(dt)
other -> other
end)
Clickhousex.Codec.Values.encode(query, replacements, params)
end
@impl Codec
def decode(response) when is_binary(response) do
{:ok, column_count, rest} = Binary.decode_varint(response)
decode_metadata(rest, column_count)
end
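# Wire layout of "RowBinaryWithNamesAndTypes", as consumed below:
# <column count (varint)> <column names (strings)> <column types (strings)>
# followed by the rows, each holding one binary-encoded value per column.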
@spec decode_metadata(binary, integer) :: {:ok, map}
defp decode_metadata(bytes, column_count) when is_binary(bytes) and is_integer(column_count) do
{:ok, column_names, rest} = decode_column_names(bytes, column_count, [])
{:ok, column_types, rest} = decode_column_types(rest, column_count, [])
{:ok, rows} = decode_rows(rest, column_types, [])
{:ok, %{column_names: column_names, column_types: column_types, rows: rows}}
end
@spec decode_column_names(binary, column_count :: integer, [String.t()]) :: {:ok, [String.t()], binary}
defp decode_column_names(bytes, 0, names) when is_binary(bytes) and is_list(names) do
{:ok, Enum.reverse(names), bytes}
end
defp decode_column_names(bytes, column_count, names)
when is_binary(bytes) and is_integer(column_count) and is_list(names) do
{:ok, column_name, rest} = Binary.decode(bytes, %Type.String{})
decode_column_names(rest, column_count - 1, [column_name | names])
end
@spec decode_column_types(binary, column_count :: integer, [Type.t()]) :: {:ok, [Type.t()], binary}
defp decode_column_types(bytes, 0, types) when is_binary(bytes) and is_list(types) do
{:ok, Enum.reverse(types), bytes}
end
defp decode_column_types(bytes, column_count, types)
when is_binary(bytes) and is_integer(column_count) and is_list(types) do
{:ok, column_type, rest} = Binary.decode(bytes, %Type.String{})
decode_column_types(rest, column_count - 1, [Type.parse(column_type) | types])
end
@spec decode_rows(binary, [Type.t()], [tuple]) :: {:ok, [tuple]}
defp decode_rows(<<>>, _, rows) do
{:ok, Enum.reverse(rows)}
end
defp decode_rows(bytes, types, rows)
when is_binary(bytes) and is_list(types) and is_list(rows) do
{:ok, row, rest} = decode_row(bytes, types, [])
decode_rows(rest, types, [row | rows])
end
@spec decode_row(binary, [Type.t()], [term]) :: {:ok, tuple, binary}
defp decode_row(bytes, [], row) when is_binary(bytes) and is_list(row) do
row_tuple =
row
|> Enum.reverse()
|> List.to_tuple()
{:ok, row_tuple, bytes}
end
defp decode_row(bytes, [type | types], row) do
{:ok, value, rest} = Binary.decode(bytes, type)
decode_row(rest, types, [value | row])
end
end
|
lib/clickhousex/codec/row_binary.ex
| 0.861101 | 0.407982 |
row_binary.ex
|
starcoder
|
defmodule QMI do
@moduledoc """
Qualcomm MSM Interface in Elixir
This module lets you send and receive messages from a QMI-enabled cellular
modem.
To use it, start a `QMI.Supervisor` in the supervision tree of your choosing
and pass it a name and interface. After that, use the service modules to send
it messages. For example:
```elixir
# In your application's supervision tree
children = [
#... other children ...
{QMI.Supervisor, ifname: "wwan0", name: MyApp.QMI}
#... other children ...
]
# Later on
iex> QMI.WirelessData.start_network_interface(MyApp.QMI, apn: "super")
:ok
iex> QMI.NetworkAccess.get_signal_strength(MyApp.QMI)
{:ok, %{rssi_reports: [%{radio: :lte, rssi: -74}]}}
```
"""
@typedoc """
The name passed to QMI.Supervisor
"""
@type name() :: atom()
@typedoc """
Structure that contains information about how to handle a QMI service message
* `:service_id` - which service this request is for
* `:payload` - iodata of the message being sent
* `:decode` - a function that will be used to decode the incoming response
"""
@type request() :: %{
service_id: non_neg_integer(),
payload: iodata(),
decode: (binary() -> :ok | {:ok, any()} | {:error, atom()})
}
@typedoc """
A function that is ran when QMI receives an indication
This function receives a map as the indication and returns `:ok`.
"""
@type indication_callback_fun() :: (map() -> :ok)
@doc """
Configure the framing when using Linux
This should be called once the device appears and before an attempt is made to
connect to the network.
"""
@spec configure_linux(String.t()) :: :ok
def configure_linux(ifname) do
# This might not be true for all modems, as some support 802.3 IP framing;
# the EC25, however, supports raw IP framing. This capability can be detected,
# which is probably a better solution than just forcing raw IP framing.
File.write!("/sys/class/net/#{ifname}/qmi/raw_ip", "Y")
end
@doc """
Send a request over QMI and return the response
NOTE: the QMI name parameter is second to facilitate piping
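
## Example

A sketch with a hand-rolled request; in practice the service modules
(e.g. `QMI.NetworkAccess`) build request maps for you. The service id and
payload bytes below are placeholders, not a real QMI message:

    request = %{service_id: 0x03, payload: [<<0x22, 0x00, 0x00, 0x00>>], decode: fn _bin -> :ok end}
    QMI.call(request, MyApp.QMI)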
"""
@spec call(request(), name()) :: :ok | {:ok, any()} | {:error, atom()}
def call(request, qmi) do
with {:ok, client_id} <- QMI.ClientIDCache.get_client_id(qmi, request.service_id) do
QMI.Driver.call(qmi, client_id, request)
end
end
end
|
lib/qmi.ex
| 0.763484 | 0.77535 |
qmi.ex
|
starcoder
|
defmodule Enums do
use Koans
@intro "Enums"
koan "Knowing how many elements are in a list is important for book-keeping" do
assert Enum.count([1, 2, 3]) == 3
end
koan "Depending on the type, it counts pairs" do
assert Enum.count(%{a: :foo, b: :bar}) == 2
end
def less_than_five?(n), do: n < 5
koan "Elements can have a lot in common" do
assert Enum.all?([1, 2, 3], &less_than_five?/1) == true
assert Enum.all?([4, 6, 8], &less_than_five?/1) == false
end
def even?(n), do: rem(n, 2) == 0
koan "Sometimes you just want to know if there are any elements fulfilling a condition" do
assert Enum.any?([1, 2, 3], &even?/1) == true
assert Enum.any?([1, 3, 5], &even?/1) == false
end
koan "Sometimes you just want to know if an element is part of the party" do
input = [1, 2, 3]
assert Enum.member?(input, 1) == true
assert Enum.member?(input, 30) == false
end
def multiply_by_ten(n), do: 10 * n
koan "Map converts each element of a list by running some function with it" do
assert Enum.map([1, 2, 3], &multiply_by_ten/1) == [10, 20, 30]
end
def odd?(n), do: rem(n, 2) == 1
koan "Filter allows you to only keep what you really care about" do
assert Enum.filter([1, 2, 3], &odd?/1) == [1, 3]
end
koan "Reject will help you throw out unwanted cruft" do
assert Enum.reject([1, 2, 3], &odd?/1) == [2]
end
koan "You three there, follow me!" do
assert Enum.take([1, 2, 3, 4, 5], 3) == [1, 2, 3]
end
koan "You can ask for a lot, but Enum won't hand you more than you give" do
assert Enum.take([1, 2, 3, 4, 5], 10) == [1, 2, 3, 4, 5]
end
koan "Just like taking, you can also drop elements" do
assert Enum.drop([-1, 0, 1, 2, 3], 2) == [1, 2, 3]
end
koan "Zip-up in pairs!" do
letters = [:a, :b, :c]
numbers = [1, 2, 3]
assert Enum.zip(letters, numbers) == [a: 1, b: 2, c: 3]
end
koan "When you want to find that one pesky element" do
assert Enum.find([1, 2, 3, 4], &even?/1) == 2
end
def divisible_by_five?(n), do: rem(n, 5) == 0
koan "...but you don't quite find it..." do
assert Enum.find([1, 2, 3], &divisible_by_five?/1) == nil
end
koan "...you can settle for a consolation prize" do
assert Enum.find([1, 2, 3], :no_such_element, &divisible_by_five?/1) == :no_such_element
end
koan "Collapse an entire list of elements down to a single one by repeating a function." do
assert Enum.reduce([1, 2, 3], 0, fn(element, accumulator) -> element + accumulator end) == 6
end
end
|
lib/koans/14_enums.ex
| 0.760562 | 0.728 |
14_enums.ex
|
starcoder
|
defmodule List do
@moduledoc """
Functions that work on (linked) lists.
Many of the functions provided for lists, which implement
the `Enumerable` protocol, are found in the `Enum` module.
Additionally, the following functions and operators for lists are
found in `Kernel`:
* `++/2`
* `--/2`
* `hd/1`
* `tl/1`
* `in/2`
* `length/1`
Lists in Elixir are specified between square brackets:
iex> [1, "two", 3, :four]
[1, "two", 3, :four]
Two lists can be concatenated and subtracted using the
`Kernel.++/2` and `Kernel.--/2` operators:
iex> [1, 2, 3] ++ [4, 5, 6]
[1, 2, 3, 4, 5, 6]
iex> [1, true, 2, false, 3, true] -- [true, false]
[1, 2, 3, true]
Lists in Elixir are effectively linked lists, which means
they are internally represented in pairs containing the
head and the tail of a list:
iex> [head | tail] = [1, 2, 3]
iex> head
1
iex> tail
[2, 3]
Similarly, we could write the list `[1, 2, 3]` using only
such pairs (called cons cells):
iex> [1 | [2 | [3 | []]]]
[1, 2, 3]
Some lists, called improper lists, do not have an empty list as
the second element in the last cons cell:
iex> [1 | [2 | [3 | 4]]]
[1, 2, 3 | 4]
Although improper lists are generally avoided, they are used in some
special circumstances like iodata and chardata entities (see the `IO` module).
Due to their cons cell based representation, prepending an element
to a list is always fast (constant time), while appending becomes
slower as the list grows in size (linear time):
iex> list = [1, 2, 3]
iex> [0 | list] # fast
[0, 1, 2, 3]
iex> list ++ [4] # slow
[1, 2, 3, 4]
Most of the functions in this module work in linear time. This means that
the time it takes to perform an operation grows at the same rate as the
length of the list. For example `length/1` and `last/1` will run in linear
time because they need to iterate through every element of the list, but
`first/1` will run in constant time because it only needs the first element.
## Charlists
If a list is made of non-negative integers, where each integer represents a
Unicode code point, the list can also be called a charlist. These integers
must:
* be within the range `0..0x10FFFF` (`0..1_114_111`);
* and be out of the range `0xD800..0xDFFF` (`55_296..57_343`), which is
reserved in Unicode for UTF-16 surrogate pairs.
Elixir uses single quotes to define charlists:
iex> 'héllo'
[104, 233, 108, 108, 111]
In particular, charlists will be printed back by default in single
quotes if they contain only printable ASCII characters:
iex> 'abc'
'abc'
The rationale behind this behaviour is to better support
Erlang libraries which may return text as charlists
instead of Elixir strings. One example of such functions
is `Application.loaded_applications/0`:
Application.loaded_applications()
#=> [
#=> {:stdlib, 'ERTS CXC 138 10', '2.6'},
#=> {:compiler, 'ERTS CXC 138 10', '6.0.1'},
#=> {:elixir, 'elixir', '1.0.0'},
#=> {:kernel, 'ERTS CXC 138 10', '4.1'},
#=> {:logger, 'logger', '1.0.0'}
#=> ]
A list can be checked if it is made of only printable ASCII
characters with `ascii_printable?/2`.
Improper lists are never deemed as charlists.
"""
@compile :inline_list_funcs
@doc """
Deletes the given `element` from the `list`. Returns a new list without
the element.
If the `element` occurs more than once in the `list`, just
the first occurrence is removed.
## Examples
iex> List.delete([:a, :b, :c], :a)
[:b, :c]
iex> List.delete([:a, :b, :b, :c], :b)
[:a, :b, :c]
"""
@spec delete(list, any) :: list
def delete(list, element)
def delete([element | list], element), do: list
def delete([other | list], element), do: [other | delete(list, element)]
def delete([], _element), do: []
@doc """
Duplicates the given element `n` times in a list.
## Examples
iex> List.duplicate("hello", 3)
["hello", "hello", "hello"]
iex> List.duplicate([1, 2], 2)
[[1, 2], [1, 2]]
"""
@spec duplicate(elem, non_neg_integer) :: [elem] when elem: var
def duplicate(elem, n) do
:lists.duplicate(n, elem)
end
@doc """
Flattens the given `list` of nested lists.
## Examples
iex> List.flatten([1, [[2], 3]])
[1, 2, 3]
"""
@spec flatten(deep_list) :: list when deep_list: [any | deep_list]
def flatten(list) do
:lists.flatten(list)
end
@doc """
Flattens the given `list` of nested lists.
The list `tail` will be added at the end of
the flattened list.
## Examples
iex> List.flatten([1, [[2], 3]], [4, 5])
[1, 2, 3, 4, 5]
"""
@spec flatten(deep_list, [elem]) :: [elem] when elem: var, deep_list: [elem | deep_list]
def flatten(list, tail) do
:lists.flatten(list, tail)
end
@doc """
Folds (reduces) the given list from the left with
a function. Requires an accumulator.
## Examples
iex> List.foldl([5, 5], 10, fn x, acc -> x + acc end)
20
iex> List.foldl([1, 2, 3, 4], 0, fn x, acc -> x - acc end)
2
"""
@spec foldl([elem], acc, (elem, acc -> acc)) :: acc when elem: var, acc: var
def foldl(list, acc, fun) when is_list(list) and is_function(fun) do
:lists.foldl(fun, acc, list)
end
@doc """
Folds (reduces) the given list from the right with
a function. Requires an accumulator.
## Examples
iex> List.foldr([1, 2, 3, 4], 0, fn x, acc -> x - acc end)
-2
"""
@spec foldr([elem], acc, (elem, acc -> acc)) :: acc when elem: var, acc: var
def foldr(list, acc, fun) when is_list(list) and is_function(fun) do
:lists.foldr(fun, acc, list)
end
@doc """
Returns the first element in `list` or `nil` if `list` is empty.
## Examples
iex> List.first([])
nil
iex> List.first([1])
1
iex> List.first([1, 2, 3])
1
"""
@spec first([]) :: nil
@spec first([elem, ...]) :: elem when elem: var
def first([]), do: nil
def first([head | _]), do: head
@doc """
Returns the last element in `list` or `nil` if `list` is empty.
## Examples
iex> List.last([])
nil
iex> List.last([1])
1
iex> List.last([1, 2, 3])
3
"""
@spec last([]) :: nil
@spec last([elem, ...]) :: elem when elem: var
def last([]), do: nil
def last([head]), do: head
def last([_ | tail]), do: last(tail)
@doc """
Receives a list of tuples and returns the first tuple
where the element at `position` in the tuple matches the
given `key`.
## Examples
iex> List.keyfind([a: 1, b: 2], :a, 0)
{:a, 1}
iex> List.keyfind([a: 1, b: 2], 2, 1)
{:b, 2}
iex> List.keyfind([a: 1, b: 2], :c, 0)
nil
"""
@spec keyfind([tuple], any, non_neg_integer, any) :: any
def keyfind(list, key, position, default \\ nil) do
:lists.keyfind(key, position + 1, list) || default
end
@doc """
Receives a list of tuples and returns `true` if there is
a tuple where the element at `position` in the tuple matches
the given `key`.
## Examples
iex> List.keymember?([a: 1, b: 2], :a, 0)
true
iex> List.keymember?([a: 1, b: 2], 2, 1)
true
iex> List.keymember?([a: 1, b: 2], :c, 0)
false
"""
@spec keymember?([tuple], any, non_neg_integer) :: boolean
def keymember?(list, key, position) do
:lists.keymember(key, position + 1, list)
end
@doc """
Receives a list of tuples and if the identified element by `key` at `position`
exists, it is replaced with `new_tuple`.
## Examples
iex> List.keyreplace([a: 1, b: 2], :a, 0, {:a, 3})
[a: 3, b: 2]
iex> List.keyreplace([a: 1, b: 2], :a, 1, {:a, 3})
[a: 1, b: 2]
"""
@spec keyreplace([tuple], any, non_neg_integer, tuple) :: [tuple]
def keyreplace(list, key, position, new_tuple) do
:lists.keyreplace(key, position + 1, list, new_tuple)
end
@doc """
Receives a list of tuples and sorts the elements
at `position` of the tuples. The sort is stable.
## Examples
iex> List.keysort([a: 5, b: 1, c: 3], 1)
[b: 1, c: 3, a: 5]
iex> List.keysort([a: 5, c: 1, b: 3], 0)
[a: 5, b: 3, c: 1]
"""
@spec keysort([tuple], non_neg_integer) :: [tuple]
def keysort(list, position) do
:lists.keysort(position + 1, list)
end
@doc """
Receives a `list` of tuples and replaces the element
identified by `key` at `position` with `new_tuple`.
If the element does not exist, it is added to the end of the `list`.
## Examples
iex> List.keystore([a: 1, b: 2], :a, 0, {:a, 3})
[a: 3, b: 2]
iex> List.keystore([a: 1, b: 2], :c, 0, {:c, 3})
[a: 1, b: 2, c: 3]
"""
@spec keystore([tuple], any, non_neg_integer, tuple) :: [tuple, ...]
def keystore(list, key, position, new_tuple) do
:lists.keystore(key, position + 1, list, new_tuple)
end
@doc """
Receives a `list` of tuples and deletes the first tuple
where the element at `position` matches the
given `key`. Returns the new list.
## Examples
iex> List.keydelete([a: 1, b: 2], :a, 0)
[b: 2]
iex> List.keydelete([a: 1, b: 2], 2, 1)
[a: 1]
iex> List.keydelete([a: 1, b: 2], :c, 0)
[a: 1, b: 2]
"""
@spec keydelete([tuple], any, non_neg_integer) :: [tuple]
def keydelete(list, key, position) do
:lists.keydelete(key, position + 1, list)
end
@doc """
Receives a `list` of tuples and returns the first tuple
where the element at `position` in the tuple matches the
given `key`, as well as the `list` without found tuple.
If such a tuple is not found, `nil` will be returned.
## Examples
iex> List.keytake([a: 1, b: 2], :a, 0)
{{:a, 1}, [b: 2]}
iex> List.keytake([a: 1, b: 2], 2, 1)
{{:b, 2}, [a: 1]}
iex> List.keytake([a: 1, b: 2], :c, 0)
nil
"""
@spec keytake([tuple], any, non_neg_integer) :: {tuple, [tuple]} | nil
def keytake(list, key, position) do
case :lists.keytake(key, position + 1, list) do
{:value, element, list} -> {element, list}
false -> nil
end
end
@doc """
Wraps `term` in a list if this is not list.
If `term` is already a list, it returns the list.
If `term` is `nil`, it returns an empty list.
## Examples
iex> List.wrap("hello")
["hello"]
iex> List.wrap([1, 2, 3])
[1, 2, 3]
iex> List.wrap(nil)
[]
"""
@spec wrap(nil) :: []
@spec wrap(list) :: list when list: maybe_improper_list()
@spec wrap(term) :: nonempty_list(term) when term: any()
def wrap(term)
def wrap(list) when is_list(list) do
list
end
def wrap(nil) do
[]
end
def wrap(other) do
[other]
end
@doc """
Zips corresponding elements from each list in `list_of_lists`.
The zipping finishes as soon as any list terminates.
## Examples
iex> List.zip([[1, 2], [3, 4], [5, 6]])
[{1, 3, 5}, {2, 4, 6}]
iex> List.zip([[1, 2], [3], [5, 6]])
[{1, 3, 5}]
"""
@spec zip([list]) :: [tuple]
def zip([]), do: []
def zip(list_of_lists) when is_list(list_of_lists) do
do_zip(list_of_lists, [])
end
@doc ~S"""
Checks if `list` is a charlist made only of printable ASCII characters.
Takes an optional `limit` as a second argument. `ascii_printable?/2` only
checks the printability of the list up to the `limit`.
A printable charlist in Elixir contains only the printable characters in the
standard seven-bit ASCII character encoding, which are characters ranging from
32 to 126 in decimal notation, plus the following control characters:
* `?\a` - Bell
* `?\b` - Backspace
* `?\t` - Horizontal tab
* `?\n` - Line feed
* `?\v` - Vertical tab
* `?\f` - Form feed
* `?\r` - Carriage return
* `?\e` - Escape
For more information read the [Character groups](https://en.wikipedia.org/wiki/ASCII#Character_groups)
section in the Wikipedia article of the [ASCII](https://en.wikipedia.org/wiki/ASCII) standard.
## Examples
iex> List.ascii_printable?('abc')
true
iex> List.ascii_printable?('abc' ++ [0])
false
iex> List.ascii_printable?('abc' ++ [0], 2)
true
Improper lists are not printable, even if made only of ASCII characters:
iex> List.ascii_printable?('abc' ++ ?d)
false
"""
@doc since: "1.6.0"
@spec ascii_printable?(list, 0) :: true
@spec ascii_printable?([], limit) :: true
when limit: :infinity | pos_integer
@spec ascii_printable?([...], limit) :: boolean
when limit: :infinity | pos_integer
def ascii_printable?(list, limit \\ :infinity)
when is_list(list) and (limit == :infinity or (is_integer(limit) and limit >= 0)) do
ascii_printable_guarded?(list, limit)
end
defp ascii_printable_guarded?(_, 0) do
true
end
defp ascii_printable_guarded?([char | rest], counter)
# 7..13 is the range '\a\b\t\n\v\f\r'. 32..126 are ASCII printables.
when is_integer(char) and
((char >= 7 and char <= 13) or char == ?\e or (char >= 32 and char <= 126)) do
ascii_printable_guarded?(rest, decrement(counter))
end
defp ascii_printable_guarded?([], _counter), do: true
defp ascii_printable_guarded?(_, _counter), do: false
@compile {:inline, decrement: 1}
defp decrement(:infinity), do: :infinity
defp decrement(counter), do: counter - 1
@doc """
Returns `true` if `list` is an improper list. Otherwise returns `false`.
## Examples
iex> List.improper?([1, 2 | 3])
true
iex> List.improper?([1, 2, 3])
false
"""
@doc since: "1.8.0"
@spec improper?(maybe_improper_list) :: boolean
def improper?(list) when is_list(list) and length(list) >= 0, do: false
def improper?(list) when is_list(list), do: true
@doc """
Returns a list with `value` inserted at the specified `index`.
Note that `index` is capped at the list length. Negative indices
indicate an offset from the end of the `list`.
## Examples
iex> List.insert_at([1, 2, 3, 4], 2, 0)
[1, 2, 0, 3, 4]
iex> List.insert_at([1, 2, 3], 10, 0)
[1, 2, 3, 0]
iex> List.insert_at([1, 2, 3], -1, 0)
[1, 2, 3, 0]
iex> List.insert_at([1, 2, 3], -10, 0)
[0, 1, 2, 3]
"""
@spec insert_at(list, integer, any) :: list
def insert_at(list, index, value) when is_list(list) and is_integer(index) do
case index do
-1 ->
list ++ [value]
_ when index < 0 ->
case length(list) + index + 1 do
index when index < 0 -> [value | list]
index -> do_insert_at(list, index, value)
end
_ ->
do_insert_at(list, index, value)
end
end
@doc """
Returns a list with a replaced value at the specified `index`.
Negative indices indicate an offset from the end of the `list`.
If `index` is out of bounds, the original `list` is returned.
## Examples
iex> List.replace_at([1, 2, 3], 0, 0)
[0, 2, 3]
iex> List.replace_at([1, 2, 3], 10, 0)
[1, 2, 3]
iex> List.replace_at([1, 2, 3], -1, 0)
[1, 2, 0]
iex> List.replace_at([1, 2, 3], -10, 0)
[1, 2, 3]
"""
@spec replace_at(list, integer, any) :: list
def replace_at(list, index, value) when is_list(list) and is_integer(index) do
if index < 0 do
case length(list) + index do
index when index < 0 -> list
index -> do_replace_at(list, index, value)
end
else
do_replace_at(list, index, value)
end
end
@doc """
Returns a list with an updated value at the specified `index`.
Negative indices indicate an offset from the end of the `list`.
If `index` is out of bounds, the original `list` is returned.
## Examples
iex> List.update_at([1, 2, 3], 0, &(&1 + 10))
[11, 2, 3]
iex> List.update_at([1, 2, 3], 10, &(&1 + 10))
[1, 2, 3]
iex> List.update_at([1, 2, 3], -1, &(&1 + 10))
[1, 2, 13]
iex> List.update_at([1, 2, 3], -10, &(&1 + 10))
[1, 2, 3]
"""
@spec update_at([elem], integer, (elem -> any)) :: list when elem: var
def update_at(list, index, fun) when is_list(list) and is_function(fun) and is_integer(index) do
if index < 0 do
case length(list) + index do
index when index < 0 -> list
index -> do_update_at(list, index, fun)
end
else
do_update_at(list, index, fun)
end
end
@doc """
Produces a new list by removing the value at the specified `index`.
Negative indices indicate an offset from the end of the `list`.
If `index` is out of bounds, the original `list` is returned.
## Examples
iex> List.delete_at([1, 2, 3], 0)
[2, 3]
iex> List.delete_at([1, 2, 3], 10)
[1, 2, 3]
iex> List.delete_at([1, 2, 3], -1)
[1, 2]
"""
@spec delete_at(list, integer) :: list
def delete_at(list, index) when is_integer(index) do
elem(pop_at(list, index), 1)
end
@doc """
Returns and removes the value at the specified `index` in the `list`.
Negative indices indicate an offset from the end of the `list`.
If `index` is out of bounds, the original `list` is returned.
## Examples
iex> List.pop_at([1, 2, 3], 0)
{1, [2, 3]}
iex> List.pop_at([1, 2, 3], 5)
{nil, [1, 2, 3]}
iex> List.pop_at([1, 2, 3], 5, 10)
{10, [1, 2, 3]}
iex> List.pop_at([1, 2, 3], -1)
{3, [1, 2]}
"""
@doc since: "1.4.0"
@spec pop_at(list, integer, any) :: {any, list}
def pop_at(list, index, default \\ nil) when is_integer(index) do
if index < 0 do
do_pop_at(list, length(list) + index, default, [])
else
do_pop_at(list, index, default, [])
end
end
@doc """
Returns `true` if `list` starts with the given `prefix` list; otherwise returns `false`.
If `prefix` is an empty list, it returns `true`.
### Examples
iex> List.starts_with?([1, 2, 3], [1, 2])
true
iex> List.starts_with?([1, 2], [1, 2, 3])
false
iex> List.starts_with?([:alpha], [])
true
iex> List.starts_with?([], [:alpha])
false
"""
@doc since: "1.5.0"
@spec starts_with?(list, list) :: boolean
@spec starts_with?(list, []) :: true
@spec starts_with?([], nonempty_list) :: false
def starts_with?(list, prefix)
def starts_with?([head | tail], [head | prefix_tail]), do: starts_with?(tail, prefix_tail)
def starts_with?(list, []) when is_list(list), do: true
def starts_with?(list, [_ | _]) when is_list(list), do: false
@doc """
Converts a charlist to an atom.
Elixir supports conversions from charlists which contains any Unicode
code point.
Inlined by the compiler.
## Examples
iex> List.to_atom('Elixir')
:Elixir
iex> List.to_atom('🌢 Elixir')
:"🌢 Elixir"
"""
@spec to_atom(charlist) :: atom
def to_atom(charlist) do
:erlang.list_to_atom(charlist)
end
@doc """
Converts a charlist to an existing atom. Raises an `ArgumentError`
if the atom does not exist.
Elixir supports conversions from charlists which contains any Unicode
code point.
Inlined by the compiler.
## Examples
iex> _ = :my_atom
iex> List.to_existing_atom('my_atom')
:my_atom
iex> _ = :"🌢 Elixir"
iex> List.to_existing_atom('🌢 Elixir')
:"🌢 Elixir"
iex> List.to_existing_atom('this_atom_will_never_exist')
** (ArgumentError) argument error
"""
@spec to_existing_atom(charlist) :: atom
def to_existing_atom(charlist) do
:erlang.list_to_existing_atom(charlist)
end
@doc """
Returns the float whose text representation is `charlist`.
Inlined by the compiler.
## Examples
iex> List.to_float('2.2017764e+0')
2.2017764
"""
@spec to_float(charlist) :: float
def to_float(charlist) do
:erlang.list_to_float(charlist)
end
@doc """
Returns an integer whose text representation is `charlist`.
Inlined by the compiler.
## Examples
iex> List.to_integer('123')
123
"""
@spec to_integer(charlist) :: integer
def to_integer(charlist) do
:erlang.list_to_integer(charlist)
end
@doc """
Returns an integer whose text representation is `charlist` in base `base`.
Inlined by the compiler.
## Examples
iex> List.to_integer('3FF', 16)
1023
"""
@spec to_integer(charlist, 2..36) :: integer
def to_integer(charlist, base) do
:erlang.list_to_integer(charlist, base)
end
@doc """
Converts a list to a tuple.
Inlined by the compiler.
## Examples
iex> List.to_tuple([:share, [:elixir, 163]])
{:share, [:elixir, 163]}
"""
@spec to_tuple(list) :: tuple
def to_tuple(list) do
:erlang.list_to_tuple(list)
end
@doc """
Converts a list of integers representing code points, lists or
strings into a string.
To be converted to a string, a list must either be empty or only
contain the following elements:
* strings
* integers representing Unicode code points
* a list containing one of these three elements
Notice that this function expects a list of integers representing
UTF-8 code points. If you have a list of bytes, you must instead use
the [`:binary` module](http://www.erlang.org/doc/man/binary.html).
## Examples
iex> List.to_string([0x00E6, 0x00DF])
"æß"
iex> List.to_string([0x0061, "bc"])
"abc"
iex> List.to_string([0x0064, "ee", ['p']])
"deep"
iex> List.to_string([])
""
"""
@spec to_string(:unicode.charlist()) :: String.t()
def to_string(list) when is_list(list) do
try do
:unicode.characters_to_binary(list)
rescue
ArgumentError ->
raise ArgumentError, """
cannot convert the given list to a string.
To be converted to a string, a list must either be empty or only
contain the following elements:
* strings
* integers representing Unicode code points
* a list containing one of these three elements
Please check the given list or call inspect/1 to get the list representation, got:
#{inspect(list)}
"""
else
result when is_binary(result) ->
result
{:error, encoded, rest} ->
raise UnicodeConversionError, encoded: encoded, rest: rest, kind: :invalid
{:incomplete, encoded, rest} ->
raise UnicodeConversionError, encoded: encoded, rest: rest, kind: :incomplete
end
end
@doc """
Converts a list of integers representing code points, lists or
strings into a charlist.
Notice that this function expects a list of integers representing
UTF-8 code points. If you have a list of bytes, you must instead use
the [`:binary` module](http://www.erlang.org/doc/man/binary.html).
## Examples
iex> List.to_charlist([0x00E6, 0x00DF])
'æß'
iex> List.to_charlist([0x0061, "bc"])
'abc'
iex> List.to_charlist([0x0064, "ee", ['p']])
'deep'
"""
@doc since: "1.8.0"
@spec to_charlist(:unicode.charlist()) :: charlist()
def to_charlist(list) when is_list(list) do
try do
:unicode.characters_to_list(list)
rescue
ArgumentError ->
raise ArgumentError, """
cannot convert the given list to a charlist.
To be converted to a charlist, a list must contain only:
* strings
* integers representing Unicode code points
* or a list containing one of these three elements
Please check the given list or call inspect/1 to get the list representation, got:
#{inspect(list)}
"""
else
result when is_list(result) ->
result
{:error, encoded, rest} ->
raise UnicodeConversionError, encoded: encoded, rest: rest, kind: :invalid
{:incomplete, encoded, rest} ->
raise UnicodeConversionError, encoded: encoded, rest: rest, kind: :incomplete
end
end
@doc """
Returns a keyword list that represents an *edit script*.
The algorithm is outlined in the
"An O(ND) Difference Algorithm and Its Variations" paper by <NAME>.
An *edit script* is a keyword list. Each key describes the "editing action" to
take in order to bring `list1` closer to being equal to `list2`; a key can be
`:eq`, `:ins`, or `:del`. Each value is a sublist of either `list1` or `list2`
that should be inserted (if the corresponding key `:ins`), deleted (if the
corresponding key is `:del`), or left alone (if the corresponding key is
`:eq`) in `list1` in order to be closer to `list2`.
See `myers_difference/3` if you want to handle nesting in the diff scripts.
## Examples
iex> List.myers_difference([1, 4, 2, 3], [1, 2, 3, 4])
[eq: [1], del: [4], eq: [2, 3], ins: [4]]
"""
@doc since: "1.4.0"
@spec myers_difference(list, list) :: [{:eq | :ins | :del, list}]
def myers_difference(list1, list2) when is_list(list1) and is_list(list2) do
myers_difference_with_diff_script(list1, list2, nil)
end
@doc """
Returns a keyword list that represents an *edit script* with nested diffs.
This is an extension of `myers_difference/2` where a `diff_script` function
can be given in case it is desired to compute nested differences. The function
may return a list with the inner edit script or `nil` in case there is no
such script. The returned inner edit script will be under the `:diff` key.
## Examples
iex> List.myers_difference(["a", "db", "c"], ["a", "bc"], &String.myers_difference/2)
[eq: ["a"], diff: [del: "d", eq: "b", ins: "c"], del: ["c"]]
"""
@doc since: "1.8.0"
@spec myers_difference(list, list, (term, term -> script | nil)) :: script
when script: [{:eq | :ins | :del | :diff, list}]
def myers_difference(list1, list2, diff_script)
when is_list(list1) and is_list(list2) and is_function(diff_script) do
myers_difference_with_diff_script(list1, list2, diff_script)
end
defp myers_difference_with_diff_script(list1, list2, diff_script) do
path = {0, list1, list2, []}
find_script(0, length(list1) + length(list2), [path], diff_script)
end
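# Each path is a {y, rest_of_list1, rest_of_list2, edits} tuple. find_script/4
# widens the search one `envelope` (edit count) at a time, advancing every
# diagonal from -envelope to envelope until some path consumes both lists.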
defp find_script(envelope, max, paths, diff_script) do
case each_diagonal(-envelope, envelope, paths, [], diff_script) do
{:done, edits} -> compact_reverse(edits, [])
{:next, paths} -> find_script(envelope + 1, max, paths, diff_script)
end
end
defp compact_reverse([], acc), do: acc
defp compact_reverse([{:diff, _} = fragment | rest], acc) do
compact_reverse(rest, [fragment | acc])
end
defp compact_reverse([{kind, elem} | rest], [{kind, result} | acc]) do
compact_reverse(rest, [{kind, [elem | result]} | acc])
end
defp compact_reverse(rest, [{:eq, elem}, {:ins, elem}, {:eq, other} | acc]) do
compact_reverse(rest, [{:ins, elem}, {:eq, elem ++ other} | acc])
end
defp compact_reverse([{kind, elem} | rest], acc) do
compact_reverse(rest, [{kind, [elem]} | acc])
end
defp each_diagonal(diag, limit, _paths, next_paths, _diff_script) when diag > limit do
{:next, :lists.reverse(next_paths)}
end
defp each_diagonal(diag, limit, paths, next_paths, diff_script) do
{path, rest} = proceed_path(diag, limit, paths, diff_script)
case follow_snake(path) do
{:cont, path} -> each_diagonal(diag + 2, limit, rest, [path | next_paths], diff_script)
{:done, edits} -> {:done, edits}
end
end
defp proceed_path(0, 0, [path], _diff_script), do: {path, []}
defp proceed_path(diag, limit, [path | _] = paths, diff_script) when diag == -limit do
{move_down(path, diff_script), paths}
end
defp proceed_path(diag, limit, [path], diff_script) when diag == limit do
{move_right(path, diff_script), []}
end
defp proceed_path(_diag, _limit, [path1, path2 | rest], diff_script) do
if elem(path1, 0) > elem(path2, 0) do
{move_right(path1, diff_script), [path2 | rest]}
else
{move_down(path2, diff_script), [path2 | rest]}
end
end
defp move_right({y, [elem1 | rest1] = list1, [elem2 | rest2], edits}, diff_script)
when diff_script != nil do
if diff = diff_script.(elem1, elem2) do
{y + 1, rest1, rest2, [{:diff, diff} | edits]}
else
{y, list1, rest2, [{:ins, elem2} | edits]}
end
end
defp move_right({y, list1, [elem | rest], edits}, _diff_script) do
{y, list1, rest, [{:ins, elem} | edits]}
end
defp move_right({y, list1, [], edits}, _diff_script) do
{y, list1, [], edits}
end
defp move_down({y, [elem1 | rest1], [elem2 | rest2] = list2, edits}, diff_script)
when diff_script != nil do
if diff = diff_script.(elem1, elem2) do
{y + 1, rest1, rest2, [{:diff, diff} | edits]}
else
{y + 1, rest1, list2, [{:del, elem1} | edits]}
end
end
defp move_down({y, [elem | rest], list2, edits}, _diff_script) do
{y + 1, rest, list2, [{:del, elem} | edits]}
end
defp move_down({y, [], list2, edits}, _diff_script) do
{y + 1, [], list2, edits}
end
defp follow_snake({y, [elem | rest1], [elem | rest2], edits}) do
follow_snake({y + 1, rest1, rest2, [{:eq, elem} | edits]})
end
defp follow_snake({_y, [], [], edits}) do
{:done, edits}
end
defp follow_snake(path) do
{:cont, path}
end
## Helpers
# replace_at
defp do_replace_at([], _index, _value) do
[]
end
defp do_replace_at([_old | rest], 0, value) do
[value | rest]
end
defp do_replace_at([head | tail], index, value) do
[head | do_replace_at(tail, index - 1, value)]
end
# insert_at
defp do_insert_at([], _index, value) do
[value]
end
defp do_insert_at(list, 0, value) do
[value | list]
end
defp do_insert_at([head | tail], index, value) do
[head | do_insert_at(tail, index - 1, value)]
end
# update_at
defp do_update_at([value | list], 0, fun) do
[fun.(value) | list]
end
defp do_update_at([head | tail], index, fun) do
[head | do_update_at(tail, index - 1, fun)]
end
defp do_update_at([], _index, _fun) do
[]
end
# pop_at
defp do_pop_at([], _index, default, acc) do
{default, :lists.reverse(acc)}
end
defp do_pop_at([head | tail], 0, _default, acc) do
{head, :lists.reverse(acc, tail)}
end
defp do_pop_at([head | tail], index, default, acc) do
do_pop_at(tail, index - 1, default, [head | acc])
end
# zip
defp do_zip(list, acc) do
converter = fn x, acc -> do_zip_each(to_list(x), acc) end
case :lists.mapfoldl(converter, [], list) do
{_, nil} ->
:lists.reverse(acc)
{mlist, heads} ->
do_zip(mlist, [to_tuple(:lists.reverse(heads)) | acc])
end
end
defp do_zip_each(_, nil) do
{nil, nil}
end
defp do_zip_each([head | tail], acc) do
{tail, [head | acc]}
end
defp do_zip_each([], _) do
{nil, nil}
end
defp to_list(tuple) when is_tuple(tuple), do: Tuple.to_list(tuple)
defp to_list(list) when is_list(list), do: list
end
|
lib/elixir/lib/list.ex
| 0.887625 | 0.790692 |
list.ex
|
starcoder
|
defmodule Keyword do
@moduledoc """
A set of functions for working with keywords.
A keyword is a list of two-element tuples where the first
element of the tuple is an atom and the second element
can be any value.
A keyword may have duplicated keys so it is not strictly
a key-value store. However most of the functions in this module
behave exactly as a dictionary so they work similarly to
the functions you would find in the `Map` module.
For example, `Keyword.get/3` will get the first entry matching
the given key, regardless of whether duplicated entries exist.
Similarly, `Keyword.put/3` and `Keyword.delete/3` ensure all
duplicated entries for a given key are removed when invoked.
A handful of functions exist to handle duplicated keys, in
particular, `Enum.into/2` allows creating new keywords without
removing duplicated keys, `get_values/2` returns all values for
a given key and `delete_first/2` deletes just one of the existing
entries.
The functions in Keyword do not guarantee any property when
it comes to ordering. However, since a keyword list is simply a
list, all the operations defined in `Enum` and `List` can be
applied too, especially when ordering is required.
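
For example, with duplicated keys:

    iex> keyword = [a: 1, a: 2]
    iex> Keyword.get(keyword, :a)
    1
    iex> Keyword.get_values(keyword, :a)
    [1, 2]
    iex> Keyword.put(keyword, :a, 3)
    [a: 3]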
"""
@compile :inline_list_funcs
@type key :: atom
@type value :: any
@type t :: [{key, value}]
@type t(value) :: [{key, value}]
@doc """
Returns `true` if `term` is a keyword list; otherwise returns `false`.
## Examples
iex> Keyword.keyword?([])
true
iex> Keyword.keyword?([a: 1])
true
iex> Keyword.keyword?([{Foo, 1}])
true
iex> Keyword.keyword?([{}])
false
iex> Keyword.keyword?([:key])
false
iex> Keyword.keyword?(%{})
false
"""
@spec keyword?(term) :: boolean
def keyword?(term)
def keyword?([{key, _value} | rest]) when is_atom(key), do: keyword?(rest)
def keyword?([]), do: true
def keyword?(_other), do: false
@doc """
Returns an empty keyword list, i.e. an empty list.
## Examples
iex> Keyword.new()
[]
"""
@spec new :: []
def new, do: []
@doc """
Creates a keyword from an enumerable.
Duplicated entries are removed, the latest one prevails.
Unlike `Enum.into(enumerable, [])`, `Keyword.new(enumerable)`
guarantees the keys are unique.
## Examples
iex> Keyword.new([{:b, 1}, {:a, 2}])
[b: 1, a: 2]
iex> Keyword.new([{:a, 1}, {:a, 2}, {:a, 3}])
[a: 3]
"""
@spec new(Enum.t) :: t
def new(pairs) do
new(pairs, fn pair -> pair end)
end
@doc """
Creates a keyword from an enumerable via the transformation function.
Duplicated entries are removed, the latest one prevails.
Unlike `Enum.into(enumerable, [], fun)`,
`Keyword.new(enumerable, fun)` guarantees the keys are unique.
## Examples
iex> Keyword.new([:a, :b], fn (x) -> {x, x} end)
[a: :a, b: :b]
"""
@spec new(Enum.t, (term -> {key, value})) :: t
def new(pairs, transform) do
fun = fn el, acc ->
{k, v} = transform.(el)
put_new(acc, k, v)
end
:lists.foldl(fun, [], Enum.reverse(pairs))
end
@doc """
Gets the value for a specific `key`.
If `key` does not exist, return the default value
(`nil` if no default value).
If duplicated entries exist, the first one is returned.
Use `get_values/2` to retrieve all entries.
## Examples
iex> Keyword.get([], :a)
nil
iex> Keyword.get([a: 1], :a)
1
iex> Keyword.get([a: 1], :b)
nil
iex> Keyword.get([a: 1], :b, 3)
3
With duplicated keys:
iex> Keyword.get([a: 1, a: 2], :a, 3)
1
iex> Keyword.get([a: 1, a: 2], :b, 3)
3
"""
@spec get(t, key) :: value
@spec get(t, key, value) :: value
def get(keywords, key, default \\ nil) when is_list(keywords) and is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{^key, value} -> value
false -> default
end
end
@doc """
Gets the value for a specific `key`.
If `key` does not exist, lazily evaluates `fun` and returns its result.
This is useful if the default value is very expensive to calculate or
generally difficult to setup and teardown again.
If duplicated entries exist, the first one is returned.
Use `get_values/2` to retrieve all entries.
## Examples
iex> keyword = [a: 1]
iex> fun = fn ->
...> # some expensive operation here
...> 13
...> end
iex> Keyword.get_lazy(keyword, :a, fun)
1
iex> Keyword.get_lazy(keyword, :b, fun)
13
"""
@spec get_lazy(t, key, (() -> value)) :: value
def get_lazy(keywords, key, fun)
when is_list(keywords) and is_atom(key) and is_function(fun, 0) do
case :lists.keyfind(key, 1, keywords) do
{^key, value} -> value
false -> fun.()
end
end
@doc """
Gets the value from `key` and updates it, all in one pass.
This `fun` argument receives the value of `key` (or `nil` if `key`
is not present) and must return a two-element tuple: the "get" value
(the retrieved value, which can be operated on before being returned)
and the new value to be stored under `key`. The `fun` may also
return `:pop`, implying the current value shall be removed from the
keyword list and returned.
The returned value is a tuple with the "get" value returned by
`fun` and a new keyword list with the updated value under `key`.
## Examples
iex> Keyword.get_and_update([a: 1], :a, fn current_value ->
...> {current_value, "new value!"}
...> end)
{1, [a: "new value!"]}
iex> Keyword.get_and_update([a: 1], :b, fn current_value ->
...> {current_value, "new value!"}
...> end)
{nil, [b: "new value!", a: 1]}
iex> Keyword.get_and_update([a: 1], :a, fn _ -> :pop end)
{1, []}
iex> Keyword.get_and_update([a: 1], :b, fn _ -> :pop end)
{nil, [a: 1]}
"""
@spec get_and_update(t, key, (value -> {get, value} | :pop)) :: {get, t} when get: term
def get_and_update(keywords, key, fun)
when is_list(keywords) and is_atom(key),
do: get_and_update(keywords, [], key, fun)
defp get_and_update([{key, current}|t], acc, key, fun) do
case fun.(current) do
{get, value} -> {get, :lists.reverse(acc, [{key, value}|t])}
:pop -> {current, :lists.reverse(acc, t)}
end
end
defp get_and_update([h|t], acc, key, fun),
do: get_and_update(t, [h|acc], key, fun)
defp get_and_update([], acc, key, fun) do
case fun.(nil) do
{get, update} -> {get, [{key, update}|:lists.reverse(acc)]}
:pop -> {nil, :lists.reverse(acc)}
end
end
@doc """
Gets the value from `key` and updates it. Raises if there is no `key`.
This `fun` argument receives the value of `key` and must return a
two-element tuple: the "get" value (the retrieved value, which can be
operated on before being returned) and the new value to be stored under
`key`.
The returned value is a tuple with the "get" value returned by `fun` and a new
keyword list with the updated value under `key`.
## Examples
iex> Keyword.get_and_update!([a: 1], :a, fn(current_value) ->
...> {current_value, "new value!"}
...> end)
{1, [a: "new value!"]}
iex> Keyword.get_and_update!([a: 1], :b, fn current_value ->
...> {current_value, "new value!"}
...> end)
** (KeyError) key :b not found in: [a: 1]
"""
@spec get_and_update!(t, key, (value -> {get, value})) :: {get, t} | no_return when get: term
def get_and_update!(keywords, key, fun) do
get_and_update!(keywords, key, fun, [])
end
defp get_and_update!([{key, value}|keywords], key, fun, acc) do
{get, value} = fun.(value)
{get, :lists.reverse(acc, [{key, value}|delete(keywords, key)])}
end
defp get_and_update!([{_, _} = e|keywords], key, fun, acc) do
get_and_update!(keywords, key, fun, [e|acc])
end
defp get_and_update!([], key, _fun, acc) when is_atom(key) do
raise(KeyError, key: key, term: acc)
end
@doc """
Fetches the value for a specific `key` and returns it in a tuple.
If the `key` does not exist, returns `:error`.
## Examples
iex> Keyword.fetch([a: 1], :a)
{:ok, 1}
iex> Keyword.fetch([a: 1], :b)
:error
"""
@spec fetch(t, key) :: {:ok, value} | :error
def fetch(keywords, key) when is_list(keywords) and is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{^key, value} -> {:ok, value}
false -> :error
end
end
@doc """
Fetches the value for specific `key`.
If `key` does not exist, a `KeyError` is raised.
## Examples
iex> Keyword.fetch!([a: 1], :a)
1
iex> Keyword.fetch!([a: 1], :b)
** (KeyError) key :b not found in: [a: 1]
"""
@spec fetch!(t, key) :: value | no_return
def fetch!(keywords, key) when is_list(keywords) and is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{^key, value} -> value
false -> raise(KeyError, key: key, term: keywords)
end
end
@doc """
Gets all values for a specific `key`.
## Examples
iex> Keyword.get_values([], :a)
[]
iex> Keyword.get_values([a: 1], :a)
[1]
iex> Keyword.get_values([a: 1, a: 2], :a)
[1, 2]
"""
@spec get_values(t, key) :: [value]
def get_values(keywords, key) when is_list(keywords) and is_atom(key) do
fun = fn
{k, v} when k === key -> {true, v}
{_, _} -> false
end
:lists.filtermap(fun, keywords)
end
@doc """
Returns all keys from the keyword list.
Duplicated keys appear duplicated in the final list of keys.
## Examples
iex> Keyword.keys([a: 1, b: 2])
[:a, :b]
iex> Keyword.keys([a: 1, b: 2, a: 3])
[:a, :b, :a]
"""
@spec keys(t) :: [key]
def keys(keywords) when is_list(keywords) do
:lists.map(fn {k, _} -> k end, keywords)
end
@doc """
Returns all values from the keyword list.
Values from duplicated keys will be kept in the final list of values.
## Examples
iex> Keyword.values([a: 1, b: 2])
[1, 2]
iex> Keyword.values([a: 1, b: 2, a: 3])
[1, 2, 3]
"""
@spec values(t) :: [value]
def values(keywords) when is_list(keywords) do
:lists.map(fn {_, v} -> v end, keywords)
end
@doc """
Deletes the entries in the keyword list for a `key` with `value`.
If no `key` with `value` exists, returns the keyword list unchanged.
## Examples
iex> Keyword.delete([a: 1, b: 2], :a, 1)
[b: 2]
iex> Keyword.delete([a: 1, b: 2, a: 3], :a, 3)
[a: 1, b: 2]
iex> Keyword.delete([a: 1], :a, 5)
[a: 1]
iex> Keyword.delete([a: 1], :b, 5)
[a: 1]
"""
@spec delete(t, key, value) :: t
def delete(keywords, key, value) when is_list(keywords) and is_atom(key) do
:lists.filter(fn {k, v} -> k != key or v != value end, keywords)
end
@doc """
Deletes the entries in the keyword list for a specific `key`.
If the `key` does not exist, returns the keyword list unchanged.
Use `delete_first/2` to delete just the first entry in case of
duplicated keys.
## Examples
iex> Keyword.delete([a: 1, b: 2], :a)
[b: 2]
iex> Keyword.delete([a: 1, b: 2, a: 3], :a)
[b: 2]
iex> Keyword.delete([b: 2], :a)
[b: 2]
"""
@spec delete(t, key) :: t
def delete(keywords, key) when is_list(keywords) and is_atom(key) do
:lists.filter(fn {k, _} -> k != key end, keywords)
end
@doc """
Deletes the first entry in the keyword list for a specific `key`.
If the `key` does not exist, returns the keyword list unchanged.
## Examples
iex> Keyword.delete_first([a: 1, b: 2, a: 3], :a)
[b: 2, a: 3]
iex> Keyword.delete_first([b: 2], :a)
[b: 2]
"""
@spec delete_first(t, key) :: t
def delete_first(keywords, key) when is_list(keywords) and is_atom(key) do
:lists.keydelete(key, 1, keywords)
end
@doc """
Puts the given `value` under `key`.
If a previous value is already stored, all entries are
removed and the value is overridden.
## Examples
iex> Keyword.put([a: 1], :b, 2)
[b: 2, a: 1]
iex> Keyword.put([a: 1, b: 2], :a, 3)
[a: 3, b: 2]
iex> Keyword.put([a: 1, b: 2, a: 4], :a, 3)
[a: 3, b: 2]
"""
@spec put(t, key, value) :: t
def put(keywords, key, value) when is_list(keywords) and is_atom(key) do
[{key, value}|delete(keywords, key)]
end
@doc """
Evaluates `fun` and puts the result under `key`
in keyword list unless `key` is already present.
This is useful if the value is very expensive to calculate or
generally difficult to setup and teardown again.
## Examples
iex> keyword = [a: 1]
iex> fun = fn ->
...> # some expensive operation here
...> 3
...> end
iex> Keyword.put_new_lazy(keyword, :a, fun)
[a: 1]
iex> Keyword.put_new_lazy(keyword, :b, fun)
[b: 3, a: 1]
"""
@spec put_new_lazy(t, key, (() -> value)) :: t
def put_new_lazy(keywords, key, fun)
when is_list(keywords) and is_atom(key) and is_function(fun, 0) do
case :lists.keyfind(key, 1, keywords) do
{^key, _} -> keywords
false -> [{key, fun.()}|keywords]
end
end
@doc """
Puts the given `value` under `key` unless the entry `key`
already exists.
## Examples
iex> Keyword.put_new([a: 1], :b, 2)
[b: 2, a: 1]
iex> Keyword.put_new([a: 1, b: 2], :a, 3)
[a: 1, b: 2]
"""
@spec put_new(t, key, value) :: t
def put_new(keywords, key, value) when is_list(keywords) and is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{^key, _} -> keywords
false -> [{key, value}|keywords]
end
end
@doc """
Checks if two keywords are equal.
Two keywords are considered to be equal if they contain
the same keys and those keys contain the same values.
## Examples
iex> Keyword.equal?([a: 1, b: 2], [b: 2, a: 1])
true
iex> Keyword.equal?([a: 1, b: 2], [b: 1, a: 2])
false
iex> Keyword.equal?([a: 1, b: 2, a: 3], [b: 2, a: 3, a: 1])
true
"""
@spec equal?(t, t) :: boolean
def equal?(left, right) when is_list(left) and is_list(right) do
:lists.sort(left) == :lists.sort(right)
end
@doc """
Merges two keyword lists into one.
All keys, including duplicated keys, given in `keywords2` will be added
to `keywords1`, overriding any existing one.
There are no guarantees about the order of keys in the returned keyword.
## Examples
iex> Keyword.merge([a: 1, b: 2], [a: 3, d: 4])
[b: 2, a: 3, d: 4]
iex> Keyword.merge([a: 1, b: 2], [a: 3, d: 4, a: 5])
[b: 2, a: 3, d: 4, a: 5]
"""
@spec merge(t, t) :: t
def merge(keywords1, keywords2) when is_list(keywords1) and is_list(keywords2) do
fun = fn {k, _v} -> not has_key?(keywords2, k) end
:lists.filter(fun, keywords1) ++ keywords2
end
@doc """
Merges two keyword lists into one.
All keys, including duplicated keys, given in `keywords2` will be added
to `keywords1`. The given function will be invoked to solve conflicts.
If `keywords2` has duplicate keys, the given function will be invoked
for each matching pair in `keywords1`.
There are no guarantees about the order of keys in the returned keyword.
## Examples
iex> Keyword.merge([a: 1, b: 2], [a: 3, d: 4], fn _k, v1, v2 ->
...> v1 + v2
...> end)
[b: 2, a: 4, d: 4]
iex> Keyword.merge([a: 1, b: 2], [a: 3, d: 4, a: 5], fn :a, v1, v2 ->
...> v1 + v2
...> end)
[b: 2, a: 4, d: 4, a: 5]
iex> Keyword.merge([a: 1, b: 2, a: 3], [a: 3, d: 4, a: 5], fn :a, v1, v2 ->
...> v1 + v2
...> end)
[b: 2, a: 4, d: 4, a: 8]
"""
@spec merge(t, t, (key, value, value -> value)) :: t
def merge(keywords1, keywords2, fun) when is_list(keywords1) and is_list(keywords2) do
do_merge(keywords2, [], keywords1, keywords1, fun)
end
defp do_merge([{k, v2}|t], acc, rest, original, fun) do
case :lists.keyfind(k, 1, original) do
{^k, v1} ->
do_merge(t, [{k, fun.(k, v1, v2)}|acc],
delete(rest, k), :lists.keydelete(k, 1, original), fun)
false ->
do_merge(t, [{k, v2}|acc], rest, original, fun)
end
end
defp do_merge([], acc, rest, _original, _fun) do
rest ++ :lists.reverse(acc)
end
@doc """
Returns whether a given `key` exists in the given `keywords`.
## Examples
iex> Keyword.has_key?([a: 1], :a)
true
iex> Keyword.has_key?([a: 1], :b)
false
"""
@spec has_key?(t, key) :: boolean
def has_key?(keywords, key) when is_list(keywords) and is_atom(key) do
:lists.keymember(key, 1, keywords)
end
@doc """
Updates the `key` with the given function.
If the `key` does not exist, raises `KeyError`.
If there are duplicated keys, they are all removed and only the first one
is updated.
## Examples
iex> Keyword.update!([a: 1], :a, &(&1 * 2))
[a: 2]
iex> Keyword.update!([a: 1, a: 2], :a, &(&1 * 2))
[a: 2]
iex> Keyword.update!([a: 1], :b, &(&1 * 2))
** (KeyError) key :b not found in: [a: 1]
"""
@spec update!(t, key, (value -> value)) :: t | no_return
def update!(keywords, key, fun) do
update!(keywords, key, fun, keywords)
end
defp update!([{key, value}|keywords], key, fun, _dict) do
[{key, fun.(value)}|delete(keywords, key)]
end
defp update!([{_, _} = e|keywords], key, fun, dict) do
[e|update!(keywords, key, fun, dict)]
end
defp update!([], key, _fun, dict) when is_atom(key) do
raise(KeyError, key: key, term: dict)
end
@doc """
Updates the `key` in `keywords` with the given function.
If the `key` does not exist, inserts the given `initial` value.
If there are duplicated keys, they are all removed and only the first one
is updated.
## Examples
iex> Keyword.update([a: 1], :a, 13, &(&1 * 2))
[a: 2]
iex> Keyword.update([a: 1, a: 2], :a, 13, &(&1 * 2))
[a: 2]
iex> Keyword.update([a: 1], :b, 11, &(&1 * 2))
[a: 1, b: 11]
"""
@spec update(t, key, value, (value -> value)) :: t
def update(keywords, key, initial, fun)
def update([{key, value}|keywords], key, _initial, fun) do
[{key, fun.(value)}|delete(keywords, key)]
end
def update([{_, _} = e|keywords], key, initial, fun) do
[e|update(keywords, key, initial, fun)]
end
def update([], key, initial, _fun) when is_atom(key) do
[{key, initial}]
end
@doc """
Takes all entries corresponding to the given keys and extracts them into a
separate keyword list.
Returns a tuple with the new list and the old list with removed keys.
Keys for which there are no entries in the keyword list are ignored.
Entries with duplicated keys end up in the same keyword list.
## Examples
iex> Keyword.split([a: 1, b: 2, c: 3], [:a, :c, :e])
{[a: 1, c: 3], [b: 2]}
iex> Keyword.split([a: 1, b: 2, c: 3, a: 4], [:a, :c, :e])
{[a: 1, c: 3, a: 4], [b: 2]}
"""
def split(keywords, keys) when is_list(keywords) do
fun = fn {k, v}, {take, drop} ->
case k in keys do
true -> {[{k, v}|take], drop}
false -> {take, [{k, v}|drop]}
end
end
acc = {[], []}
{take, drop} = :lists.foldl(fun, acc, keywords)
{:lists.reverse(take), :lists.reverse(drop)}
end
@doc """
Takes all entries corresponding to the given keys and returns them in a new
keyword list.
Duplicated keys are preserved in the new keyword list.
## Examples
iex> Keyword.take([a: 1, b: 2, c: 3], [:a, :c, :e])
[a: 1, c: 3]
iex> Keyword.take([a: 1, b: 2, c: 3, a: 5], [:a, :c, :e])
[a: 1, c: 3, a: 5]
"""
def take(keywords, keys) when is_list(keywords) do
:lists.filter(fn {k, _} -> k in keys end, keywords)
end
@doc """
Drops the given keys from the keyword list.
Duplicated keys are preserved in the new keyword list.
## Examples
iex> Keyword.drop([a: 1, b: 2, c: 3], [:b, :d])
[a: 1, c: 3]
iex> Keyword.drop([a: 1, b: 2, b: 3, c: 3, a: 5], [:b, :d])
[a: 1, c: 3, a: 5]
"""
def drop(keywords, keys) when is_list(keywords) do
:lists.filter(fn {k, _} -> not k in keys end, keywords)
end
@doc """
Returns and removes all values associated with `key` in the keyword list.
All duplicated keys are removed. See `pop_first/3` for
removing only the first entry.
## Examples
iex> Keyword.pop([a: 1], :a)
{1, []}
iex> Keyword.pop([a: 1], :b)
{nil, [a: 1]}
iex> Keyword.pop([a: 1], :b, 3)
{3, [a: 1]}
iex> Keyword.pop([a: 1, a: 2], :a)
{1, []}
"""
@spec pop(t, key, value) :: {value, t}
def pop(keywords, key, default \\ nil) when is_list(keywords) do
case fetch(keywords, key) do
{:ok, value} ->
{value, delete(keywords, key)}
:error ->
{default, keywords}
end
end
@doc """
Lazily returns and removes all values associated with `key` in the keyword list.
This is useful if the default value is very expensive to calculate or
generally difficult to setup and teardown again.
All duplicated keys are removed. See `pop_first/3` for
removing only the first entry.
## Examples
iex> keyword = [a: 1]
iex> fun = fn ->
...> # some expensive operation here
...> 13
...> end
iex> Keyword.pop_lazy(keyword, :a, fun)
{1, []}
iex> Keyword.pop_lazy(keyword, :b, fun)
{13, [a: 1]}
"""
@spec pop_lazy(t, key, (() -> value)) :: {value, t}
def pop_lazy(keywords, key, fun)
when is_list(keywords) and is_function(fun, 0) do
case fetch(keywords, key) do
{:ok, value} ->
{value, delete(keywords, key)}
:error ->
{fun.(), keywords}
end
end
@doc """
Returns and removes the first value associated with `key` in the keyword list.
Duplicated keys are not removed.
## Examples
iex> Keyword.pop_first [a: 1], :a
{1, []}
iex> Keyword.pop_first [a: 1], :b
{nil, [a: 1]}
iex> Keyword.pop_first [a: 1], :b, 3
{3, [a: 1]}
iex> Keyword.pop_first [a: 1, a: 2], :a
{1, [a: 2]}
"""
@spec pop_first(t, key, value) :: {value, t}
def pop_first(keywords, key, default \\ nil) when is_list(keywords) do
case :lists.keytake(key, 1, keywords) do
{:value, {^key, value}, rest} -> {value, rest}
false -> {default, keywords}
end
end
@doc """
Returns the keyword list itself.
## Examples
iex> Keyword.to_list([a: 1])
[a: 1]
"""
def to_list(keyword) when is_list(keyword) do
keyword
end
@doc false
def size(keyword) do
IO.write :stderr, "warning: Keyword.size/1 is deprecated, please use Kernel.length/1\n" <>
Exception.format_stacktrace
length(keyword)
end
end
|
lib/elixir/lib/keyword.ex
| 0.898907 | 0.657291 |
keyword.ex
|
starcoder
|
defmodule Finch.MockHTTP2Server do
@moduledoc false
import ExUnit.Assertions
alias Mint.{HTTP2.Frame, HTTP2.HPACK}
defstruct [:socket, :encode_table, :decode_table]
@fixtures_dir Path.expand("../fixtures", __DIR__)
@ssl_opts [
mode: :binary,
packet: :raw,
active: false,
reuseaddr: true,
next_protocols_advertised: ["h2"],
alpn_preferred_protocols: ["h2"],
certfile: Path.join([@fixtures_dir, "selfsigned.pem"]),
keyfile: Path.join([@fixtures_dir, "selfsigned_key.pem"]),
]
def start_and_connect_with(options, fun) when is_list(options) and is_function(fun, 1) do
parent = self()
server_settings = Keyword.get(options, :server_settings, [])
{:ok, listen_socket} = :ssl.listen(0, @ssl_opts)
{:ok, {_address, port}} = :ssl.sockname(listen_socket)
task = Task.async(fn -> accept(listen_socket, parent, server_settings) end)
result = fun.(port)
{:ok, server_socket} = Task.await(task)
:ok = :ssl.setopts(server_socket, active: true)
server = %__MODULE__{
socket: server_socket,
encode_table: HPACK.new(4096),
decode_table: HPACK.new(4096)
}
{result, server}
end
@spec recv_next_frames(%__MODULE__{}, pos_integer()) :: [frame :: term(), ...]
def recv_next_frames(%__MODULE__{} = server, frame_count) when frame_count > 0 do
recv_next_frames(server, frame_count, [], "")
end
defp recv_next_frames(_server, 0, frames, buffer) do
if buffer == "" do
Enum.reverse(frames)
else
flunk("Expected no more data, got: #{inspect(buffer)}")
end
end
defp recv_next_frames(%{socket: server_socket} = server, n, frames, buffer) do
assert_receive {:ssl, ^server_socket, data}, 100
decode_next_frames(server, n, frames, buffer <> data)
end
defp decode_next_frames(_server, 0, frames, buffer) do
if buffer == "" do
Enum.reverse(frames)
else
flunk("Expected no more data, got: #{inspect(buffer)}")
end
end
defp decode_next_frames(server, n, frames, data) do
case Frame.decode_next(data) do
{:ok, frame, rest} ->
decode_next_frames(server, n - 1, [frame | frames], rest)
:more ->
recv_next_frames(server, n, frames, data)
other ->
flunk("Error decoding frame: #{inspect(other)}")
end
end
@spec encode_headers(%__MODULE__{}, Mint.Types.headers()) :: {%__MODULE__{}, hbf :: binary()}
def encode_headers(%__MODULE__{} = server, headers) when is_list(headers) do
headers = for {name, value} <- headers, do: {:store_name, name, value}
{hbf, encode_table} = HPACK.encode(headers, server.encode_table)
server = put_in(server.encode_table, encode_table)
{server, IO.iodata_to_binary(hbf)}
end
@spec decode_headers(%__MODULE__{}, binary()) :: {%__MODULE__{}, Mint.Types.headers()}
def decode_headers(%__MODULE__{} = server, hbf) when is_binary(hbf) do
assert {:ok, headers, decode_table} = HPACK.decode(hbf, server.decode_table)
server = put_in(server.decode_table, decode_table)
{server, headers}
end
def send_frames(%__MODULE__{socket: socket}, frames) when is_list(frames) and frames != [] do
# TODO: split the data at random places to increase fuzziness.
data = Enum.map(frames, &Frame.encode/1)
:ok = :ssl.send(socket, data)
end
@spec get_socket(%__MODULE__{}) :: :ssl.sslsocket()
def get_socket(server) do
server.socket
end
defp accept(listen_socket, parent, server_settings) do
{:ok, socket} = :ssl.transport_accept(listen_socket)
:ok = :ssl.ssl_accept(socket)
:ok = perform_http2_handshake(socket, server_settings)
# We transfer ownership of the socket to the parent so that this task can die.
:ok = :ssl.controlling_process(socket, parent)
{:ok, socket}
end
connection_preface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
defp perform_http2_handshake(socket, server_settings) do
import Mint.HTTP2.Frame, only: [settings: 1]
no_flags = Frame.set_flags(:settings, [])
ack_flags = Frame.set_flags(:settings, [:ack])
# First we get the connection preface.
{:ok, unquote(connection_preface) <> rest} = :ssl.recv(socket, 0, 100)
# Then we get a SETTINGS frame.
assert {:ok, frame, ""} = Frame.decode_next(rest)
assert settings(flags: ^no_flags, params: _params) = frame
# We reply with our SETTINGS.
:ok = :ssl.send(socket, Frame.encode(settings(params: server_settings)))
# We get the SETTINGS ack.
{:ok, data} = :ssl.recv(socket, 0, 100)
assert {:ok, frame, ""} = Frame.decode_next(data)
assert settings(flags: ^ack_flags, params: []) = frame
# We send the SETTINGS ack back.
:ok = :ssl.send(socket, Frame.encode(settings(flags: ack_flags, params: [])))
:ok
end
end
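# A hedged usage sketch (not part of the original file): driving the mock
# server from an ExUnit test. `MyHTTP2Client` is a hypothetical client under
# test; the callback receives the ephemeral port the server listens on.
defmodule Finch.MockHTTP2ServerExampleTest do
  use ExUnit.Case, async: false

  alias Finch.MockHTTP2Server

  test "client sends a frame after the handshake" do
    {client, server} =
      MockHTTP2Server.start_and_connect_with([], fn port ->
        # Hypothetical: connect the client under test to the mock server.
        MyHTTP2Client.connect(port: port)
      end)

    # Expect exactly one frame from the client, then no further data.
    assert [_frame] = MockHTTP2Server.recv_next_frames(server, 1)
    assert client
  end
end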
|
test/support/mock_http2_server.ex
| 0.658747 | 0.456955 |
mock_http2_server.ex
|
starcoder
|
defmodule GraphQL.Lang.AST do
defprotocol Visitor do
@moduledoc """
Implementations of Visitor are used by the ASTReducer to transform a GraphQL AST
into an arbitrary value.
The value can be the result of validations, or a transformation of the AST into
a new AST, for example.
The fallback implementations of 'enter' and 'leave' return the accumulator untouched.
"""
@fallback_to_any true
@doc """
Called when entering a node of the AST.
The return value should be:
{next_action, acc}
where next_action is either :break or :continue and acc is the new value of the accumulator.
:break will abort the visitor and AST traversal will cease returning the current value of the accumulator.
"""
def enter(visitor, node, accumulator)
@doc """
Called when leaving a node of the AST.
The return value should be:
acc
"""
def leave(visitor, node, accumulator)
end
defimpl Visitor, for: Any do
def enter(_visitor, _node, accumulator), do: {:continue, accumulator}
def leave(_visitor, _node, accumulator), do: accumulator
end
defprotocol InitialisingVisitor do
@moduledoc """
A Visitor that implements this protocol will have the opportunity to perform some
initialisation and set up the accumulator before AST traversal is started.
The fallback implementation returns the accumulator untouched.
"""
@fallback_to_any true
@doc """
Invoked before traversal begins. The Visitor can do any once-off accumulator initialisation here.
"""
def init(visitor, accumulator)
end
defprotocol PostprocessingVisitor do
@moduledoc """
A Visitor that implements this protocol will have the opportunity to transform
the accumulator into something more consumer friendly. It can often be the case that
the working form of the accumulator is not what should be returned from ASTReducer.reduce/3.
This also means that the accumulator (always a Map) can be transformed into an
arbitrary struct or other Erlang/Elixir type.
The fallback implementation returns the accumulator untouched.
"""
@fallback_to_any true
@doc """
Invoked once after traversal ends. This can be used to transform the accumulator
into an arbitrary value.
"""
def finish(visitor, accumulator)
end
defimpl InitialisingVisitor, for: Any do
def init(_visitor, accumulator), do: accumulator
end
defimpl PostprocessingVisitor, for: Any do
def finish(_visitor, accumulator), do: accumulator
end
end
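# A minimal illustration (not part of the original file): a visitor struct
# that counts every node it enters, relying on the `Any` fallbacks for the
# other protocols. An ASTReducer is assumed to drive the traversal.
defmodule GraphQL.Lang.AST.NodeCountingVisitor do
  defstruct []
end

defimpl GraphQL.Lang.AST.Visitor, for: GraphQL.Lang.AST.NodeCountingVisitor do
  # Bump a counter in the accumulator on every node entered.
  def enter(_visitor, _node, accumulator) do
    {:continue, Map.update(accumulator, :node_count, 1, &(&1 + 1))}
  end

  def leave(_visitor, _node, accumulator), do: accumulator
end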
|
lib/graphql/lang/ast/visitor.ex
| 0.842345 | 0.692499 |
visitor.ex
|
starcoder
|
defmodule GenRegistry do
@moduledoc """
GenRegistry provides a `Registry`-like interface for managing processes.
"""
@behaviour GenRegistry.Behaviour
use GenServer
alias :ets, as: ETS
alias GenRegistry.Types
defstruct [:worker_module, :worker_type, :workers]
@typedoc """
GenRegistry State.
- worker_module: Module to spawn
- worker_type: `:supervisor` if the worker_module is a supervisor, `:worker` otherwise
- workers: ETS table id holding the worker tracking records.
"""
@type t :: %__MODULE__{
worker_module: module,
worker_type: :supervisor | :worker,
workers: ETS.tab()
}
@gen_module Application.get_env(:gen_registry, :gen_module, GenServer)
## Client
@doc """
Callback called by `Supervisor.init/2`
It is required that you provide a `:worker_module` argument or the call will fail.
"""
@spec child_spec(opts :: Keyword.t()) :: Supervisor.child_spec()
def child_spec(opts) do
worker_module = Keyword.fetch!(opts, :worker_module)
opts =
opts
|> Keyword.delete(:worker_module)
|> Keyword.put_new(:name, worker_module)
%{
id: opts[:name],
start: {__MODULE__, :start_link, [worker_module, opts]},
type: :supervisor
}
end
@doc """
Start a registry instance.
GenRegistry should be run under a supervision tree, it is not recommended to call this directly.
"""
@spec start_link(module, Keyword.t()) :: {:ok, pid} | {:error, any}
def start_link(module, opts \\ []) do
GenServer.start_link(__MODULE__, {module, opts[:name]}, opts)
end
@doc """
Lookup a running a process.
This is a fast path to the ETS table.
"""
@spec lookup(table :: ETS.tab(), id :: Types.id()) :: {:ok, pid} | {:error, :not_found}
def lookup(table, id) do
case ETS.lookup(table, id) do
[{^id, pid}] -> {:ok, pid}
[] -> {:error, :not_found}
end
end
@doc """
Attempts to lookup a running process by id.
If the id is not associated with a running process, then it is spawned; the optional third
argument will be passed to the `worker_module`'s `start_link` to spawn the new process.
"""
@spec lookup_or_start(registry :: GenServer.server(), id :: Types.id(), args :: [any], timeout :: integer) ::
{:ok, pid} | {:error, any}
def lookup_or_start(registry, id, args \\ [], timeout \\ 5_000)
def lookup_or_start(registry, id, args, timeout) when is_atom(registry) do
case lookup(registry, id) do
{:ok, pid} ->
{:ok, pid}
{:error, :not_found} ->
@gen_module.call(registry, {:lookup_or_start, id, args}, timeout)
end
end
def lookup_or_start(registry, id, args, timeout) do
@gen_module.call(registry, {:lookup_or_start, id, args}, timeout)
end
@doc """
Safely stops a process managed by the GenRegistry
In addition to stopping the process, the id is also removed from the GenRegistry.
If the id provided is not registered, this will return `{:error, :not_found}`.
"""
@spec stop(registry :: GenServer.server(), id :: Types.id()) :: :ok | {:error, :not_found}
def stop(registry, id) do
@gen_module.call(registry, {:stop, id})
end
@doc """
Return the number of running processes in this registry.
"""
@spec count(table :: ETS.tab()) :: non_neg_integer()
def count(table) do
ETS.info(table, :size)
end
@doc """
Loop over all the processes and return result.
The function will be called with two arguments, a two-tuple of `{id, pid}` and then accumulator,
the function should return the accumulator.
There is no ordering guarantee when reducing.
"""
@spec reduce(table :: ETS.tab(), acc :: any, ({Types.id(), pid()}, any() -> any())) :: any
def reduce(table, acc, func) do
ETS.foldr(func, acc, table)
end
## Server Callbacks
def init({worker_module, name}) do
Process.flag(:trap_exit, true)
worker_type =
case worker_module.module_info[:attributes][:behaviour] do
[:supervisor] -> :supervisor
_ -> :worker
end
state = %__MODULE__{
workers:
ETS.new(name, [
:public,
:set,
:named_table,
{:read_concurrency, true}
]),
worker_module: worker_module,
worker_type: worker_type
}
{:ok, state}
end
def terminate(_reason, _state) do
for pid <- Process.get_keys(), is_pid(pid) do
Process.unlink(pid)
Process.exit(pid, :kill)
end
:ok
end
def handle_call({:lookup_or_start, id, args}, _from, state) do
{:reply, do_lookup_or_start(state, id, args), state}
end
def handle_call({:stop, id}, _from, state) do
{:reply, do_stop(state, id), state}
end
# Call from supervisor module.
def handle_call(
:which_children,
_from,
%__MODULE__{worker_type: worker_type, worker_module: worker_module} = state
) do
children =
for pid <- Process.get_keys(), is_pid(pid) do
{:undefined, pid, worker_type, [worker_module]}
end
{:reply, children, state}
end
def handle_call(
:count_children,
_from,
%__MODULE__{worker_type: worker_type, worker_module: worker_module} = state
) do
counts = [
specs: 1,
active: count(worker_module),
supervisors: 0,
workers: 0
]
counts =
case worker_type do
:worker -> Keyword.put(counts, :workers, counts[:active])
:supervisor -> Keyword.put(counts, :supervisors, counts[:active])
end
{:reply, counts, state}
end
def handle_call(_message, _from, state) do
{:reply, {:error, __MODULE__}, state}
end
def handle_info({:EXIT, pid, _reason}, %__MODULE__{workers: workers} = state) do
ETS.delete(workers, Process.delete(pid))
{:noreply, state}
end
def handle_info(_message, state) do
{:noreply, state}
end
## Private
@spec do_lookup_or_start(state :: t, id :: Types.id(), args :: [any]) ::
{:ok, pid} | {:error, any}
defp do_lookup_or_start(%__MODULE__{worker_module: worker_module, workers: workers}, id, args) do
case lookup(workers, id) do
{:ok, pid} ->
{:ok, pid}
{:error, :not_found} ->
case apply(worker_module, :start_link, args) do
{:ok, pid} ->
ETS.insert_new(workers, {id, pid})
Process.put(pid, id)
{:ok, pid}
{:error, reason} ->
{:error, reason}
end
end
end
@spec do_stop(state :: t, id :: Types.id()) :: :ok | {:error, :not_found}
defp do_stop(%__MODULE__{workers: workers}, id) do
with {:ok, pid} <- lookup(workers, id) do
Process.unlink(pid)
Process.exit(pid, :shutdown)
ETS.delete(workers, Process.delete(pid))
:ok
end
end
end
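# A hedged usage sketch (not part of the original file). `ExampleWorker` is a
# stand-in worker with a conventional `start_link/1`.
defmodule GenRegistry.ExampleWorker do
  use Agent

  def start_link(initial), do: Agent.start_link(fn -> initial end)
end

defmodule GenRegistry.Example do
  def demo do
    children = [{GenRegistry, worker_module: GenRegistry.ExampleWorker}]
    {:ok, _sup} = Supervisor.start_link(children, strategy: :one_for_one)

    # Spawns a worker for :some_id on first call, then returns the same pid.
    {:ok, pid} = GenRegistry.lookup_or_start(GenRegistry.ExampleWorker, :some_id, [0])
    {:ok, ^pid} = GenRegistry.lookup(GenRegistry.ExampleWorker, :some_id)

    GenRegistry.count(GenRegistry.ExampleWorker)
  end
end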
|
lib/gen_registry.ex
| 0.8488 | 0.404537 |
gen_registry.ex
|
starcoder
|
defmodule Sanbase.Billing.Plan.AccessChecker do
@moduledoc """
Module that contains functions for determining access based on the subscription
plan.
Adding new queries or updating the subscription plan does not require this
module to be changed.
The subscription plan needed for a given query is given in the query definition
```
field :network_growth, list_of(:network_growth) do
meta(access: :restricted, min_plan: [sanapi: :pro, sanbase: :free])
...
end
```
This module knows how to inspect the GraphQL schema that is being built
at compile time and build the needed sets of data, also at compile time. There
are no checks for mutations.
Additionally, this module will raise a compile-time warning if there is a
query without a subscription plan defined.
The actual historical/realtime restrictions are implemented in modules:
- ApiAccessChecker
- SanbaseAccessChecker
as we have different restrictions.
"""
alias Sanbase.Billing.{Product, Subscription, GraphqlSchema}
alias Sanbase.Billing.Plan.{CustomAccess, ApiAccessChecker, SanbaseAccessChecker}
@doc documentation_ref: "# DOCS access-plans/index.md"
case GraphqlSchema.get_queries_without_access_level() do
[] ->
:ok
queries ->
require Sanbase.Break, as: Break
Break.break("""
There are GraphQL queries defined without specifying their access level.
The access level could be either `free` or `restricted`.
Queries without access level: #{inspect(queries)}
""")
end
@type query_or_argument :: {:metric, String.t()} | {:signal, String.t()} | {:query, atom()}
@extension_metrics GraphqlSchema.get_all_with_access_level(:extension)
def extension_metrics(), do: @extension_metrics
@free_query_or_argument GraphqlSchema.get_all_with_access_level(:free)
@free_query_or_argument_mapset MapSet.new(@free_query_or_argument)
def free_query_or_argument_mapset(), do: @free_query_or_argument_mapset
@restricted_metrics GraphqlSchema.get_all_with_access_level(:restricted)
@restricted_metrics_mapset MapSet.new(@restricted_metrics)
def restricted_metrics_mapset(), do: @restricted_metrics_mapset
@all_metrics @free_query_or_argument ++ @restricted_metrics
def all_metrics, do: @all_metrics
@custom_access_queries_stats CustomAccess.get()
@custom_access_queries @custom_access_queries_stats |> Map.keys() |> Enum.sort()
@custom_access_queries_mapset MapSet.new(@custom_access_queries)
@free_subscription Subscription.free_subscription()
@min_plan_map GraphqlSchema.min_plan_map()
# Raise an error if there are queries with custom access logic that are marked
# as free. If there are such queries the access restriction logic will never
# be applied
free_and_custom_intersection =
MapSet.intersection(@custom_access_queries_mapset, @free_query_or_argument_mapset)
case Enum.empty?(free_and_custom_intersection) do
true ->
:ok
false ->
require Sanbase.Break, as: Break
Break.break("""
There are queries with access level `:free` that are defined in the
CustomAccess module. These queries custom access logic will never be
executed.
Queries defined in the CustomAccess module but do not have the `:restricted`
access level field: #{inspect(free_and_custom_intersection |> Enum.to_list())}
""")
end
@doc ~s"""
Check if full access to a query is given only to users with a plan higher than free.
A query can be restricted but still accessible by non-paying users or users with
lower plans. In this case historical and/or realtime data access can be cut off.
"""
@spec is_restricted?(query_or_argument) :: boolean()
def is_restricted?(query_or_argument),
do: query_or_argument not in @free_query_or_argument_mapset
@spec plan_has_access?(plan, product, query_or_argument) :: boolean()
when plan: atom(), product: binary()
def plan_has_access?(plan, product, query_or_argument) do
case min_plan(product, query_or_argument) do
:free -> true
:basic -> plan != :free
:pro -> plan not in [:free, :basic]
:premium -> plan not in [:free, :basic, :pro]
:custom -> plan == :custom
# extensions plans can be with other plan. They're handled separately
_ -> true
end
end
@spec min_plan(product, query_or_argument) :: atom() when product: binary()
def min_plan(product, query_or_argument) do
@min_plan_map[query_or_argument][product] || :free
end
@spec get_available_metrics_for_plan(product, plan, restriction_type) :: list(binary())
when plan: atom(), product: binary(), restriction_type: atom()
def get_available_metrics_for_plan(product, plan, restriction_type \\ :all) do
case restriction_type do
:free -> @free_query_or_argument
:restricted -> @restricted_metrics
:custom -> @custom_access_queries
:all -> @all_metrics
end
|> Stream.filter(&match?({:metric, _}, &1))
|> Stream.filter(&plan_has_access?(plan, product, &1))
|> Enum.map(fn {_, name} -> name end)
end
def custom_access_queries_stats(), do: @custom_access_queries_stats
def custom_access_queries(), do: @custom_access_queries
def is_historical_data_allowed?({:metric, metric}) do
Sanbase.Metric.is_historical_data_allowed?(metric)
end
def is_historical_data_allowed?({:signal, signal}) do
Sanbase.Signal.is_historical_data_allowed?(signal)
end
# The call in historical_data_in_days/3 may pass down {:query, _}
def is_historical_data_allowed?({:query, _}), do: false
def is_realtime_data_allowed?({:metric, metric}) do
Sanbase.Metric.is_realtime_data_allowed?(metric)
end
def is_realtime_data_allowed?({:signal, signal}) do
Sanbase.Signal.is_realtime_data_allowed?(signal)
end
# The call in realtime_data_cut_off_in_days/3 may pass down {:query, _}
def is_realtime_data_allowed?({:query, _}), do: false
@product_to_access_module [
{Product.product_api(), ApiAccessChecker},
{Product.product_sanbase(), SanbaseAccessChecker}
]
@doc """
If the result from this function is nil, then no restrictions are applied.
Respectively the `restrictedFrom` field has a value of nil as well.
"""
@spec historical_data_in_days(atom(), non_neg_integer(), query_or_argument()) ::
non_neg_integer() | nil
def historical_data_in_days(plan, _product_id, query_or_argument)
when query_or_argument in @custom_access_queries do
if not is_historical_data_allowed?(query_or_argument) do
Map.get(@custom_access_queries_stats, query_or_argument)
|> get_in([:plan_access, plan, :historical_data_in_days])
end
end
for {product_id, module} <- @product_to_access_module do
def historical_data_in_days(plan, unquote(product_id), query_or_argument) do
if not is_historical_data_allowed?(query_or_argument) do
unquote(module).historical_data_in_days(plan, query_or_argument)
end
end
end
@doc """
If the result from this function is nil, then no restrictions are applied.
Respectively the `restrictedTo` field has a value of nil as well.
"""
@spec realtime_data_cut_off_in_days(atom(), non_neg_integer(), query_or_argument()) ::
non_neg_integer() | nil
def realtime_data_cut_off_in_days(plan, _product_id, query_or_argument)
when query_or_argument in @custom_access_queries do
if not is_realtime_data_allowed?(query_or_argument) do
Map.get(@custom_access_queries_stats, query_or_argument)
|> get_in([:plan_access, plan, :realtime_data_cut_off_in_days])
end
end
for {product_id, module} <- @product_to_access_module do
def realtime_data_cut_off_in_days(plan, unquote(product_id), query_or_argument) do
if not is_realtime_data_allowed?(query_or_argument) do
unquote(module).realtime_data_cut_off_in_days(plan, query_or_argument)
end
end
end
def user_can_create_alert?(user, subscription) do
subscription = subscription || @free_subscription
SanbaseAccessChecker.alerts_limits_not_reached?(user, subscription)
end
def alerts_limits_upgrade_message(), do: SanbaseAccessChecker.alerts_limits_upgrade_message()
end
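# An illustrative sketch (not part of the original file): combining the
# restriction and plan checks above. The plan, product, and metric values
# passed in are placeholder assumptions.
defmodule AccessCheckExample do
  alias Sanbase.Billing.Plan.AccessChecker

  # True when the query is free, or when the plan meets the query's min plan.
  def allowed?(plan, product, metric) do
    query = {:metric, metric}

    not AccessChecker.is_restricted?(query) or
      AccessChecker.plan_has_access?(plan, product, query)
  end
end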
|
lib/sanbase/billing/plan/access_checker.ex
| 0.897318 | 0.763087 |
access_checker.ex
|
starcoder
|
defmodule Resx.Resource.Reference.Integrity do
@moduledoc """
The integrity of a resource.
%Resx.Resource.Reference.Integrity{
checksum: { :crc32, 3829359344 },
timestamp: DateTime.utc_now
}
"""
alias Resx.Resource.Reference.Integrity
@enforce_keys [:timestamp]
defstruct [:checksum, :timestamp]
@type algo :: atom
@type checksum :: { algo, any }
@type t :: %Integrity{
checksum: nil | checksum,
timestamp: DateTime.t
}
@doc """
Compare two integrities.
The result is a tuple with the first element being the result of the
comparison between the two checksums. If the checksums are equal then it will
be true, if the checksum algorithms are the same but the hashes are not equal
it will be false, otherwise if the checksums cannot be compared it will be
`nil`. The second element will be the result of a `DateTime.compare/2` on the
two timestamps.
iex> Resx.Resource.Reference.Integrity.compare(%Resx.Resource.Reference.Integrity{ timestamp: DateTime.from_unix!(0) }, %Resx.Resource.Reference.Integrity{ timestamp: DateTime.from_unix!(0) })
{ nil, :eq }
iex> Resx.Resource.Reference.Integrity.compare(%Resx.Resource.Reference.Integrity{ checksum: { :foo, 1 }, timestamp: DateTime.from_unix!(1) }, %Resx.Resource.Reference.Integrity{ checksum: { :foo, 1 }, timestamp: DateTime.from_unix!(0) })
{ true, :gt }
iex> Resx.Resource.Reference.Integrity.compare(%Resx.Resource.Reference.Integrity{ checksum: { :foo, 2 }, timestamp: DateTime.from_unix!(0) }, %Resx.Resource.Reference.Integrity{ checksum: { :foo, 1 }, timestamp: DateTime.from_unix!(1) })
{ false, :lt }
iex> Resx.Resource.Reference.Integrity.compare(%Resx.Resource.Reference.Integrity{ checksum: { :bar, 1 }, timestamp: DateTime.from_unix!(0) }, %Resx.Resource.Reference.Integrity{ checksum: { :foo, 1 }, timestamp: DateTime.from_unix!(0) })
{ nil, :eq }
"""
@spec compare(t, t) :: { checksum :: nil | boolean, timestamp :: :lt | :eq | :gt }
def compare(%Integrity{ checksum: nil, timestamp: a }, %Integrity{ checksum: nil, timestamp: b }), do: { nil, DateTime.compare(a, b)}
def compare(%Integrity{ checksum: checksum, timestamp: a }, %Integrity{ checksum: checksum, timestamp: b }), do: { true, DateTime.compare(a, b)}
def compare(%Integrity{ checksum: { algo, _ }, timestamp: a }, %Integrity{ checksum: { algo, _ }, timestamp: b }), do: { false, DateTime.compare(a, b)}
def compare(%Integrity{ timestamp: a }, %Integrity{ timestamp: b }), do: { nil, DateTime.compare(a, b)}
end
|
lib/resx/resource/reference/integrity.ex
| 0.910202 | 0.468547 |
integrity.ex
|
starcoder
|
defmodule Mix.Tasks.Alice.New.Handler do
@moduledoc ~S"""
Generates a new Alice handler.
This is the easiest way to set up a new Alice handler.
## Install `alice.new`
```bash
mix archive.install hex alice_new
```
## Build a Handler
First, navigate the command-line to the directory where you want to create
your new Alice handler. Then run the following commands: (change `my_handler`
to the name of your handler)
```bash
mix alice.new.handler my_handler
cd alice_my_handler
mix deps.get
```
## Writing Route Handlers
In lib/alice/handlers/my_handler.ex:
```elixir
defmodule Alice.Handlers.MyHandler do
use Alice.Router
command ~r/repeat after me: (?<term>.+)/i, :repeat
route ~r/repeat after me: (?<term>.+)/i, :repeat
@doc "`repeat after me: thing` - replies you said, 'thing'"
def repeat(conn) do
term = Alice.Conn.last_capture(conn)
response_text = "you said, '#{term}'"
reply(conn, response_text)
end
end
```
## Testing Handlers
Alice provides several helpers to make it easy to test your handlers. First
you'll need to add `use Alice.HandlerCase, handlers:
[YourHandler]`, passing it the handler you're trying to test. Then you can use
`message_received()` within your test, which will simulate a message coming
in from the chat backend and route it through to the handlers appropriately.
If you're wanting to invoke a command, you'll need to make sure your message
includes `<@alice>` within the string. From there you can use either
`first_reply()` to get the first reply sent out or `all_replies()` which will
return a List of replies that have been received during your test. You can
use either to use normal assertions on to ensure your handler behaves in the
manner you expect.
In `test/alice/handlers/my_handler_test.exs`:
```elixir
defmodule Alice.Handlers.MyHandlerTest do
use Alice.HandlerCase, handlers: Alice.Handlers.MyHandler
test "the repeat command repeats a term" do
send_message("<@alice> repeat after me: this is a boring handler")
assert first_reply() == "you said, 'this is a boring handler'"
end
test "the repeat route repeats a term" do
send_message("repeat after me: this is a boring handler")
assert first_reply() == "you said, 'this is a boring handler'"
end
end
```
## Registering Handlers
In the `mix.exs` file of your bot, add your handler to the list of handlers
to register on start
```elixir
def application do
[ applications: [:alice],
mod: {Alice, [Alice.Handlers.MyHandler] } ]
end
```
"""
use Mix.Task
alias AliceNew.{
HandlerGenerator,
Utilities
}
@shortdoc "Creates a new Alice v#{Utilities.alice_version()} handler"
@switches [
name: :string,
module: :string
]
def run([version]) when version in ~w[-v --version] do
Mix.shell().info("Alice v#{Utilities.alice_version()}")
end
def run(argv) do
case parse_opts(argv) do
{_opts, []} ->
Mix.Tasks.Help.run(["alice.new.handler"])
{opts, [path | _]} ->
Utilities.elixir_version_check!()
basename = Path.basename(Path.expand(path))
path = Path.join([Path.dirname(path), "alice_#{basename}"])
handler_name = opts[:name] || basename
app = "alice_#{handler_name}"
Utilities.check_handler_name!(handler_name, !opts[:name])
module_name = opts[:module] || Macro.camelize(handler_name)
Utilities.check_mod_name_validity!(module_name)
module = Utilities.handler_module(module_name)
unless path == "." do
Utilities.check_directory_existence!(path)
File.mkdir_p!(path)
end
File.cd!(path, fn ->
HandlerGenerator.generate(app, handler_name, module, path)
end)
end
end
defp parse_opts(argv) do
case OptionParser.parse(argv, strict: @switches) do
{opts, argv, []} ->
{opts, argv}
{_opts, _argv, [{name, _val} | _]} ->
Mix.raise("Invalid option: #{name}")
end
end
end
|
lib/mix/tasks/alice.new.handler.ex
| 0.791418 | 0.728941 |
alice.new.handler.ex
|
starcoder
|
defmodule Ueberauth.Strategy.Todoist do
@moduledoc """
"""
use Ueberauth.Strategy,
uid_field: :id,
default_scope: "",
oauth2_module: Ueberauth.Strategy.Todoist.OAuth
alias Ueberauth.Auth.Info
alias Ueberauth.Auth.Credentials
alias Ueberauth.Auth.Extra
@doc """
Handles the initial redirect to the Todoist authentication page.
To customize the scope (permissions) that are requested by Todoist, include them as part of your URL:
"/auth/todoist?scope=data:read"
You can also include a `state` param that Todoist will return to you.
"""
def handle_request!(conn) do
scopes = conn.params["scope"] || option(conn, :default_scope)
opts =
if conn.params["state"] do
[scope: scopes, state: conn.params["state"]]
else
[scope: scopes]
end
module = option(conn, :oauth2_module)
redirect!(conn, apply(module, :authorize_url!, [opts]))
end
@doc """
Handles the callback from Todoist. When there is a failure from Todoist the failure is included in the
`ueberauth_failure` struct. Otherwise the information returned from Todoist is returned in the `Ueberauth.Auth` struct.
"""
def handle_callback!(%Plug.Conn{params: %{"code" => code}} = conn) do
module = option(conn, :oauth2_module)
token = apply(module, :get_token!, [[code: code]])
if token.access_token == nil do
set_errors!(conn, [
error(token.other_params["error"], nil)
])
else
fetch_user(conn, token)
end
end
@doc false
def handle_callback!(conn) do
set_errors!(conn, [error("missing_code", "No code received")])
end
@doc """
Cleans up the private area of the connection used for passing the raw Todoist response around during the callback.
"""
def handle_cleanup!(conn) do
conn
|> put_private(:todoist_user, nil)
|> put_private(:todoist_token, nil)
end
@doc """
Fetches the uid field from the Todoist response. This defaults to the option `uid_field` which in turn defaults to `id`.
"""
def uid(conn) do
conn |> option(:uid_field) |> to_string() |> fetch_uid(conn)
end
@doc """
Includes the credentials from the Todoist response.
"""
def credentials(conn) do
token = conn.private.todoist_token
scope_string = token.other_params["scope"] || ""
scopes = String.split(scope_string, ",")
%Credentials{
token: token.access_token,
refresh_token: token.refresh_token,
expires_at: token.expires_at,
token_type: token.token_type,
expires: !!token.expires_at,
scopes: scopes
}
end
@doc """
Fetches the fields to populate the info section of the `Ueberauth.Auth` struct.
"""
def info(conn) do
user = conn.private.todoist_user
%Info{
name: user["full_name"],
email: user["email"],
location: user["location"],
image: user["avatar_medium"],
phone: user["mobile_number"]
}
end
@doc """
Stores the raw information (including the token) obtained from the Todoist callback.
"""
def extra(conn) do
%Extra{
raw_info: %{
token: conn.private.todoist_token,
user: conn.private.todoist_user
}
}
end
defp fetch_uid(field, conn) do
conn.private.todoist_user[field]
end
defp fetch_user(conn, token) do
conn = put_private(conn, :todoist_token, token)
resource_types = Ueberauth.json_library().encode!(["user"])
case Ueberauth.Strategy.Todoist.OAuth.post(
token,
"/sync",
%{sync_token: "*", resource_types: resource_types},
[{"content-type", "application/x-www-form-urlencoded"}]
) do
{:ok, %OAuth2.Response{status_code: 401, body: _body}} ->
set_errors!(conn, [error("token", "unauthorized")])
{:ok, %OAuth2.Response{status_code: status_code, body: %{"user" => user}}}
when status_code in 200..399 ->
put_private(conn, :todoist_user, user)
{:error, %OAuth2.Error{reason: reason}} ->
set_errors!(conn, [error("OAuth2", reason)])
end
end
defp option(conn, key) do
Keyword.get(options(conn), key, Keyword.get(default_options(), key))
end
end
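# Typical wiring for this strategy (a sketch, not part of the original file):
# provider and OAuth credentials configured in `config/config.exs`. The env
# variable names are placeholder assumptions.
import Config

config :ueberauth, Ueberauth,
  providers: [
    todoist: {Ueberauth.Strategy.Todoist, []}
  ]

config :ueberauth, Ueberauth.Strategy.Todoist.OAuth,
  client_id: System.get_env("TODOIST_CLIENT_ID"),
  client_secret: System.get_env("TODOIST_CLIENT_SECRET")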
|
lib/ueberauth/strategy/todoist.ex
| 0.628863 | 0.431165 |
todoist.ex
|
starcoder
|
defmodule EvictionOperator.Pod do
@moduledoc """
Finds pods that are candidates for eviction.
"""
@default_max_lifetime 600
alias K8s.{Client, Operation, Selector}
alias EvictionOperator.{Node, Event}
@doc """
Gets all pods with eviction enabled.
"""
@spec candidates(map()) :: {:ok, Enumerable.t()} | {:error, HTTPoison.Response.t()}
def candidates(%{} = policy) do
op = Client.list("v1", :pods, namespace: :all)
selector = Selector.parse(policy)
op_w_selector = %Operation{op | label_selector: selector}
response = Client.stream(op_w_selector, :default)
case response do
{:ok, stream} ->
Event.pods_list_candidates_succeeded(%{}, %{})
{:ok, stream}
{:error, _any} = error ->
Event.pods_list_candidates_failed(%{}, %{})
error
end
end
@doc """
Get a list of evictable pods on the given node pool.
Filters `candidates/1` by `pod_started_before/2` and optionally `pod_on_nonpreferred_node/2`.
"""
@spec evictable(map) :: {:ok, Enumerable.t()} | {:error, HTTPoison.Response.t()}
def evictable(%{} = policy) do
with {:ok, nodes} <- Node.list(),
{:ok, stream} <- candidates(policy) do
max_lifetime = max_lifetime(policy)
started_before = pods_started_before(stream, max_lifetime)
ready_for_eviction =
case mode(policy) do
:all -> started_before
:nonpreferred -> pods_on_nonpreferred_node(started_before, nodes)
end
{:ok, ready_for_eviction}
end
end
@spec pods_on_nonpreferred_node(Enumerable.t(), list(map)) :: Enumerable.t()
defp pods_on_nonpreferred_node(pods, nodes) do
Stream.filter(pods, fn pod -> pod_on_nonpreferred_node(pod, nodes) end)
end
@doc false
@spec pods_started_before(Enumerable.t(), pos_integer) :: Enumerable.t()
def pods_started_before(pods, max_lifetime) do
Stream.filter(pods, fn pod -> pod_started_before(pod, max_lifetime) end)
end
@spec pod_on_nonpreferred_node(map, list(map)) :: boolean
def pod_on_nonpreferred_node(
%{
"spec" => %{
"nodeName" => node_name,
"affinity" => %{
"nodeAffinity" => %{"preferredDuringSchedulingIgnoredDuringExecution" => affinity}
}
}
},
nodes
) do
prefs = Enum.map(affinity, fn a -> Map.get(a, "preference") end)
preferred =
nodes
|> find_node_by_name(node_name)
|> Node.matches_preferences?(prefs)
!preferred
end
def pod_on_nonpreferred_node(_pod_with_no_affinity, _nodes), do: false
@spec find_node_by_name(list(map), binary()) :: map() | nil
defp find_node_by_name(nodes, node_name) do
Enum.find(nodes, fn %{"metadata" => %{"name" => name}} -> name == node_name end)
end
@doc """
Check if a pod started before a given time
## Examples
iex> start_time = DateTime.utc_now |> DateTime.add(-61, :second) |> DateTime.to_string
...> EvictionOperator.Pod.pod_started_before(%{"status" => %{"startTime" => start_time}}, 60)
true
iex> start_time = DateTime.utc_now |> DateTime.to_string
...> EvictionOperator.Pod.pod_started_before(%{"status" => %{"startTime" => start_time}}, 60)
false
"""
@spec pod_started_before(map, pos_integer) :: boolean
def pod_started_before(%{"status" => %{"startTime" => start_time}}, seconds) do
seconds_ago = -parse_seconds(seconds)
cutoff_time = DateTime.utc_now() |> DateTime.add(seconds_ago, :second)
with {:ok, start_time, _} <- DateTime.from_iso8601(start_time),
:lt <- DateTime.compare(start_time, cutoff_time) do
true
else
_ -> false
end
end
def pod_started_before(_, _), do: false
@spec max_lifetime(map()) :: pos_integer()
defp max_lifetime(%{"spec" => %{"maxLifetime" => sec}}), do: parse_seconds(sec)
defp max_lifetime(_), do: @default_max_lifetime
@spec mode(map()) :: :all | :nonpreferred
defp mode(%{"spec" => %{"mode" => "nonpreferred"}}), do: :nonpreferred
defp mode(_), do: :all
@spec parse_seconds(binary() | pos_integer() | {pos_integer(), term()}) :: pos_integer()
defp parse_seconds(sec) when is_binary(sec), do: sec |> Integer.parse() |> parse_seconds
defp parse_seconds(sec) when is_integer(sec), do: sec
defp parse_seconds({sec, _}), do: sec
defp parse_seconds(_), do: 0
end
|
lib/eviction_operator/pod.ex
| 0.869105 | 0.453867 |
pod.ex
|
starcoder
|
defmodule LetItGo.Application do
@moduledoc """
Under the application supervisor, we spin up processes for reading from, writing to, and
managing Kafka topics. See `LetItGo.TopicCreator` and `LetItGo.KafkaWriter` module docs
to learn more about writing to and managing topics.
We spin up group consumer processes under an `Elsa.Supervisor` with the `{Elsa.Supervisor, ...}`
tuple below. The `:connection` configuration value is used to namespace processes and registries
for this particular set of Kafka infrastructure. We stand up another `:connection` namespace to
write to Kafka elsewhere, but both reading/writing could use the same namespace.
Consumer group configuration (under `:group_consumer`) allows you to customize the group name
(`:group`), a list of topics to read from (`:topics`), a process to forward read messages to
(`:handler`) and the initial configuration of that handler process (`:handler_init_args`).
`Elsa` uses `:brod` and `:kafka_protocol` under the hood. Configuration of those two Erlang
libraries is passed through Elsa with the `:config` field.
## App configuration
Set application environment to change which Kafka brokers or topic this application is pointed at:
config :let_it_go,
kafka: [broker_host_name: 9999],
topic: "custom-topic-name"
If not set, `LetItGo` will default to working with [DivoKafka](https://hex.pm/packages/divo_kafka).
"""
use Application
def start(_type, _args) do
children = [
LetItGo.TopicCreator,
LetItGo.KafkaWriter,
{
Elsa.Supervisor,
endpoints: Application.get_env(:let_it_go, :kafka, [localhost: 9092]),
connection: :let_it_go_reader,
group_consumer: [
group: "let_it_go_arbitrary_group_name",
topics: [Application.get_env(:let_it_go, :topic, "let-it-go-topic")],
handler: LetItGo.MessageHandler,
handler_init_args: [output_dir: ".output", filename: "messages"],
config: [
begin_offset: :earliest,
offset_reset_policy: :reset_to_earliest,
prefetch_count: 0,
prefetch_bytes: 2_097_152
]
]
}
]
opts = [strategy: :one_for_one, name: LetItGo.Supervisor]
Supervisor.start_link(children, opts)
end
end
|
lib/let_it_go/application.ex
| 0.785802 | 0.693953 |
application.ex
|
starcoder
|
defmodule Config.Reader do
@moduledoc """
API for reading config files defined with `Config`.
## As a provider
`Config.Reader` can also be used as a `Config.Provider`. When used
as a provider, it expects a single argument: the configuration path
(as outlined in `t:Config.Provider.config_path/0`) for the file to
be read and loaded during the system boot.
For example, if you expect the target system to have a config file
in an absolute path, you can configure your `mix release` as:
config_providers: [{Config.Reader, "/etc/config.json"}]
Or if you want to read a custom path inside the release:
config_providers: [{Config.Reader, {:system, "RELEASE_ROOT", "/config.exs"}}]
Note that, by default, Mix releases support runtime configuration via
a `config/releases.exs`. If a `config/releases.exs` exists in your
application, it is automatically copied inside the release and
automatically set as a config provider.
"""
@behaviour Config.Provider
@impl true
def init(path) do
Config.Provider.validate_config_path!(path)
path
end
@impl true
def load(config, path) do
merge(config, path |> Config.Provider.resolve_config_path!() |> read!())
end
@doc """
Reads the configuration file.
The same as `read_imports!/2` but only returns the configuration
in the given file, without returning the imported paths.
It exists for convenience purposes. For example, you could
invoke it inside your `mix.exs` to read some external data
you decided to move to a configuration file:
releases: Config.Reader.read!("rel/releases.exs")
"""
@doc since: "1.9.0"
@spec read!(Path.t(), [Path.t()]) :: keyword
def read!(file, imported_paths \\ [])
when is_binary(file) and is_list(imported_paths) do
Config.__eval__!(file, imported_paths) |> elem(0)
end
@doc """
Reads the given configuration file alongside its imports.
It accepts a list of `imported_paths` that should raise if attempted
to be imported again (to avoid recursive imports).
It returns a tuple with the configuration and the imported paths.
"""
@doc since: "1.9.0"
@spec read_imports!(Path.t(), [Path.t()]) :: {keyword, [Path.t()]}
def read_imports!(file, imported_paths \\ [])
when is_binary(file) and is_list(imported_paths) do
Config.__eval__!(file, imported_paths)
end
@doc """
Merges two configurations.
The configurations are merged together with the values in
the second one having higher preference than the first in
case of conflicts. In case both values are set to keyword
lists, it deep merges them.
## Examples
iex> Config.Reader.merge([app: [k: :v1]], [app: [k: :v2]])
[app: [k: :v2]]
iex> Config.Reader.merge([app: [k: [v1: 1, v2: 2]]], [app: [k: [v2: :a, v3: :b]]])
[app: [k: [v1: 1, v2: :a, v3: :b]]]
iex> Config.Reader.merge([app1: []], [app2: []])
[app1: [], app2: []]
"""
@doc since: "1.9.0"
@spec merge(keyword, keyword) :: keyword
def merge(config1, config2) when is_list(config1) and is_list(config2) do
Config.__merge__(config1, config2)
end
end
|
lib/elixir/lib/config/reader.ex
| 0.887616 | 0.414425 |
reader.ex
|
starcoder
|
defprotocol Presence do
@moduledoc ~S"""
The `Presence` protocol is responsible for
checking the presence of a value.
The functions required to be implemented are
`is_blank/1`, `is_present/1` and `presence/1`.
These functions are not automatically imported
by `Kernel`.
Currently, these modules implements `Presence` protocol.
- `Atom`
- `BitString`
- `Float`
- `Integer`
- `List`
- `Map`
- `Tuple`
"""
@typedoc """
The return value of `presence` function.
"""
@type t :: \
atom
| bitstring
| float
| integer
| list
| map
| tuple
@doc ~S"""
A value is blank if it's nil, false, empty, or a whitespace string.
For example
- `nil`
- `false`
- `[]`
- `''`
- `' '`
- `%{}`
- `{}`
- `""`
- `" "`
are all blank.
This simplifies
!address || Enum.empty?(address)
to
is_blank(address)
## Examples
### Atom
`nil` is blank:
iex> is_blank(nil)
true
`false` is blank:
iex> is_blank(false)
true
`:false` atom is blank:
iex> is_blank(:false)
true
### BitString
A bit string (or simply string) is blank if it's empty or contains whitespaces only:
iex> is_blank("")
true
iex> is_blank(" ")
true
iex> is_blank("\t\n\r")
true
iex> is_blank(" blah ")
false
Unicode whitespace is supported:
iex> is_blank("\u00a0")
true
### Float
`1.1` is not blank:
iex> is_blank(1.1)
false
### Integer
`1` is not blank:
iex> is_blank(1)
false
### List
`[]` is blank:
iex> is_blank([])
true
`' '` is blank:
iex> is_blank(' ')
true
### Map
`%{}` is blank:
iex> is_blank(%{})
true
### Tuple
`{}` is blank:
iex> is_blank({})
true
"""
@spec is_blank(t) :: boolean
def is_blank(value)
@doc ~S"""
A value is present if it's not blank.
"""
@spec is_present(t) :: boolean
def is_present(value)
@doc ~S"""
Returns the `value` if it's present otherwise returns `nil`.
presence(value)
is equivalent to
if is_present(value), do: value, else: nil
For example, something like
state = if is_present(state), do: state
country = if is_present(country), do: country
region = state || country || "US"
becomes
region = presence(state) || presence(country) || "US"
"""
@spec presence(t) :: t | nil
def presence(value)
end
defimpl Presence, for: Atom do
def is_blank(atom) do
cond do
is_nil(atom) -> true
is_boolean(atom) -> atom == false
is_atom(atom) -> atom == false
end
end
def is_present(atom), do: !is_blank(atom)
def presence(atom) do
if is_present(atom), do: atom, else: nil
end
end
defimpl Presence, for: BitString do
def is_blank(string) do
String.strip(string) == ""
end
def is_present(string), do: !is_blank(string)
def presence(string) do
if is_present(string), do: string, else: nil
end
end
defimpl Presence, for: Float do
def is_blank(_), do: false
def is_present(_), do: true
def presence(float), do: float
end
defimpl Presence, for: Integer do
def is_blank(_), do: false
def is_present(_), do: true
def presence(integer), do: integer
end
defimpl Presence, for: List do
def is_blank(charlist) do
charlist = charlist
|> to_string
|> String.replace(" ", "")
byte_size(charlist) == 0
end
def is_present(charlist), do: !is_blank(charlist)
def presence(charlist) do
if is_present(charlist), do: charlist, else: nil
end
end
defimpl Presence, for: Map do
def is_blank(map) do
keys = Map.keys(map)
length(keys) == 0
end
def is_present(map), do: !is_blank(map)
def presence(map) do
if is_present(map), do: map, else: nil
end
end
defimpl Presence, for: Tuple do
def is_blank(tuple) do
list = Tuple.to_list(tuple)
length(list) == 0
end
def is_present(tuple), do: !is_blank(tuple)
def presence(tuple) do
if is_present(tuple), do: tuple, else: nil
end
end
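# A short usage sketch (not part of the original file) showing the fallback
# chain the moduledoc describes.
defmodule Presence.Example do
  import Presence, only: [presence: 1]

  # Falls back from state to country to a default, skipping blank values.
  def region(state, country) do
    presence(state) || presence(country) || "US"
  end
end

# Presence.Example.region("  ", %{})    #=> "US"
# Presence.Example.region("Texas", nil) #=> "Texas"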
|
lib/presence.ex
| 0.912099 | 0.630998 |
presence.ex
|
starcoder
|
defmodule Ash.Dsl.Entity do
@moduledoc """
Declares a DSL entity.
A dsl entity represents a dsl constructor who's resulting value is a struct.
This lets the user create complex objects with arbitrary(mostly) validation rules.
The lifecycle of creating entities is complex, happening as Elixir is compiling
the modules in question. Some of the patterns around validating/transforming entities
have not yet solidified. If you aren't careful and don't follow the guidelines listed
here, you can have subtle and strange bugs during compilation. Anything not isolated to
simple value validations should be done in `transformers`. See `Ash.Dsl.Transformer`.
An entity has a `target` indicating which struct will ultimately be built. An entity
also has a `schema`. This schema is used for documentation, and the options are validated
against it before continuing on with the DSL.
To create positional arguments to the builder, use `args`. The values provided to
`args` need to be in the provided schema as well. They will be positional arguments
in the same order that they are provided in the `args` key.
`auto_set_fields` will set the provided values into the produced struct (they do not need
to be included in the schema).
`transform` is a function that takes a created struct and can alter it. This happens immediately
after handling the DSL options, and can be useful for setting field values on a struct based on
other values in that struct. If you need things that aren't contained in that struct, use an
`Ash.Dsl.Transformer`.
`entities` allows you to specify a keyword list of nested entities. Nested entities are stored
on the struct in the corresponding key, and are used in the same way entities are otherwise.
For a full example, see `Ash.Dsl.Extension`.
"""
defstruct [
:name,
:target,
:transform,
examples: [],
entities: [],
describe: "",
args: [],
hide: [],
modules: [],
schema: [],
auto_set_fields: []
]
@type t :: %__MODULE__{
name: atom,
describe: String.t(),
target: module,
examples: [String.t()],
transform: mfa | nil,
args: [atom],
hide: [atom],
entities: Keyword.t(),
auto_set_fields: Keyword.t(),
schema: Ash.OptionsHelpers.schema()
}
def build(
%{target: target, schema: schema, auto_set_fields: auto_set_fields, transform: transform},
opts,
nested_entities
) do
{before_validate_auto, after_validate_auto} =
Keyword.split(auto_set_fields || [], Keyword.keys(schema))
with {:ok, opts} <-
Ash.OptionsHelpers.validate(Keyword.merge(opts || [], before_validate_auto), schema),
opts <- Keyword.merge(opts, after_validate_auto),
built <- struct(target, opts),
built <- struct(built, nested_entities),
{:ok, built} <-
transform(transform, built) do
{:ok, built}
end
end
defp transform(nil, built), do: {:ok, built}
defp transform({module, function, args}, built) do
apply(module, function, [built | args])
end
end
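# An illustrative sketch (not part of the original file): the entity
# declaration a DSL extension might hold. The target module and the schema's
# option types are assumptions in the style described by the moduledoc.
defmodule MyDsl.Attribute do
  defstruct [:name, :type, primary_key?: false]
end

defmodule MyDsl.Entities do
  def attribute do
    %Ash.Dsl.Entity{
      name: :attribute,
      describe: "Declares an attribute on the resource.",
      target: MyDsl.Attribute,
      args: [:name, :type],
      schema: [
        name: [type: :atom, required: true],
        type: [type: :atom, required: true],
        primary_key?: [type: :boolean, default: false]
      ]
    }
  end
end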
|
lib/ash/dsl/entity.ex
| 0.865551 | 0.783492 |
entity.ex
|
starcoder
|
defmodule Plug.UploadError do
defexception [:message]
end
defmodule Plug.Upload do
@moduledoc """
A server (a `GenServer` specifically) that manages uploaded files.
Uploaded files are stored in a temporary directory
and removed from that directory after the process that
requested the file dies.
During the request, files are represented with
a `Plug.Upload` struct that contains three fields:
* `:path` - the path to the uploaded file on the filesystem
* `:content_type` - the content type of the uploaded file
* `:filename` - the filename of the uploaded file given in the request
**Note**: as mentioned in the documentation for `Plug.Parsers`, the `:plug`
application has to be started in order to upload files and use the
`Plug.Upload` module.
"""
use GenServer
defstruct [:path, :content_type, :filename]
@type t :: %__MODULE__{
path: Path.t(),
filename: binary,
content_type: binary | nil
}
@table __MODULE__
@max_attempts 10
@temp_env_vars ~w(PLUG_TMPDIR TMPDIR TMP TEMP)s
@doc """
Requests a random file to be created in the upload directory
with the given prefix.
"""
@spec random_file(binary) ::
{:ok, binary}
| {:too_many_attempts, binary, pos_integer}
| {:no_tmp, [binary]}
def random_file(prefix) do
case ensure_tmp() do
{:ok, tmp, paths} ->
open_random_file(prefix, tmp, 0, paths)
{:no_tmp, tmps} ->
{:no_tmp, tmps}
end
end
defp ensure_tmp() do
pid = self()
server = plug_server()
case :ets.lookup(@table, pid) do
[{^pid, tmp, paths}] ->
{:ok, tmp, paths}
[] ->
{:ok, tmps} = GenServer.call(server, :upload)
{mega, _, _} = :os.timestamp()
subdir = "/plug-" <> i(mega)
if tmp = Enum.find_value(tmps, &make_tmp_dir(&1 <> subdir)) do
true = :ets.insert_new(@table, {pid, tmp, []})
{:ok, tmp, []}
else
{:no_tmp, tmps}
end
end
end
defp make_tmp_dir(path) do
case File.mkdir_p(path) do
:ok -> path
{:error, _} -> nil
end
end
defp open_random_file(prefix, tmp, attempts, paths) when attempts < @max_attempts do
path = path(prefix, tmp)
case :file.write_file(path, "", [:write, :raw, :exclusive, :binary]) do
:ok ->
:ets.update_element(@table, self(), {3, [path | paths]})
{:ok, path}
{:error, reason} when reason in [:eexist, :eacces] ->
open_random_file(prefix, tmp, attempts + 1, paths)
end
end
defp open_random_file(_prefix, tmp, attempts, _paths) do
{:too_many_attempts, tmp, attempts}
end
defp path(prefix, tmp) do
sec = :os.system_time(:second)
rand = :rand.uniform(999_999_999_999_999)
scheduler_id = :erlang.system_info(:scheduler_id)
tmp <> "/" <> prefix <> "-" <> i(sec) <> "-" <> i(rand) <> "-" <> i(scheduler_id)
end
@compile {:inline, i: 1}
defp i(integer), do: Integer.to_string(integer)
@doc """
Requests a random file to be created in the upload directory
with the given prefix. Raises on failure.
"""
@spec random_file!(binary) :: binary | no_return
def random_file!(prefix) do
case random_file(prefix) do
{:ok, path} ->
path
{:too_many_attempts, tmp, attempts} ->
raise Plug.UploadError,
"tried #{attempts} times to create an uploaded file at #{tmp} but failed. " <>
"Set PLUG_TMPDIR to a directory with write permission"
{:no_tmp, _tmps} ->
raise Plug.UploadError,
"could not create a tmp directory to store uploads. " <>
"Set PLUG_TMPDIR to a directory with write permission"
end
end
defp plug_server do
Process.whereis(__MODULE__) ||
raise Plug.UploadError,
"could not find process Plug.Upload. Have you started the :plug application?"
end
@doc """
Starts the upload handling server.
"""
def start_link() do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
## Callbacks
def init(:ok) do
Process.flag(:trap_exit, true)
tmp = Enum.find_value(@temp_env_vars, "/tmp", &System.get_env/1)
cwd = Path.join(File.cwd!(), "tmp")
:ets.new(@table, [:named_table, :public, :set])
{:ok, [tmp, cwd]}
end
def handle_call(:upload, {pid, _ref}, dirs) do
Process.monitor(pid)
{:reply, {:ok, dirs}, dirs}
end
def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do
case :ets.lookup(@table, pid) do
[{pid, _tmp, paths}] ->
:ets.delete(@table, pid)
delete_paths(paths)
[] ->
:ok
end
{:noreply, state}
end
def handle_info(_msg, state) do
{:noreply, state}
end
def terminate(_reason, _state) do
folder = fn {_pid, _tmp, paths}, _ -> delete_paths(paths) end
:ets.foldl(folder, :ok, @table)
:ok
end
defp delete_paths(paths) do
for path <- paths, do: :file.delete(path)
:ok
end
end
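# A hedged usage sketch (not part of the original file): copying an uploaded
# file somewhere durable, and requesting a scratch file. `MyApp` is a
# placeholder namespace.
defmodule MyApp.UploadExample do
  # The %Plug.Upload{} temp file is deleted when the request process dies,
  # so persist it while still handling the request.
  def persist(%Plug.Upload{path: path, filename: filename}, dest_dir) do
    dest = Path.join(dest_dir, filename)
    File.cp!(path, dest)
    {:ok, dest}
  end

  # Ask Plug.Upload for a randomly named temp file of our own.
  def scratch_file do
    Plug.Upload.random_file!("example")
  end
end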
|
lib/plug/upload.ex
| 0.730482 | 0.482063 |
upload.ex
|
starcoder
|
defmodule Crux.Structs.Permissions do
@moduledoc """
Custom (non-Discord-API) module to help with working with [permissions](https://discord.com/developers/docs/topics/permissions).
"""
@moduledoc since: "0.1.3"
alias Crux.Structs
use Bitwise
permissions = %{
create_instant_invite: 1 <<< 0,
kick_members: 1 <<< 1,
ban_members: 1 <<< 2,
administrator: 1 <<< 3,
manage_channels: 1 <<< 4,
manage_guild: 1 <<< 5,
add_reactions: 1 <<< 6,
view_audit_log: 1 <<< 7,
priority_speaker: 1 <<< 8,
stream: 1 <<< 9,
view_channel: 1 <<< 10,
send_messages: 1 <<< 11,
send_tts_message: 1 <<< 12,
manage_messages: 1 <<< 13,
embed_links: 1 <<< 14,
attach_files: 1 <<< 15,
read_message_history: 1 <<< 16,
mention_everyone: 1 <<< 17,
use_external_emojis: 1 <<< 18,
view_guild_insights: 1 <<< 19,
connect: 1 <<< 20,
speak: 1 <<< 21,
mute_members: 1 <<< 22,
deafen_members: 1 <<< 23,
move_members: 1 <<< 24,
use_vad: 1 <<< 25,
change_nickname: 1 <<< 26,
manage_nicknames: 1 <<< 27,
manage_roles: 1 <<< 28,
manage_webhooks: 1 <<< 29,
manage_emojis: 1 <<< 30
}
use Crux.Structs.BitField, permissions
@typedoc """
Union type of all valid permission name atoms.
"""
@typedoc since: "0.2.0"
@type name ::
:create_instant_invite
| :kick_members
| :ban_members
| :administrator
| :manage_channels
| :manage_guild
| :add_reactions
| :view_audit_log
| :priority_speaker
| :stream
| :view_channel
| :send_messages
| :send_tts_message
| :manage_messages
| :embed_links
| :attach_files
| :read_message_history
| :mention_everyone
| :use_external_emojis
| :view_guild_insights
| :connect
| :speak
| :mute_members
| :deafen_members
| :move_members
| :use_vad
| :change_nickname
| :manage_nicknames
| :manage_roles
| :manage_webhooks
| :manage_emojis
@doc """
Resolves permissions for a user in a guild, optionally including channel permission overwrites.
> Raises when the member is not cached.
> The guild-wide administrator flag or being owner implicitly grants all permissions, see `explicit/3`.
"""
@doc since: "0.2.0"
@spec implicit(
member :: Structs.Member.t() | Structs.User.t() | Structs.Snowflake.t(),
guild :: Structs.Guild.t(),
channel :: Structs.Channel.t() | nil
) :: t()
def implicit(member, guild, channel \\ nil)
def implicit(%Structs.User{id: user_id}, guild, channel), do: implicit(user_id, guild, channel)
def implicit(%Structs.Member{user: user_id}, guild, channel),
do: implicit(user_id, guild, channel)
def implicit(user_id, %Structs.Guild{owner_id: user_id}, _), do: @all
def implicit(user_id, guild, channel) do
permissions = explicit(user_id, guild)
cond do
has(permissions, :administrator) ->
@all
channel ->
explicit(user_id, guild, channel)
true ->
permissions
end
end
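# Illustrative usage (a sketch; `member`, `guild` and `channel` are assumed to be
# cached structs obtained elsewhere, and `has/2` is the helper generated by
# `Crux.Structs.BitField`):
#
#     perms = Crux.Structs.Permissions.implicit(member, guild, channel)
#     Crux.Structs.Permissions.has(perms, :send_messages)
#     #=> true | false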
@doc """
Resolves permissions for a user in a guild, optionally including channel permission overwrites.
> Raises when the member is not cached.
> The administrator flag or being owner does not implicitly grant all permissions here, see `implicit/3`.
"""
@doc since: "0.2.0"
@spec explicit(
member :: Structs.Member.t() | Structs.User.t() | Structs.Snowflake.t(),
guild :: Structs.Guild.t(),
channel :: Structs.Channel.t() | nil
) :: t()
def explicit(member, guild, channel \\ nil)
def explicit(%Structs.Member{user: user_id}, guild, channel),
do: explicit(user_id, guild, channel)
def explicit(%Structs.User{id: user_id}, guild, channel), do: explicit(user_id, guild, channel)
# -> compute_base_permissions from
# https://discord.com/developers/docs/topics/permissions#permission-overwrites
def explicit(user_id, %Structs.Guild{id: guild_id, members: members, roles: roles}, nil) do
member =
Map.get(members, user_id) ||
raise ArgumentError, """
There is no member with the ID "#{inspect(user_id)}" in the cache of the guild.
The member is uncached or not in the guild.
"""
permissions =
roles
|> Map.get(guild_id)
|> Map.get(:permissions)
member_roles =
member.roles
|> MapSet.put(guild_id)
|> MapSet.to_list()
roles
|> Map.take(member_roles)
|> Enum.map(fn {_id, %{permissions: permissions}} -> permissions end)
|> List.insert_at(0, permissions)
|> resolve()
end
# -> compute_permissions and compute_overwrites from
# https://discord.com/developers/docs/topics/permissions#permission-overwrites
def explicit(
user_id,
%Structs.Guild{id: guild_id, members: members} = guild,
%Structs.Channel{permission_overwrites: overwrites}
) do
permissions = explicit(user_id, guild)
# apply @everyone overwrite
base_permissions =
overwrites
|> Map.get(guild_id)
|> apply_overwrite(permissions)
role_ids =
members
|> Map.get(user_id)
|> Map.get(:roles)
|> MapSet.to_list()
# apply all other overwrites
role_permissions =
overwrites
|> Map.take(role_ids)
|> Map.values()
# reduce all relevant overwrites into a single dummy one
|> Enum.reduce(%{allow: 0, deny: 0}, &acc_overwrite/2)
# apply it to the base permissions
|> apply_overwrite(base_permissions)
# apply user overwrite
overwrites
|> Map.get(user_id)
|> apply_overwrite(role_permissions)
end
defp acc_overwrite(nil, acc), do: acc
defp acc_overwrite(%{allow: cur_allow, deny: cur_deny}, %{allow: allow, deny: deny}) do
%{allow: cur_allow ||| allow, deny: cur_deny ||| deny}
end
defp apply_overwrite(nil, permissions), do: permissions
defp apply_overwrite(%{allow: allow, deny: deny}, permissions) do
permissions
|> band(~~~deny)
|> bor(allow)
end
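# Worked example of the bit math above (assumed 4-bit values): with base
# permissions 0b0110, deny 0b0010 and allow 0b1000, the result is
# (0b0110 &&& ~~~0b0010) ||| 0b1000 = 0b0100 ||| 0b1000 = 0b1100.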
end
|
lib/structs/permissions.ex
| 0.848596 | 0.503601 |
permissions.ex
|
starcoder
|
defmodule Benchee.Profile do
alias Benchee.Output.ProfilePrinter, as: Printer
alias Benchee.Suite
@default_profiler :eprof
@builtin_profilers [:cprof, :eprof, :fprof]
defmodule Benchee.UnknownProfilerError do
defexception message: "error"
end
@moduledoc """
Profiles each scenario after benchmarking them if the `profile_after` option is either set to:
* `true`,
* a valid `profiler`,
* a tuple of a valid `profiler` and a list of options to pass to it, e.g., `{:fprof, [sort: :own]}`.
The profiler that will be used is either the one set by the `profile_after` option or, if set to `true`,
the default one (`:eprof`). It accepts however the following profilers:
* `:cprof` will profile with [`Mix.Tasks.Profile.Cprof`](https://hexdocs.pm/mix/Mix.Tasks.Profile.Cprof.html).
It provides information related to the number of function calls.
* `:eprof` will profile with [`Mix.Tasks.Profile.Eprof`](https://hexdocs.pm/mix/Mix.Tasks.Profile.Eprof.html).
It provides information related to the time spent on each function in regard to the total execution time.
* `:fprof` will profile with [`Mix.Tasks.Profile.Fprof`](https://hexdocs.pm/mix/Mix.Tasks.Profile.Fprof.html).
It provides information related to the time spent on each function: both the *total* time spent in it
and the time spent in it *excluding* the time of called functions.
"""
@doc """
Returns the atom corresponding to the default profiler.
"""
@spec default_profiler() :: unquote(@default_profiler)
def default_profiler, do: @default_profiler
@doc """
Runs for each scenario found in the suite the `profile/2` function from the given profiler.
"""
@spec profile(Suite.t(), module) :: Suite.t()
def profile(suite, printer \\ Printer)
def profile(suite = %{configuration: %{profile_after: false}}, _printer), do: suite
def profile(
suite = %{
scenarios: scenarios,
configuration: %{profile_after: true}
},
printer
) do
do_profile(scenarios, {@default_profiler, []}, printer)
suite
end
def profile(
suite = %{
scenarios: scenarios,
configuration: %{profile_after: {profiler, profiler_opts}}
},
printer
) do
do_profile(scenarios, {profiler, profiler_opts}, printer)
suite
end
def profile(
suite = %{
scenarios: scenarios,
configuration: %{profile_after: profiler}
},
printer
) do
do_profile(scenarios, {profiler, []}, printer)
suite
end
defp do_profile(scenarios, {profiler, profiler_opts}, printer) do
profiler_module = profiler_to_module(profiler)
Enum.each(scenarios, fn scenario ->
run(scenario, {profiler, profiler_module, profiler_opts}, printer)
end)
end
defp run(
%{name: name, function: fun_to_profile},
{profiler, profiler_module, profiler_opts},
printer
) do
printer.profiling(name, profiler)
apply(profiler_module, :profile, [fun_to_profile, profiler_opts])
end
# If given a builtin profiler the function will return its proper module.
# In the case of an unknown profiler, it will raise an `UnknownProfilerError` exception.
defp profiler_to_module(profiler) do
if Enum.member?(@builtin_profilers, profiler) do
profiler =
profiler
|> Atom.to_string()
|> String.capitalize()
Module.concat(Mix.Tasks.Profile, profiler)
else
raise Benchee.UnknownProfilerError,
message: "Got an unknown '#{inspect(profiler)}' built-in profiler."
end
end
end
|
lib/benchee/profile.ex
| 0.814164 | 0.47384 |
profile.ex
|
starcoder
|
defmodule CoderRing.GenRing do
@moduledoc """
GenServer wrapper for a CoderRing.
Keeping memo state in memory with a process means some database reads can
be skipped. State is, however, always synced to the database so it can be
restored properly on app restart.
Take care to only have one GenRing proc running for a ring at any given
time. For instance, if running on a multi-server deployment, use Erlang's
clustered mode and a global process registry like
[Horde](https://github.com/derekkraan/horde) to guarantee no more than one
proc for a ring across the cluster.
When running on a single server, it should be sufficient to use GenRing.
## Usage
Create a module in your application:
```elixir
defmodule MyApp.CoderRing do
use CoderRing.GenRing, otp_app: :my_app
end
```
Then, add it to your application supervisor:
```elixir
def start(_type, _args) do
children =
[
...
] ++
MyApp.CoderRing.child_specs()
opts = [...]
{:ok, pid} = Supervisor.start_link(children, opts)
end
```
"""
defmacro __using__(otp_app: otp_app) do
quote do
use CoderRing, otp_app: unquote(otp_app), module: __MODULE__
use GenServer
@doc false
@spec child_spec(atom) :: Supervisor.child_spec()
def child_spec(name) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [name]}
}
end
@doc "Get a list of child specs for all configured rings."
@spec child_specs :: [Supervisor.child_spec()]
def child_specs do
Enum.map(rings(), &child_spec(&1.name))
end
@doc """
Start a GenServer and quietly ignore it when Horde has already started this
proc somewhere.
"""
@spec start_link(atom) :: GenServer.on_start()
def start_link(name) do
  # Quietly ignore the start when this ring's proc is already registered,
  # as described in the @doc above (e.g. when Horde already started it).
  case GenServer.start_link(__MODULE__, name, name: :"#{__MODULE__}_#{name}") do
    {:error, {:already_started, _pid}} -> :ignore
    other -> other
  end
end
@impl CoderRing
def call(name, message) do
GenServer.call(:"#{__MODULE__}_#{name}", message)
end
@impl GenServer
def init(name) do
{:ok, name |> ring() |> load_memo() |> populate_if_empty()}
end
@impl GenServer
def handle_call(:stop, _from, state) do
{:stop, :normal, :ok, state}
end
def handle_call(message, _from, state) do
{reply, state} = invoke(state, message)
{:reply, reply, state}
end
end
end
end
|
lib/coder_ring/gen_ring.ex
| 0.789396 | 0.686908 |
gen_ring.ex
|
starcoder
|
defmodule AWS.CodeDeploy do
@moduledoc """
AWS CodeDeploy
**Overview**
This reference guide provides descriptions of the AWS CodeDeploy APIs. For
more information about AWS CodeDeploy, see the [AWS CodeDeploy User
Guide](http://docs.aws.amazon.com/codedeploy/latest/userguide).
**Using the APIs**
You can use the AWS CodeDeploy APIs to work with the following:
<ul> <li> Applications are unique identifiers used by AWS CodeDeploy to
ensure the correct combinations of revisions, deployment configurations,
and deployment groups are being referenced during deployments.
You can use the AWS CodeDeploy APIs to create, delete, get, list, and
update applications.
</li> <li> Deployment configurations are sets of deployment rules and
success and failure conditions used by AWS CodeDeploy during deployments.
You can use the AWS CodeDeploy APIs to create, delete, get, and list
deployment configurations.
</li> <li> Deployment groups are groups of instances to which application
revisions can be deployed.
You can use the AWS CodeDeploy APIs to create, delete, get, list, and
update deployment groups.
</li> <li> Instances represent Amazon EC2 instances to which application
revisions are deployed. Instances are identified by their Amazon EC2 tags
or Auto Scaling group names. Instances belong to deployment groups.
You can use the AWS CodeDeploy APIs to get and list instances.
</li> <li> Deployments represent the process of deploying revisions to
instances.
You can use the AWS CodeDeploy APIs to create, get, list, and stop
deployments.
</li> <li> Application revisions are archive files stored in Amazon S3
buckets or GitHub repositories. These revisions contain source content
(such as source code, web pages, executable files, and deployment scripts)
along with an application specification (AppSpec) file. (The AppSpec file
is unique to AWS CodeDeploy; it defines the deployment actions you want AWS
CodeDeploy to execute.) For application revisions stored in Amazon S3
buckets, an application revision is uniquely identified by its Amazon S3
object key and its ETag, version, or both. For application revisions stored
in GitHub repositories, an application revision is uniquely identified by
its repository name and commit ID. Application revisions are deployed
through deployment groups.
You can use the AWS CodeDeploy APIs to get, list, and register application
revisions.
</li> </ul>
"""
@doc """
Adds tags to on-premises instances.
"""
def add_tags_to_on_premises_instances(client, input, options \\ []) do
request(client, "AddTagsToOnPremisesInstances", input, options)
end
@doc """
Gets information about one or more application revisions.
"""
def batch_get_application_revisions(client, input, options \\ []) do
request(client, "BatchGetApplicationRevisions", input, options)
end
@doc """
Gets information about one or more applications.
"""
def batch_get_applications(client, input, options \\ []) do
request(client, "BatchGetApplications", input, options)
end
@doc """
Gets information about one or more deployment groups.
"""
def batch_get_deployment_groups(client, input, options \\ []) do
request(client, "BatchGetDeploymentGroups", input, options)
end
@doc """
Gets information about one or more instances that are part of a deployment
group.
"""
def batch_get_deployment_instances(client, input, options \\ []) do
request(client, "BatchGetDeploymentInstances", input, options)
end
@doc """
Gets information about one or more deployments.
"""
def batch_get_deployments(client, input, options \\ []) do
request(client, "BatchGetDeployments", input, options)
end
@doc """
Gets information about one or more on-premises instances.
"""
def batch_get_on_premises_instances(client, input, options \\ []) do
request(client, "BatchGetOnPremisesInstances", input, options)
end
@doc """
Starts the process of rerouting traffic from instances in the original
environment to instances in the replacement environment without waiting for
a specified wait time to elapse. (Traffic rerouting, which is achieved by
registering instances in the replacement environment with the load
balancer, can start as soon as all instances have a status of Ready.)
"""
def continue_deployment(client, input, options \\ []) do
request(client, "ContinueDeployment", input, options)
end
@doc """
Creates an application.
"""
def create_application(client, input, options \\ []) do
request(client, "CreateApplication", input, options)
end
@doc """
Deploys an application revision through the specified deployment group.
"""
def create_deployment(client, input, options \\ []) do
request(client, "CreateDeployment", input, options)
end
@doc """
Creates a deployment configuration.
"""
def create_deployment_config(client, input, options \\ []) do
request(client, "CreateDeploymentConfig", input, options)
end
@doc """
Creates a deployment group to which application revisions will be deployed.
"""
def create_deployment_group(client, input, options \\ []) do
request(client, "CreateDeploymentGroup", input, options)
end
@doc """
Deletes an application.
"""
def delete_application(client, input, options \\ []) do
request(client, "DeleteApplication", input, options)
end
@doc """
Deletes a deployment configuration.
<note> A deployment configuration cannot be deleted if it is currently in
use. Predefined configurations cannot be deleted.
</note>
"""
def delete_deployment_config(client, input, options \\ []) do
request(client, "DeleteDeploymentConfig", input, options)
end
@doc """
Deletes a deployment group.
"""
def delete_deployment_group(client, input, options \\ []) do
request(client, "DeleteDeploymentGroup", input, options)
end
@doc """
Deregisters an on-premises instance.
"""
def deregister_on_premises_instance(client, input, options \\ []) do
request(client, "DeregisterOnPremisesInstance", input, options)
end
@doc """
Gets information about an application.
"""
def get_application(client, input, options \\ []) do
request(client, "GetApplication", input, options)
end
@doc """
Gets information about an application revision.
"""
def get_application_revision(client, input, options \\ []) do
request(client, "GetApplicationRevision", input, options)
end
@doc """
Gets information about a deployment.
"""
def get_deployment(client, input, options \\ []) do
request(client, "GetDeployment", input, options)
end
@doc """
Gets information about a deployment configuration.
"""
def get_deployment_config(client, input, options \\ []) do
request(client, "GetDeploymentConfig", input, options)
end
@doc """
Gets information about a deployment group.
"""
def get_deployment_group(client, input, options \\ []) do
request(client, "GetDeploymentGroup", input, options)
end
@doc """
Gets information about an instance as part of a deployment.
"""
def get_deployment_instance(client, input, options \\ []) do
request(client, "GetDeploymentInstance", input, options)
end
@doc """
Gets information about an on-premises instance.
"""
def get_on_premises_instance(client, input, options \\ []) do
request(client, "GetOnPremisesInstance", input, options)
end
@doc """
Lists information about revisions for an application.
"""
def list_application_revisions(client, input, options \\ []) do
request(client, "ListApplicationRevisions", input, options)
end
@doc """
Lists the applications registered with the applicable IAM user or AWS
account.
"""
def list_applications(client, input, options \\ []) do
request(client, "ListApplications", input, options)
end
@doc """
Lists the deployment configurations with the applicable IAM user or AWS
account.
"""
def list_deployment_configs(client, input, options \\ []) do
request(client, "ListDeploymentConfigs", input, options)
end
@doc """
Lists the deployment groups for an application registered with the
applicable IAM user or AWS account.
"""
def list_deployment_groups(client, input, options \\ []) do
request(client, "ListDeploymentGroups", input, options)
end
@doc """
Lists the instances for a deployment associated with the applicable IAM user
or AWS account.
"""
def list_deployment_instances(client, input, options \\ []) do
request(client, "ListDeploymentInstances", input, options)
end
@doc """
Lists the deployments in a deployment group for an application registered
with the applicable IAM user or AWS account.
"""
def list_deployments(client, input, options \\ []) do
request(client, "ListDeployments", input, options)
end
@doc """
Gets a list of names for one or more on-premises instances.
Unless otherwise specified, both registered and deregistered on-premises
instance names will be listed. To list only registered or deregistered
on-premises instance names, use the registration status parameter.
"""
def list_on_premises_instances(client, input, options \\ []) do
request(client, "ListOnPremisesInstances", input, options)
end
@doc """
Registers with AWS CodeDeploy a revision for the specified application.
"""
def register_application_revision(client, input, options \\ []) do
request(client, "RegisterApplicationRevision", input, options)
end
@doc """
Registers an on-premises instance.
<note> Only one IAM ARN (an IAM session ARN or IAM user ARN) is supported
in the request. You cannot use both.
</note>
"""
def register_on_premises_instance(client, input, options \\ []) do
request(client, "RegisterOnPremisesInstance", input, options)
end
@doc """
Removes one or more tags from one or more on-premises instances.
"""
def remove_tags_from_on_premises_instances(client, input, options \\ []) do
request(client, "RemoveTagsFromOnPremisesInstances", input, options)
end
@doc """
In a blue/green deployment, overrides any specified wait time and starts
terminating instances immediately after the traffic routing is completed.
"""
def skip_wait_time_for_instance_termination(client, input, options \\ []) do
request(client, "SkipWaitTimeForInstanceTermination", input, options)
end
@doc """
Attempts to stop an ongoing deployment.
"""
def stop_deployment(client, input, options \\ []) do
request(client, "StopDeployment", input, options)
end
@doc """
Changes the name of an application.
"""
def update_application(client, input, options \\ []) do
request(client, "UpdateApplication", input, options)
end
@doc """
Changes information about a deployment group.
"""
def update_deployment_group(client, input, options \\ []) do
request(client, "UpdateDeploymentGroup", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "codedeploy"}
host = get_host("codedeploy", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "CodeDeploy_20141006.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/code_deploy.ex
| 0.802594 | 0.44571 |
code_deploy.ex
|
starcoder
|
defmodule Cldr.Print do
@moduledoc """
Implements `printf/3`, `sprintf/3` and `lprintf/3` in a manner
largely compatible with the standard `C` language implementations.
"""
alias Cldr.Print.Parser
import Cldr.Print.Splice
@doc """
Formats and prints its arguments under control of a format.
The format is a character string which contains two types of objects:
plain characters, which are simply copied to standard output and format
specifications, each of which causes printing of the next successive argument.
## Arguments
* `format` is a format string. Information on the definition of a
format string is below.
* `args` is a list of arguments that are formatted according to
the directives in the format string. The number of `args` in the list
must be at least equal to the number of format specifiers in the format
string.
* `options` is a keyword list defining how the number is to be formatted. The
valid options are:
## Options
* `backend` is any `Cldr` backend. That is, any module that
contains `use Cldr`. The default is the included `Cldr.Print.Backend`
which is configured with only the locale `en`.
* `:rounding_mode`: determines how a number is rounded to meet the precision
of the format requested. The available rounding modes are `:down`,
:half_up, :half_even, :ceiling, :floor, :half_down, :up. The default is
`:half_even`.
* `:number_system`: determines which of the number systems for a locale
should be used to define the separators and digits for the formatted
number. If `number_system` is an `atom` then `number_system` is
interpreted as a number system. See
`Cldr.Number.System.number_systems_for/2`. If the `:number_system` is
`binary` then it is interpreted as a number system name. See
`Cldr.Number.System.number_system_names_for/2`. The default is `:default`.
* `:locale`: determines the locale in which the number is formatted. See
`Cldr.known_locale_names/0`. The default is `Cldr.get_locale/0`, which is the
locale currently in effect for this `Process` and which is set by
`Cldr.put_locale/1`.
* `:device` which is used to define the output device for `printf/3`. The default is
`:stdio`.
## Returns
* `:ok` on success
* `{:error, {exception, reason}}` if an error is detected
## Format definition
Each format specification is introduced by the percent character (`%`).
The remainder of the format specification includes, in the following order:
* Optional format flags
* Optional field width
* Optional precision
* Required format type
This can be represented as:
```
%[flags][width][.precision]format_type
```
## Format flags
Zero or more of the following flags:
| Flag | Description |
| ----- | -------------------------------------------------------------------------------|
| # | A `#` character specifying that the value should be printed in an alternate form. For `b`, `c`, `d`, `s` and `u` formats, this option has no effect. For the `o` formats the precision of the number is increased to force the first character of the output string to a zero. For the `x` (`X`) format, a non-zero result has the string `0x` (`0X`) prepended to it. For `a`, `A`, `e`, `E`, `f`, `F`, `g` and `G` formats, the result will always contain a decimal point, even if no digits follow the point (normally, a decimal point only appears in the results of those formats if a digit follows the decimal point). For `g` and `G` formats, trailing zeros are not removed from the result as they would otherwise be. |
| - | A minus sign `-` which specifies left adjustment of the output in the indicated field. |
| + | A `+` character specifying that there should always be a sign placed before the number when using signed formats. |
| space | A space character specifying that a blank should be left before a positive number for a signed format. A `+` overrides a space if both are used. |
| 0 | A zero `0` character indicating that zero-padding should be used rather than blank-padding. A `-` overrides a `0` if both are used. |
| ' | Formats a number with digit grouping applied. The group size and grouping character are determined based upon the current process's locale or the `:locale` option to `printf/3` if provided. |
| I | Formats a number using the native number system digits of the current process's locale or the `:locale` option to `printf/3` if provided. The option `:number_system` if provided takes precedence over this flag. |
## Field Width
An optional digit string specifying a field width; if the output string has fewer bytes than the field
width it will be blank-padded on the left (or right, if the left-adjustment indicator has been given)
to make up the field width (note that a leading zero is a flag, but an embedded zero is part of a
field width).
## Precision
An optional period, `.`, followed by an optional digit string giving a precision which specifies the
number of digits to appear after the decimal point, for `e` and `f` formats, or the maximum number of
graphemes to be printed from a string. If the digit string is missing, the precision is treated as zero.
## Format Type
A character which indicates the type of format to use (one of `diouxXfFeEgGaAs`). The uppercase
formats differ from their lowercase counterparts only in that the output of the former is entirely in
uppercase.
| Format | Description |
| ------ | -------------------------------------------------------------------------------|
| diouXx | The argument is printed as a signed decimal (d or i), unsigned octal, unsigned decimal, or unsigned hexadecimal (X or x), respectively. |
| fF | The argument is printed in the style `[-]ddd.ddd` where the number of d's after the decimal point is equal to the precision specification for the argument. If the precision is missing, 6 digits are given; if the precision is explicitly 0, no digits and no decimal point are printed. The values infinity and NaN are printed as `inf' and `nan', respectively. |
| eE | The argument is printed in the style e `[-d.ddd±dd]` where there is one digit before the decimal point and the number after is equal to the precision specification for the argument; when the precision is missing, 6 digits are produced. The values infinity and NaN are printed as `inf` and `nan`, respectively. |
| gG | The argument is printed in style f or e (or in style E for a G format code), with the precision specifying the number of significant digits. The style used depends on the value converted: style e will be used only if the exponent resulting from the conversion is less than -4 or greater than the precision. Trailing zeroes are removed from the result; a decimal point appears only if it is followed by a digit. |
| aA | The argument is printed in style `[-h.hhh±pd]` where there is one digit before the hexadecimal point and the number after is equal to the precision specification for the argument; when the precision is missing, enough digits are produced to convey the argument's exact double-precision floating-point representation. The values infinity and NaN are printed as `inf` and `nan`, respectively. |
| s | Graphemes from the string argument are printed until the end is reached or until the number of graphemes indicated by the precision specification is reached; however if the precision is 0 or missing, the string is printed entirely. |
| % | Print a `%`; no argument is used. |
## Notes
* The grouping separator, decimal point and exponent characters are defined in the current
process's locale or as specified in the `:locale` option to `printf/3`.
* In no case does a non-existent or small field width cause truncation of a field; padding
takes place only if the specified field width exceeds the actual width.
* `printf/3` calls `IO.write/2` and therefore there are no control characters emitted
unless provided in the format string. This is consistent with the `C` implementation
but different from `IO.puts/2`.
"""
def printf(format, args, options \\ []) do
{device, options} = Keyword.pop(options, :device, :stdio)
with {:ok, io_list} <- lprintf(format, args, options) do
IO.write(device, io_list)
end
end
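# Illustrative calls (a sketch; the exact separators depend on the configured locale):
#
#     Cldr.Print.printf("%05d\n", 42)
#     # prints "00042"
#     Cldr.Print.sprintf("%'d", 1234567)
#     # {:ok, "1,234,567"} in an "en" locale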
@doc """
Returns a `{:ok, string}` after applying a format to a list of arguments.
The arguments and options are the same as those for `printf/3`
"""
def sprintf(format, args, options \\ []) do
with {:ok, io_list} <- lprintf(format, args, options) do
{:ok, IO.iodata_to_binary(io_list)}
end
end
@doc """
Returns a `string` or raises after applying a format to
a list of arguments.
The arguments and options are the same as those for `printf/3`
"""
def sprintf!(format, args, options \\ []) do
case sprintf(format, args, options) do
{:ok, string} -> string
{:error, {exception, reason}} -> raise exception, reason
end
end
@doc """
Returns an `{:ok, io_list}` after applying a format to a list of arguments.
The arguments and options are the same as those for `printf/3`
"""
def lprintf(format, args, options \\ [])
def lprintf(format, args, options) when is_list(args) do
with {:ok, tokens} <- Parser.parse(format),
{:ok, io_list} <- splice_arguments(tokens, args, options, &format/2) do
{:ok, Enum.reverse(io_list)}
end
end
def lprintf(format, arg, options) do
lprintf(format, [arg], options)
end
@doc """
Returns an `io_list` or raises after applying a format to
a list of arguments.
The arguments and options are the same as those for `printf/3`
"""
def lprintf!(format, args, options \\ []) do
case lprintf(format, args, options) do
{:ok, string} -> string
{:error, {exception, reason}} -> raise exception, reason
end
end
@doc false
defmacro mprintf(format, args, options \\ []) do
args = if is_list(args), do: args, else: [args]
with {:ok, tokens} <- Parser.parse(format),
{:ok, io_list} <- splice_arguments(tokens, args, options, &identity/2) do
quote do
IO.write Keyword.get(unquote(options), :device, :stdio),
IO.iodata_to_binary(format_list(unquote(Enum.reverse(io_list))))
end
end
end
@doc false
defmacro msprintf(format, args, options \\ []) do
args = if is_list(args), do: args, else: [args]
with {:ok, tokens} <- Parser.parse(format),
{:ok, io_list} <- splice_arguments(tokens, args, options, &identity/2) do
quote do
{:ok, format_list(unquote(Enum.reverse(io_list))) |> IO.iodata_to_binary}
end
end
end
@doc false
defmacro msprintf!(format, args, options \\ []) do
args = if is_list(args), do: args, else: [args]
with {:ok, tokens} <- Parser.parse(format),
{:ok, io_list} <- splice_arguments(tokens, args, options, &identity/2) do
quote do
format_list(unquote(Enum.reverse(io_list))) |> IO.iodata_to_binary
end
end
end
@doc false
defmacro mlprintf(format, args, options \\ []) do
args = if is_list(args), do: args, else: [args]
with {:ok, tokens} <- Parser.parse(format),
{:ok, io_list} <- splice_arguments(tokens, args, options, &identity/2) do
quote do
{:ok, format_list(unquote(Enum.reverse(io_list)))}
end
end
end
@doc false
defmacro mlprintf!(format, args, options \\ []) do
args = if is_list(args), do: args, else: [args]
with {:ok, tokens} <- Parser.parse(format),
{:ok, io_list} <- splice_arguments(tokens, args, options, &identity/2) do
quote do
format_list(unquote(Enum.reverse(io_list)))
end
end
end
end
|
lib/cldr_print.ex
| 0.885928 | 0.948632 |
cldr_print.ex
|
starcoder
|
defmodule CssColors.RGB do
@moduledoc false
defstruct [
red: 0.0, # 0-255
green: 0.0, # 0-255
blue: 0.0, # 0-255
alpha: 1.0 # 0-1
]
def rgb(red, green, blue, alpha \\ 1.0)
def rgb({red, :percent}, {green, :percent}, {blue, :percent}, alpha) do
rgb(red * 255, green * 255, blue * 255, alpha)
end
def rgb(red, green, blue, alpha) do
%__MODULE__{
red: cast(red, :red),
green: cast(green, :green),
blue: cast(blue, :blue),
alpha: cast(alpha, :alpha)
}
end
def to_string(struct, type \\ nil)
def to_string(struct, nil) do
type = case struct.alpha do
1.0 -> :hex
_ -> :rgba
end
to_string(struct, type)
end
def to_string(%__MODULE__{red: r, green: g, blue: b, alpha: alpha}, :rgba) do
"rgba(#{round(r)}, #{round(g)}, #{round(b)}, #{alpha})"
end
def to_string(%__MODULE__{red: r, green: g, blue: b, alpha: 1.0}, :hex) do
"#" <> to_hex(r) <> to_hex(g) <> to_hex(b)
end
def cast(value, field) when field in [:red, :green, :blue] do
value/1
|> min(255.0)
|> max(0.0)
end
def cast(value, :alpha) do
value/1
|> min(1.0)
|> max(0.0)
end
defp to_hex(value) when is_float(value), do:
to_hex(round(value))
defp to_hex(value) when value < 16, do:
"0" <> Integer.to_string(value, 16)
defp to_hex(value) when is_integer(value), do:
Integer.to_string(value, 16)
def to_hsl(%__MODULE__{red: r, green: g, blue: b}) do
r = r/255
g = g/255
b = b/255
colors = [r, g, b]
max_color = Enum.max colors
min_color = Enum.min colors
# Lightness is the midpoint of the largest and smallest color components.
l = (max_color + min_color) / 2
if max_color == min_color do
{0.0, 0.0, l}
else
color_diff = max_color - min_color
s = if l > 0.5,
do: color_diff / (2 - max_color - min_color),
else: color_diff / (max_color + min_color)
h = case max_color do
^r when g < b -> (g - b) / color_diff + 6
^r -> (g - b) / color_diff
^g -> (b - r) / color_diff + 2
^b -> (r - g) / color_diff + 4
end
h = h / 6
{h * 360, s, l}
end
end
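# Worked example: pure red maps to hue 0, full saturation and half lightness.
#
#     CssColors.RGB.to_hsl(CssColors.RGB.rgb(255, 0, 0))
#     #=> {0.0, 1.0, 0.5}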
end
defimpl String.Chars, for: CssColors.RGB do
def to_string(struct) do
CssColors.RGB.to_string(struct)
end
end
|
lib/CssColors/rgb.ex
| 0.772788 | 0.510374 |
rgb.ex
|
starcoder
|
defmodule ExSlackBot.TravisCIBot do
@moduledoc ~s"""
`TravisCIBot` is a generic bot to trigger builds on Travis CI using [the REST API](https://docs.travis-ci.com/user/triggering-builds).
This bot responds to commands as messages or as comments on a file snippet. If sending the bot a message by mention or directly, the line should take the following shape:
travis trigger repo=<org>/<repo> [branch=master]
The above text can be put into the comment section of a file snippet share, where the content of the file is a JSON document that will be sent to the Travis CI REST API. An example bit of JSON to set an environment variable might be:
```
{
"request": {
"branch": "master",
"config": {
"env": {
"global": ["OVERRIDE=true"]
}
}
}
}
```
"""
use ExSlackBot, :travis
def init([]) do
token = System.get_env("TRAVIS_TOKEN") || ""
{:ok, %{token: token}}
end
@doc ~s"""
The `trigger` function will invoke the Travis CI REST API for the given repository (passed by setting the attribute `repo=`).
"""
def trigger(%{repo: repo} = args, %{token: token} = state) do
repo_name = String.replace repo, "/", "%2F"
url = "https://api.travis-ci.org/repo/#{repo_name}/requests"
# Get the branch name from the attributes sent with the command or default to `master`
branch = case args do
%{branch: branch} -> branch
_ -> "master"
end
# Get the basic JSON to send to Travis. If not specified via snippet, pass the branch name.
json = case args do
%{file: content} -> content
_ -> "{\"request\":{\"branch\":\"#{branch}\"}}"
end
Logger.info "Travis CI: Triggering build on repo #{repo}@#{branch}"
Logger.debug "Travis CI: Sending JSON #{json}"
# Invoke the REST API with a POST
{color, text} = case HTTPoison.post! url, json, [
"Content-Type": "application/json",
"Accept": "application/json",
"Travis-API-Version": "3",
"Authorization": "token #{token}"
] do
%HTTPoison.Response{body: body, status_code: status} when status < 300 ->
# Post to Slack indicating success
{"good", "```#{body}```"}
resp ->
# Post to Slack indicating failure
{"danger", "```#{inspect(resp, pretty: true)}```"}
end
{:reply, %{summary: "Triggered #{repo}", color: color, text: text}, state}
end
end
|
lib/exslackbot/traviscibot.ex
| 0.785884 | 0.882529 |
traviscibot.ex
|
starcoder
|
defmodule Iyzico.Payment do
@moduledoc """
A module representing information for successful payment returned from the platform.
"""
@enforce_keys ~w(basket_id bin_id card_ref conversation_id currency
fraud_status installment transactions commission_fee commission_amount paid_price id
last_four_digits merchant_commission_rate merchant_commission_amount price)a
defstruct [
:basket_id,
:bin_id,
:card_ref,
:conversation_id,
:currency,
:fraud_status,
:installment,
:transactions,
:commission_fee,
:commission_amount,
:paid_price,
:id,
:last_four_digits,
:merchant_commission_rate,
:merchant_commission_amount,
:price
]
@typedoc """
Currency of the payment. Currently only Turkish Lira (`:try`) is supported.
"""
@type currency :: :try
@typedoc """
Fraud status of the payment.
If fraud checking authority is performing a wait in operation, the result is `:awaiting`.
A suspicious operation will be flagged by the authority as `:restrict`.
A merchant should proceed to the transaction if and only if value is `:ok`.
"""
@type fraud_status :: :restrict | :awaiting | :ok
@typedoc """
A struct representing processed payment.
## Fields
- `:basket_id`: Unique identifier of the basket.
- `:bin_id`: Unique identifier of the current bin.
- `:card_ref`: Card persistence reference if card is persisted.
- `:conversation_id`: Unique identifier of the conversation.
- `:currency`: Active currency of the transaction.
- `:installment`: The number of applied installments.
- `:transactions`: The transactions of subsequent items in the payment.
- `:commission_fee`: Commission fee applied to this particular transaction group (payment).
- `:commission_amount`: Commission amount applied to the total of transactions.
- `:last_four_digits`: Last four digits of the payment card provided.
- `:merchant_commission_rate`: Merchant commission rate of the payment.
- `:merchant_commission_amount`: Merchant commission amount of the payment.
- `:paid_price`: Transaction value.
- `:price`: Transaction base value.
- `:id`: Payment identifier.
"""
@type t :: %__MODULE__{
basket_id: binary,
bin_id: binary,
card_ref: Iyzico.CardReference.t,
conversation_id: binary,
currency: currency,
fraud_status: fraud_status,
installment: integer,
transactions: list,
commission_fee: number,
commission_amount: number,
last_four_digits: binary,
merchant_commission_rate: number,
merchant_commission_amount: number,
paid_price: number,
price: number,
id: binary
}
@doc """
Converts the integer representation of a payment's fraud status to the respective type.
## Discussion
A merchant should only proceed to the transaction if and only if the fraud status is `:ok`.
A payment's fraud status will be `:awaiting` if the transaction's safety is still being assessed.
Transactions with fraud status with `:restrict` value should be avoided.
## Examples
iex> Iyzico.Payment.to_fraud_status -1
:restrict
iex> Iyzico.Payment.to_fraud_status 0
:awaiting
iex> Iyzico.Payment.to_fraud_status 1
:ok
"""
@spec to_fraud_status(integer) :: fraud_status
def to_fraud_status(status) do
case status do
-1 ->
:restrict
0 ->
:awaiting
1 ->
:ok
end
end
end
|
lib/model/payment.ex
| 0.848612 | 0.625552 |
payment.ex
|
starcoder
|
defmodule LoggerMulticastBackend do
@moduledoc """
A backend for `Logger` that delivers messages over multicast UDP.
Designed for headless embedded applications, it allows watching the log over the local network.
## Easy Defaults
In your logger config, simply do something like this:
```elixir
config :logger,
backends: [ :console, LoggerMulticastBackend ]
```
or, at runtime, you can add this to your current config...
```elixir
Logger.add_backend LoggerMulticastBackend
```
Now, you'll have logging messages sent out on the default target multicast address, which is 224.0.0.224:9999.
## Custom Configuration
Don't like the default multicast target or format? Change it by including options like this:
```elixir
config :logger, :logger_multicast_backend,
target: {{224,1,22,223}, 4252},
format: "$time $metadata[$level] $message\\n",
level: :info
```
The full range of custom configuration options in the tuple are as follows:
- __target__ - a tuple of the target unicast or multicast address and port, like {{241,0,0,3}, 52209}
- __level__ - the level to be logged by this backend. Note that messages are first filtered by the general level configuration in :logger
- __format__ - the format message used to print logs.
Defaults to: ``"$time $metadata[$level] $levelpad$message\n"``
- __metadata__ - the metadata to be printed by $metadata.
Defaults to an empty list (no metadata)
"""
use GenEvent
require Logger
# @type level :: Logger.level
# @type format :: String.t
# @type metadata :: [atom]
@default_target {{224,0,0,224}, 9999}
@default_format "$time $metadata[$level] $message\n"
@doc false
def init({__MODULE__, opts}) when is_list(opts) do
env = Application.get_env(:logger, :logger_multicast_backend, [])
opts = Keyword.merge(env, opts)
Application.put_env(:logger, :logger_multicast_backend, opts)
level = Keyword.get(opts, :level)
metadata = Keyword.get(opts, :metadata, [])
format_opts = Keyword.get(opts, :format, @default_format)
format = Logger.Formatter.compile(format_opts)
target = Keyword.get(opts, :target, @default_target)
Logger.debug "starting multicast backend on target #{inspect target}"
{:ok, sender} = GenServer.start_link(LoggerMulticastSender, target)
state = %{sender: sender, level: level, format: format, metadata: metadata}
{:ok, state}
end
def init(__MODULE__), do: init({__MODULE__, []})
@doc false
def handle_event({level, _gl, {Logger, message, timestamp, metadata}}, %{level: min_level} = state) do
if is_nil(min_level) or Logger.compare_levels(level, min_level) != :lt do
entry = format_event(level, message, timestamp, metadata, state)
:ok = GenServer.cast(state.sender, {:add_entry, entry})
end
{:ok, state}
end
defp format_event(level, msg, ts, md, %{format: format, metadata: metadata}) do
Logger.Formatter.format(format, level, msg, ts, Keyword.take(md, metadata))
end
end
|
lib/logger_multicast_backend.ex
| 0.818592 | 0.651743 |
logger_multicast_backend.ex
|
starcoder
|
defmodule Mix.Tasks.Plumbapius.GetDocs do
@moduledoc """
Clones and updates git repo with apib docs
#Usage
```
mix plumbapius.get_docs -c ssh://[email protected]/gc/ghetto-auth-apib.git -d ./path/to/put/repo -b branch-name
```
"""
@shortdoc "Clones and updates git repo with apib docs"
use Mix.Task
require Logger
@default_apib_workdir ".apib"
@default_branch "master"
@impl Mix.Task
def run(argv, update_repo \\ &update_repo/3, update_gitignore \\ &update_gitignore/1, halt \\ &System.halt/1) do
case params() |> Optimus.parse!(argv, halt) do
%{options: options} ->
update_repo.(options.git_clone_uri, options.local_stock_folder, options.branch)
update_gitignore.(options.local_stock_folder)
error ->
error
end
end
defp update_repo(uri, local_folder, branch) do
unless File.exists?(local_folder) do
clone_repo(uri, local_folder, branch)
else
update_repo(local_folder, branch)
end
end
defp update_repo(local_git_folder, branch) do
Logger.info("Updating #{local_git_folder} repository with branch #{branch}")
with {_, 0} <- System.cmd("git", ["-C", local_git_folder, "fetch", "origin", branch]),
{_, 0} <-
System.cmd("git", ["-C", local_git_folder, "reset", "--hard", "origin/#{branch}"]),
{_, 0} <- System.cmd("git", ["-C", local_git_folder, "clean", "-ffdx"]) do
Logger.info("Repository has been updated successfully")
else
error ->
raise RuntimeError, inspect(error)
end
end
defp clone_repo(git_uri, local_folder, branch) do
Logger.info("Cloning #{git_uri} repository into #{local_folder} with branch #{branch}")
with {_, 0} <- System.cmd("git", ["clone", git_uri, local_folder]),
{_, 0} <- System.cmd("git", ["-C", local_folder, "checkout", branch]) do
Logger.info("Repository has been cloned successfully")
else
error ->
raise RuntimeError, inspect(error)
end
end
defp update_gitignore(local_stock_folder) do
unless File.stream!(".gitignore")
|> Enum.any?(&String.starts_with?(&1, local_stock_folder)) do
Logger.info("Updating .gitignore file")
{:ok, file} = File.open(".gitignore", [:append])
IO.binwrite(file, local_stock_folder <> "\n")
File.close(file)
Logger.info(".gitignore file has been updated successfully")
end
end
defp params do
Optimus.new!(
name: "get_docs",
description: "Git repositories assistant",
version: "0.1.0",
author: "Funbox",
about: "Utility for downloading and updating apib repository",
allow_unknown_args: false,
parse_double_dash: true,
options: [
git_clone_uri: [
value_name: "GIT_CLONE_URI",
short: "-c",
long: "--clone",
help: "Clone URI of apib repository",
required: true
],
local_stock_folder: [
value_name: "LOCAL_STOCK_DIRECTORY",
short: "-d",
long: "--directory",
help: "Local directory to stock apib repository",
required: false,
default: @default_apib_workdir
],
branch: [
value_name: "BRANCH",
short: "-b",
long: "--branch",
help: "Required branch in apib repository",
required: false,
default: @default_branch
]
]
)
end
end
|
lib/mix/get_docs.ex
| 0.602296 | 0.596609 |
get_docs.ex
|
starcoder
|
defmodule Trento.AggregateCase do
@moduledoc """
This module defines the test case to be used by aggregate tests.
Derived from Commanded.AggregateCase
"""
use ExUnit.CaseTemplate
alias Commanded.Aggregate.Multi
# credo:disable-for-this-file
using opts do
quote do
@aggregate Keyword.fetch!(unquote(opts), :aggregate)
# Assert that the expected events are returned when the given commands have been executed
defp assert_events(commands, expected_events) do
assert_events([], commands, expected_events)
end
defp assert_events(initial_events, commands, expected_events) do
assert {:ok, _state, events} = aggregate_run(initial_events, commands)
actual_events = List.wrap(events)
expected_events = List.wrap(expected_events)
assert actual_events == expected_events
end
# Assert that the aggregate will have the expected_state after the given commands have been executed
defp assert_state(commands, expected_state) do
assert_state([], commands, expected_state)
end
defp assert_state(
initial_events,
commands,
fun
)
when is_function(fun, 1) do
assert {:ok, state, _} = aggregate_run(initial_events, commands)
fun.(state)
end
defp assert_state(initial_events, commands, expected_state) do
assert {:ok, state, _events} = aggregate_run(initial_events, commands)
assert state == expected_state
end
defp assert_events_and_state(
initial_events,
commands,
expected_events,
expected_state
) do
assert_events(initial_events, commands, expected_events)
assert_state(initial_events, commands, expected_state)
end
defp assert_error(commands, expected_error) do
assert_error([], commands, expected_error)
end
defp assert_error(initial_events, commands, expected_error) do
assert ^expected_error = aggregate_run(initial_events, commands)
end
# Apply the given commands to the aggregate hydrated with the given initial_events
defp aggregate_run(initial_events, commands) do
@aggregate
|> struct()
|> evolve(initial_events)
|> execute(commands)
end
# Execute one or more commands against an aggregate.
defp execute(state, commands) do
try do
{state, events} =
commands
|> List.wrap()
|> Enum.reduce({state, []}, fn command, {state, events} ->
case @aggregate.execute(state, command) do
{:error, _error} = error ->
throw(error)
%Multi{} = multi ->
case Multi.run(multi) do
{:error, _reason} = error ->
throw(error)
{state, new_events} ->
{state, events ++ new_events}
end
none when none in [:ok, nil, []] ->
{state, events}
{:ok, new_events} ->
{evolve(state, new_events), events ++ List.wrap(new_events)}
new_events when is_list(new_events) ->
{evolve(state, new_events), events ++ new_events}
new_event when is_map(new_event) ->
{evolve(state, new_event), events ++ [new_event]}
invalid ->
flunk("unexpected: " <> inspect(invalid))
end
end)
{:ok, state, events}
catch
{:error, _error} = reply -> reply
end
end
# Apply the given events to the aggregate state
defp evolve(state, events) do
events
|> List.wrap()
|> Enum.reduce(state, &@aggregate.apply(&2, &1))
end
end
end
end
|
test/support/aggregate_case.ex
| 0.805785 | 0.749958 |
aggregate_case.ex
|
starcoder
|
defmodule BSV.Contract.OpCodeHelpers do
@moduledoc """
Helper module for using Op Codes in `BSV.Contract` modules.
All known Op Codes are available as a function which simply pushes the Op Code
word onto the Contract Script. Refer to `BSV.VM` for descriptions of each
Op Code.
In addition, `op_if/2` and `op_if/3` provide a more syntactically pleasing way
of handling the flow control operations by passing `handle_if` and `handle_else`
callback functions.
op_if(contract, &op_1add/1, &op_1sub/1)
# Equivalent to...
contract
|> op_if()
|> op_1add()
|> op_else()
|> op_1sub()
|> op_endif()
*The same applies to `op_notif/2` and `op_notif/3`*.
"""
alias BSV.{Contract, OpCode}
# Iterates over all opcodes
# Defines a function to push the specified opcode onto the contract script
Enum.each(OpCode.all(), fn {op, _} ->
key = op
|> Atom.to_string()
|> String.downcase()
|> String.to_atom()
@doc "Pushes the `#{op}` word onto the script."
@spec unquote(key)(Contract.t()) :: Contract.t()
def unquote(key)(%Contract{} = contract) do
Contract.script_push(contract, unquote(op))
end
end)
@doc """
Wraps the given `handle_if` function with `OP_IF` and `OP_ENDIF` script words.
"""
@spec op_if(Contract.t(), (Contract.t() -> Contract.t())) :: Contract.t()
def op_if(%Contract{} = contract, handle_if) when is_function(handle_if) do
contract
|> op_if()
|> handle_if.()
|> op_endif()
end
@doc """
Wraps the given `handle_if` and `handle_else` functions with `OP_IF`,
`OP_ELSE` and `OP_ENDIF` script words.
"""
@spec op_if(
Contract.t(),
(Contract.t() -> Contract.t()),
(Contract.t() -> Contract.t())
) :: Contract.t()
def op_if(%Contract{} = contract, handle_if, handle_else)
when is_function(handle_if) and is_function(handle_else)
do
contract
|> op_if()
|> handle_if.()
|> op_else()
|> handle_else.()
|> op_endif()
end
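# Illustrative usage inside a contract pipeline (a sketch; `contract` is assumed
# to be a `%BSV.Contract{}` under construction):
#
#     contract
#     |> op_dup()
#     |> op_if(&op_1add/1, &op_1sub/1)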
@doc """
Wraps the given `handle_if` function with `OP_NOTIF` and `OP_ENDIF` script
words.
"""
@spec op_notif(Contract.t(), (Contract.t() -> Contract.t())) :: Contract.t()
def op_notif(%Contract{} = contract, handle_if) when is_function(handle_if) do
contract
|> op_notif()
|> handle_if.()
|> op_endif()
end
@doc """
Wraps the given `handle_if` and `handle_else` functions with `OP_NOTIF`,
`OP_ELSE` and `OP_ENDIF` script words.
"""
@spec op_notif(
Contract.t(),
(Contract.t() -> Contract.t()),
(Contract.t() -> Contract.t())
) :: Contract.t()
def op_notif(%Contract{} = contract, handle_if, handle_else)
when is_function(handle_if) and is_function(handle_else)
do
contract
|> op_notif()
|> handle_if.()
|> op_else()
|> handle_else.()
|> op_endif()
end
end
|
lib/bsv/contract/op_code_helpers.ex
| 0.776708 | 0.526038 |
op_code_helpers.ex
|
starcoder
|
defmodule SudokuSolver.Recursive do
@moduledoc """
Implements SudokuSolver using recursion
"""
@behaviour SudokuSolver
@doc """
Implements a sudoku solver using recursion
"""
@impl SudokuSolver
@spec solve(SudokuBoard.t()) :: SudokuBoard.t() | nil
def solve(%SudokuBoard{size: size} = board) do
max_index = size * size - 1
solve_helper(board, max_index)
end
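# Illustrative call (a sketch; assumes a `SudokuBoard` struct with `:size` and
# `:grid` fields where 0 marks an empty cell):
#
#     board = %SudokuBoard{size: 4, grid: [1, 2, 3, 4, 3, 4, 1, 2, 0, 0, 0, 0, 4, 3, 2, 1]}
#     SudokuSolver.Recursive.solve(board)
#     #=> the solved %SudokuBoard{} or nil when unsolvable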
# Solves the sudoku using backtracking, starting at the end of the board and
# moving to the front. solve_helper keeps track of which cell is currently being tried.
@spec solve_helper(SudokuBoard.t(), integer()) :: SudokuBoard.t() | nil
defp solve_helper(%SudokuBoard{} = board, -1) do
if SudokuBoard.solved?(board), do: board, else: nil
end
defp solve_helper(%SudokuBoard{} = board, idx) do
elt = Enum.at(board.grid, idx)
if elt != 0 do
solve_helper(board, idx - 1)
else
try_solve(board, idx, Enum.to_list(1..board.size))
end
end
# try_solve attempts to solve a board by populating a cell from a list of suggestions.
defp try_solve(%SudokuBoard{}, _idx, []), do: nil
defp try_solve(%SudokuBoard{} = board, idx, [suggestion | other_suggestions]) do
new_board = SudokuBoard.place_number(board, idx, suggestion)
if SudokuBoard.partial_solution?(new_board) do
solution = solve_helper(new_board, idx - 1)
if solution == nil do
try_solve(board, idx, other_suggestions)
else
solution
end
else
try_solve(board, idx, other_suggestions)
end
end
@doc """
Finds all possible solutions to a sudoku.
## Parameters
- board: A sudoku board
"""
@impl SudokuSolver
@spec all_solutions(SudokuBoard.t()) :: [SudokuBoard.t()]
def all_solutions(%SudokuBoard{} = board) do
max_index = board.size * board.size - 1
find_all_solutions_helper(board, max_index, [])
end
# Finds all solutions to a sudoku board, starting at the end of the board.
# It uses the accumulator `acc` to track the previously found solutions.
defp find_all_solutions_helper(board, -1, acc) do
if SudokuBoard.solved?(board) do
[board | acc]
else
acc
end
end
defp find_all_solutions_helper(%SudokuBoard{} = board, idx, acc) do
elt = Enum.at(board.grid, idx)
if elt != 0 do
find_all_solutions_helper(board, idx - 1, acc)
else
try_find_all_solutions(board, idx, Enum.to_list(1..board.size), acc)
end
end
# try_find_all_solutions attempts to find a solution to a board by populating a cell from
# a list of suggestions. It will exhaust all possible solutions and store the results in the accumulator.
defp try_find_all_solutions(_board, _idx, [], acc), do: acc
defp try_find_all_solutions(%SudokuBoard{} = board, idx, [suggestion | other_suggestions], acc) do
new_board = SudokuBoard.place_number(board, idx, suggestion)
new_acc =
if SudokuBoard.partial_solution?(new_board) do
find_all_solutions_helper(new_board, idx - 1, acc)
else
acc
end
try_find_all_solutions(board, idx, other_suggestions, new_acc)
end
end
|
lib/sudoku_solver/recursive.ex
| 0.694821 | 0.551393 |
recursive.ex
|
starcoder
|
defmodule Gmail.Label do
@moduledoc"""
Labels are used to categorize messages and threads within the user's mailbox.
"""
alias __MODULE__
import Gmail.Base
@doc """
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels#resource
"""
defstruct id: nil,
name: nil,
message_list_visibility: nil,
label_list_visibility: nil,
type: nil,
messages_total: nil,
messages_unread: nil,
threads_total: nil,
threads_unread: nil
@type t :: %__MODULE__{}
@doc """
Creates a new label.
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels/create
"""
@spec create(String.t, String.t) :: {atom, String.t, String.t, map}
def create(user_id, label_name) do
{:post, base_url(), "users/#{user_id}/labels", %{"name" => label_name}}
end
@doc """
Updates the specified label.
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels/update
"""
@spec update(String.t, Label.t) :: {atom, String.t, String.t, map}
def update(user_id, %Label{id: id} = label) do
{:put, base_url(), "users/#{user_id}/labels/#{id}", convert_for_update(label)}
end
@doc """
Updates the specified label. This method supports patch semantics.
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels/patch
"""
@spec patch(String.t, Label.t) :: {atom, String.t, String.t, map}
def patch(user_id, %Label{id: id} = label) do
{:patch, base_url(), "users/#{user_id}/labels/#{id}", convert_for_patch(label)}
end
@doc """
Immediately and permanently deletes the specified label and removes it from any messages and threads that it is applied to.
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels/delete
"""
@spec delete(String.t, String.t) :: {atom, String.t, String.t}
def delete(user_id, label_id) do
{:delete, base_url(), "users/#{user_id}/labels/#{label_id}"}
end
@doc """
Gets the specified label.
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels/get
"""
@spec get(String.t, String.t) :: {atom, String.t, String.t}
def get(user_id, label_id) do
{:get, base_url(), "users/#{user_id}/labels/#{label_id}"}
end
@doc """
Lists all labels in the user's mailbox.
> Gmail API documentation: https://developers.google.com/gmail/api/v1/reference/users/labels/list
"""
@spec list(String.t) :: {atom, String.t, String.t}
def list(user_id) do
{:get, base_url(), "users/#{user_id}/labels"}
end
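# Illustrative result (a sketch): these functions build request description
# tuples rather than performing HTTP calls themselves.
#
#     Gmail.Label.list("me")
#     #=> {:get, base_url, "users/me/labels"}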
@doc """
Converts a Gmail API label resource into a local struct.
"""
@spec convert(map) :: Label.t
def convert(result) do
Enum.reduce(result, %Label{}, fn({key, value}, label) ->
%{label | (key |> Macro.underscore |> String.to_atom) => value}
end)
end
@doc """
Handles a label resource response from the Gmail API.
"""
@spec handle_label_response({atom, map}) :: {atom, String.t} | {atom, map}
@spec handle_label_response({atom, String.t}) :: {atom, String.t} | {atom, map}
def handle_label_response(response) do
response
|> handle_error
|> case do
{:error, message} ->
{:error, message}
{:ok, %{"error" => details}} ->
{:error, details}
{:ok, raw_label} ->
{:ok, convert(raw_label)}
end
end
@doc """
Handles a label list response from the Gmail API.
"""
@spec handle_label_list_response(atom | {atom, map | String.t}) :: {atom, String.t | map}
def handle_label_list_response(response) do
response
|> handle_error
|> case do
{:error, message} ->
{:error, message}
{:ok, %{"labels" => raw_labels}} ->
{:ok, Enum.map(raw_labels, &convert/1)}
end
end
@doc """
Handles a label delete response from the Gmail API.
"""
@spec handle_label_delete_response(atom | {atom, map | String.t}) :: {atom, String.t} | {atom, map} | atom
def handle_label_delete_response(response) do
response
|> handle_error
|> case do
{:error, message} ->
{:error, message}
{:ok, _} ->
:ok
end
end
@spec convert_for_patch(Label.t) :: map
defp convert_for_patch(label) do
label |> Map.from_struct |> Enum.reduce(%{}, fn({key, value}, map) ->
if value do
{first_letter, rest} = key |> Atom.to_string |> Macro.camelize |> String.split_at(1)
Map.put(map, String.downcase(first_letter) <> rest, value)
else
map
end
end)
end
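# Illustrative input/output (hypothetical label): only non-nil fields are
# kept, and keys are converted back to camelCase:
#
#     convert_for_patch(%Label{id: "Label_1", message_list_visibility: "show"})
#     #=> %{"id" => "Label_1", "messageListVisibility" => "show"}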
@spec convert_for_update(Label.t) :: map
defp convert_for_update(%Label{
id: id,
name: name,
label_list_visibility: label_list_visibility,
message_list_visibility: message_list_visibility
}) do
%{
"id" => id,
"name" => name,
"labelListVisibility" => label_list_visibility,
"messageListVisibility" => message_list_visibility
}
end
end
|
lib/gmail/label.ex
| 0.73307 | 0.408454 |
label.ex
|
starcoder
|
defmodule Essence.Readability do
@moduledoc """
The Readability module contains several methods for
calculating the readability scores of a text.
"""
alias Essence.{Document, Token}
@doc """
The `ari_score` method calculates the Automated Readability Index (ARI)
of a given `Essence.Document`.
## Details
The ARI uses two quantities, `mu(w)` and `mu(s)`, where `mu(w)` is the
average number of letters per word in the given text and `mu(s)` is the
average number of words per sentence in the given text.
The ARI is then defined by the following formula:
`ari = 4.71 * mu(w) + 0.5 * mu(s) - 21.43`
Commonly, the ARI score is rounded up and translated by the following table:
| ARI score | Readability Level | Reader Age |
| --------- | ----------------- | ---------- |
| 1 | Kindergarten | 5-6 |
| 2 | First Grade | 6-7 |
| 3 | Second Grade | 7-8 |
| 4 | Third Grade | 8-9 |
| 5 | Fourth Grade | 9-10 |
| 6 | Fifth Grade | 10-11 |
| 7 | Sixth Grade | 11-12 |
| 8 | Seventh Grade | 12-13 |
| 9 | Eighth Grade | 13-14 |
| 10 | Ninth Grade | 14-15 |
| 11 | Tenth Grade | 15-16 |
| 12 | Eleventh Grade | 16-17 |
| 13 | Twelth Grade | 17-18 |
| 14+ | College | 18-22 |
"""
def ari_score(doc = %Document{}) do
  sentences = Document.sentences(doc)
  n_sentences = Enum.count(sentences)
  n_words = sentences |> Enum.map(&Enum.count/1) |> Enum.sum()
  mu_s = n_words / n_sentences
  mu_w = Token.avg_word_length(Document.enumerate_tokens(doc))
  ari = 4.71 * mu_w + 0.5 * mu_s - 21.43
  ari |> Float.ceil() |> Kernel.trunc()
end
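# A minimal usage sketch (assuming `Essence.Document.from_text/1`, this
# library's usual constructor):
#
#     "The cat sat on the mat. The dog barked."
#     |> Essence.Document.from_text()
#     |> Essence.Readability.ari_score()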
@doc """
The `smog_grade` method calculates the SMOG grade measure of readability that
estimates the years of education needed to understand a piece of writing.
The SMOG grade is commonly used in rating health messages.
Please note that results for documents with fewer than 30 sentences are statistically invalid.[1]
[1] https://en.wikipedia.org/wiki/SMOG
"""
def smog_grade(doc = %Document{}) do
n_sentences = doc |> Document.sentences |> Enum.count
n_polys = doc |> Document.enumerate_tokens |> Enum.filter(&Token.is_word?/1) |> Enum.filter(&Token.is_polysyllabic?(&1, 3)) |> Enum.count
grade = 1.0430 * :math.sqrt(n_polys * 30 / n_sentences) + 3.1291
grade
end
@doc """
Gunning fog index measures the readability of English writing. The index
estimates the years of formal education needed to understand the text on a
first reading. A fog index of 12 requires the reading level of a U.S. high
school senior (around 18 years old). The test was developed by <NAME>, an American businessman, in 1952.[1]
The fog index is commonly used to confirm that text can be read easily by the
intended audience. Texts for a wide audience generally need a fog index less
than 12. Texts requiring near-universal understanding generally need an index
less than 8.
[1] DuBay, William H. (23 March 2004). "Judges Scold Lawyers for Bad
Writing". Plain Language At Work Newsletter. Impact Information (8).
| Fog Index | Reading level by grade |
| --------- | ---------------------- |
| 17 | College graduate |
| 16 | College senior |
| 15 | College junior |
| 14 | College sophomore |
| 13 | College freshman |
| 12 | High school senior |
| 11 | High school junior |
| 10 | High school sophomore |
| 9 | High school freshman |
| 8 | Eighth grade |
| 7 | Seventh grade |
| 6 | Sixth grade |
"""
def gunning_fog(doc = %Document{}) do
n_words = doc |> Document.enumerate_tokens |> Enum.filter(&Token.is_word?/1) |> Enum.count
n_sentences = doc |> Document.sentences |> Enum.count
n_complex_words = doc |> Document.enumerate_tokens |> Enum.filter(&Token.is_word?/1) |> Enum.filter(&Token.is_polysyllabic?(&1, 3)) |> Enum.count
gf_index = 0.4 * ( (n_words / n_sentences) + 100 * (n_complex_words / n_words) )
gf_index
end
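# Worked example: 100 words, 5 sentences and 10 complex words give
# 0.4 * ((100 / 5) + 100 * (10 / 100)) = 0.4 * (20 + 10) = 12.0,
# i.e. roughly "high school senior" in the table above.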
@doc """
Calculates the Dale-Chall readability score, which provides a numeric gauge of
the comprehension difficulty that readers come upon when reading a text. It
uses a list of 3000 words that groups of fourth-grade American students could
reliably understand, considering any word not on that list to be difficult.
| Score | Notes |
| ------------ | -------------------------------------------------------------------- |
| 4.9 or lower | easily understood by an average 4th-grade student or lower |
| 5.0–5.9 | easily understood by an average 5th or 6th-grade student |
| 6.0–6.9 | easily understood by an average 7th or 8th-grade student |
| 7.0–7.9 | easily understood by an average 9th or 10th-grade student |
| 8.0–8.9 | easily understood by an average 11th or 12th-grade student |
| 9.0–9.9 | easily understood by an average 13th to 15th-grade (college) student |
"""
def dale_chall(doc = %Document{}) do
n_words = doc |> Document.enumerate_tokens |> Enum.filter(&Token.is_word?/1) |> Enum.count
n_sentences = doc |> Document.sentences |> Enum.count
n_difficult_words = doc |> Document.enumerate_tokens |> Enum.filter(&Token.is_word?/1) |> Enum.filter(&Essence.DaleChall.is_hard_word?/1) |> Enum.count
score = 0.1579 * (n_difficult_words / n_words * 100) + 0.0496 * (n_words / n_sentences)
score
end
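# Worked example: 100 words, 5 sentences and 10 difficult words give
# 0.1579 * (10 / 100 * 100) + 0.0496 * (100 / 5) = 1.579 + 0.992 = 2.571,
# i.e. easily understood by an average 4th-grade student per the table above.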
@doc """
The Coleman-Liau readability test. Like the ARI but unlike most of the other indices, Coleman–Liau
relies on characters instead of syllables per word. Although opinion varies on its accuracy as compared
to the syllable/word and complex word indices, characters are more readily and accurately counted by
computer programs than are syllables. The Coleman–Liau index was designed to be easily calculated
mechanically from samples of hard-copy text. Unlike syllable-based readability indices, it does not
require that the character content of words be analyzed, only their length in characters. Therefore,
it could be used in conjunction with theoretically simple mechanical scanners that would only need
to recognize character, word, and sentence boundaries, removing the need for full optical character
recognition or manual keypunching.
The score output approximates the U.S. grade level thought necessary to comprehend the text.
"""
@spec coleman_liau(%Essence.Document{}) :: float
def coleman_liau(doc = %Document{}) do
# Average number of letters per 100 words
n_letters = doc |> Document.enumerate_tokens() |> Enum.map(fn(token) -> Token.token_length(token) end ) |> Enum.sum
n_words = doc |> Document.enumerate_tokens |> Enum.filter(&Token.is_word?/1) |> Enum.count
l_metric = n_letters / n_words * 100
# Average number of sentences per 100 words
n_sentences = doc |> Document.sentences |> Enum.count
s_metric = n_sentences / n_words * 100
score = (0.0588 * l_metric) - (0.296 * s_metric) - 15.8
score
end
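# Worked example: 100 words, 450 letters and 5 sentences give L = 450 and
# S = 5, so the score is 0.0588 * 450 - 0.296 * 5 - 15.8 = 9.18, roughly a
# ninth-grade reading level.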
@doc """
Calculates an estimate of the time it would take an average reader to read
the given `Essence.Document`, assuming a reading `speed` of 200 words per
minute.
"""
def reading_time(doc = %Document{}, speed \\ 200) do
n_words = doc |> Document.words |> Enum.count
mins = Float.round(n_words / speed)
mins
end
@doc """
Calculates an estimate of the time it would take to read the given
`Essence.Document` as a speech, with a speaking `speed` of 120 words per
minute.
"""
def speaking_time(doc = %Document{}, speed \\ 120) do
n_words = doc |> Document.words |> Enum.count
n_sentences = doc |> Document.sentences |> Enum.count
mins = Float.round(n_words / speed) + n_sentences * 0.03
mins
end
@doc """
Calculates the speaking speed in words per minute, given a speech described
by the given `Essence.Document` and the recorded `speaking_time` in minutes.
"""
def speaking_speed(doc = %Document{}, speaking_time) do
n_words = doc |> Document.words |> Enum.count
n_sentences = doc |> Document.sentences |> Enum.count
speed = Float.round(n_words / ( speaking_time - (n_sentences * 0.03) ))
speed
end
end
|
lib/essence/readability.ex
| 0.861553 | 0.761006 |
readability.ex
|
starcoder
|
defmodule Saxy do
@moduledoc ~S"""
Saxy is an XML SAX parser and encoder.
Saxy provides functions to parse XML files in both binary and streaming fashion,
in compliance with [Extensible Markup Language (XML) 1.0 (Fifth Edition)](https://www.w3.org/TR/xml/).
Saxy also offers DSL and API to build, compose and encode XML document.
See "Encoder" section below for more information.
## Parser
Saxy parser supports two modes of parsing: SAX and simple form.
### SAX mode (Simple API for XML)
SAX is an event-driven algorithm for parsing XML documents. A SAX parser takes an XML document as input
and emits events to a pre-configured event handler during parsing.
There are 5 types of SAX events supported by Saxy:
* `:start_document` - after prolog is parsed.
* `:start_element` - when open tag is parsed.
* `:characters` - when a chunk of `CharData` is parsed.
* `:end_element` - when end tag is parsed.
* `:end_document` - when the root element is closed.
See `Saxy.Handler` for more information.
### Simple form mode
Saxy supports parsing XML documents into a simple format. See `Saxy.SimpleForm` for more details.
### Encoding
Saxy **only** supports UTF-8 encoding. It also respects the encoding set in the XML document prolog, which means
that if the declared encoding is not UTF-8, the parser stops. When no encoding is declared,
Saxy defaults to UTF-8.
### Reference expansion
Saxy supports expanding character references and XML 1.0 predefined entity references, for example `A`
is expanded to `"A"`, `&` to `"&"`, and `&` to `"&"`.
Saxy does not expand external entity references, but provides an option to specify how they should be handled.
See more in "Shared options" section.
### Creation of atoms
Saxy does not create atoms during the parsing process.
### DTD and XSD
Saxy does not support parsing DTDs (Document Type Definitions) and XSD schemas. When it encounters a DTD, the parser simply
skips it.
### Configuration
Saxy allows the streaming feature to be disabled at compile time, which can give some performance gain when
you use Saxy to parse documents that are fully loaded in memory.
Note that this makes the streaming feature unavailable. It is turned on by default.
```
# config/config.exs
config :saxy, :parser, streaming: false
```
### Shared options
* `:expand_entity` - specifies how external entity references should be handled. Three supported strategies respectively are:
* `:keep` - keep the original binary, for example `Orange ®` will be expanded to `"Orange ®"`, this is the default strategy.
* `:skip` - skip the original binary, for example `Orange ®` will be expanded to `"Orange "`.
* `{mod, fun, args}` - take the applied result of the specified MFA.
## Encoder
Saxy offers two APIs to build simple form and encode XML document.
Use `Saxy.XML` to build and compose XML simple form, then `Saxy.encode!/2`
to encode the built element into XML binary.
iex> import Saxy.XML
iex> element = element("person", [gender: "female"], "Alice")
{"person", [{"gender", "female"}], [{:characters, "Alice"}]}
iex> Saxy.encode!(element, [version: "1.0"])
"<?xml version=\"1.0\"?><person gender=\"female\">Alice</person>"
See `Saxy.XML` for more XML building APIs.
Saxy also provides `Saxy.Builder` protocol to help composing structs into simple form.
defmodule Person do
@derive {Saxy.Builder, name: "person", attributes: [:gender], children: [:name]}
defstruct [:gender, :name]
end
iex> jack = %Person{gender: :male, name: "Jack"}
iex> john = %Person{gender: :male, name: "John"}
iex> import Saxy.XML
iex> root = element("people", [], [jack, john])
iex> Saxy.encode!(root, [version: "1.0"])
"<?xml version=\"1.0\"?><people><person gender=\"male\">Jack</person><person gender=\"male\">John</person></people>"
"""
alias Saxy.{
Encoder,
Parser,
State
}
@doc ~S"""
Parses XML binary data.
This function takes XML binary, SAX event handler (see more at `Saxy.Handler`) and an initial state as the input, it returns
`{:ok, state}` if parsing is successful, otherwise `{:error, exception}`, where `exception` is a
`Saxy.ParseError` struct which can be converted into readable message with `Exception.message/1`.
The third argument `state` can be used to keep track of data and parsing progress when parsing is happening, which will be
returned when parsing finishes.
### Options
See the “Shared options” section at the module documentation.
## Examples
defmodule MyTestHandler do
@behaviour Saxy.Handler
def handle_event(:start_document, prolog, state) do
{:ok, [{:start_document, prolog} | state]}
end
def handle_event(:end_document, _data, state) do
{:ok, [{:end_document} | state]}
end
def handle_event(:start_element, {name, attributes}, state) do
{:ok, [{:start_element, name, attributes} | state]}
end
def handle_event(:end_element, name, state) do
{:ok, [{:end_element, name} | state]}
end
def handle_event(:characters, chars, state) do
{:ok, [{:characters, chars} | state]}
end
end
iex> xml = "<?xml version='1.0' ?><foo bar='value'></foo>"
iex> Saxy.parse_string(xml, MyTestHandler, [])
{:ok,
[{:end_document},
{:end_element, "foo"},
{:start_element, "foo", [{"bar", "value"}]},
{:start_document, [version: "1.0"]}]}
"""
@spec parse_string(
data :: binary,
handler :: module(),
initial_state :: term(),
options :: Keyword.t()
) :: {:ok, state :: term()} | {:error, exception :: Saxy.ParseError.t()}
def parse_string(data, handler, initial_state, options \\ [])
when is_binary(data) and is_atom(handler) do
expand_entity = Keyword.get(options, :expand_entity, :keep)
state = %State{
prolog: nil,
handler: handler,
user_state: initial_state,
expand_entity: expand_entity,
character_data_max_length: :infinity
}
case Parser.Prolog.parse(data, false, data, 0, state) do
{:ok, state} ->
{:ok, state.user_state}
{:error, _reason} = error ->
error
end
end
@doc ~S"""
Parses XML stream data.
This function takes a stream, SAX event handler (see more at `Saxy.Handler`) and an initial state as the input, it returns
`{:ok, state}` if parsing is successful, otherwise `{:error, exception}`, where `exception` is a
`Saxy.ParseError` struct which can be converted into readable message with `Exception.message/1`.
## Examples
defmodule MyTestHandler do
@behaviour Saxy.Handler
def handle_event(:start_document, prolog, state) do
{:ok, [{:start_document, prolog} | state]}
end
def handle_event(:end_document, _data, state) do
{:ok, [{:end_document} | state]}
end
def handle_event(:start_element, {name, attributes}, state) do
{:ok, [{:start_element, name, attributes} | state]}
end
def handle_event(:end_element, name, state) do
{:ok, [{:end_element, name} | state]}
end
def handle_event(:characters, chars, state) do
{:ok, [{:characters, chars} | state]}
end
end
iex> stream = File.stream!("./test/support/fixture/foo.xml")
iex> Saxy.parse_stream(stream, MyTestHandler, [])
{:ok,
[{:end_document},
{:end_element, "foo"},
{:start_element, "foo", [{"bar", "value"}]},
{:start_document, [version: "1.0"]}]}
## Memory usage
`Saxy.parse_stream/3` takes a `File.Stream` or `Stream` as the input, so the amount of bytes to buffer in each
chunk can be controlled by `File.stream!/3` API.
During parsing, the actual memory used by Saxy might be higher than the number configured for each chunk, since
Saxy holds some parsed parts of the original binary in memory to leverage Erlang sub-binary extraction.
Saxy frees those up as soon as it makes sense.
### Options
See the “Shared options” section at the module documentation.
* `:character_data_max_length` - tells the parser to emit the `:characters` event when its length exceeds the specified
number. The option is useful when the tag being parsed containing a very large chunk of data. Defaults to `:infinity`.
"""
@spec parse_stream(
stream :: Enumerable.t(),
handler :: module(),
initial_state :: term(),
options :: Keyword.t()
) :: {:ok, state :: term()} | {:error, exception :: Saxy.ParseError.t()}
def parse_stream(stream, handler, initial_state, options \\ []) do
expand_entity = Keyword.get(options, :expand_entity, :keep)
character_data_max_length = Keyword.get(options, :character_data_max_length, :infinity)
state = %State{
prolog: nil,
handler: handler,
user_state: initial_state,
expand_entity: expand_entity,
character_data_max_length: character_data_max_length
}
init = Parser.Prolog.parse(<<>>, true, <<>>, 0, state)
stream
|> Enum.reduce_while(init, &stream_reducer/2)
|> case do
{:halted, context_fun} ->
case context_fun.(<<>>, false) do
{:ok, state} -> {:ok, state.user_state}
{:error, reason} -> {:error, reason}
end
{:ok, state} ->
{:ok, state.user_state}
{:error, reason} ->
{:error, reason}
end
end
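# The parser returns `{:halted, context_fun}` whenever it runs out of input;
# feeding the next chunk through `context_fun` resumes parsing, while
# `{:ok, _}` and `{:error, _}` halt the reduction early.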
defp stream_reducer(next_bytes, {:halted, context_fun}) do
{:cont, context_fun.(next_bytes, true)}
end
defp stream_reducer(_next_bytes, {:error, _reason} = error) do
{:halt, error}
end
defp stream_reducer(_next_bytes, {:ok, state}) do
{:halt, {:ok, state}}
end
@doc """
Encodes a simple form XML element into string.
This function encodes an element in simple form format and a prolog to an XML document.
## Examples
iex> import Saxy.XML
iex> root = element(:foo, [{"foo", "bar"}], "bar")
iex> prolog = [version: "1.0"]
iex> Saxy.encode!(root, prolog)
"<?xml version=\\"1.0\\"?><foo foo=\\"bar\\">bar</foo>"
"""
@spec encode!(root :: Saxy.XML.element(), prolog :: Saxy.Prolog.t() | Keyword.t()) :: String.t()
def encode!(root, prolog \\ nil) do
root
|> Encoder.encode_to_iodata(prolog)
|> IO.iodata_to_binary()
end
@doc """
Encodes a simple form element into IO data.
Same as `encode!/2` but this encodes the document into IO data.
## Examples
iex> import Saxy.XML
iex> root = element(:foo, [{"foo", "bar"}], "bar")
iex> prolog = [version: "1.0"]
iex> Saxy.encode_to_iodata!(root, prolog)
[
['<?xml', [32, 'version', 61, 34, "1.0", 34], [], [], '?>'],
[60, "foo", 32, "foo", 61, 34, "bar", 34],
62,
["bar"],
[60, 47, "foo", 62]
]
"""
@spec encode_to_iodata!(root :: Saxy.XML.element(), prolog :: Saxy.Prolog.t() | Keyword.t()) :: iodata()
def encode_to_iodata!(root, prolog \\ nil) do
Encoder.encode_to_iodata(root, prolog)
end
end
|
lib/saxy.ex
| 0.933317 | 0.853913 |
saxy.ex
|
starcoder
|
defmodule ExCrypto do
@moduledoc """
The ExCrypto module exposes a subset of functionality from the Erlang `crypto`
module with the goal of making it easier to include strong cryptography in your
Elixir applications.
This module provides functions for symmetric-key cryptographic operations using
AES in GCM and CBC mode. The ExCrypto module attempts to reduce complexity by providing
some sane default values for common operations.
"""
@epoch :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})
@aes_block_size 16
@iv_bit_length 128
@bitlength_error "IV must be exactly 128 bits and key must be exactly 128, 192 or 256 bits"
defmacro __using__(_) do
quote do
import ExCrypto
end
end
defp normalize_error(kind, error, key_and_iv \\ nil) do
key_error = test_key_and_iv_bitlength(key_and_iv)
cond do
key_error ->
key_error
%{message: message} = Exception.normalize(kind, error) ->
{:error, message}
x = Exception.normalize(kind, error) ->
{kind, x, System.stacktrace()}
end
end
defp test_key_and_iv_bitlength(nil), do: nil
defp test_key_and_iv_bitlength({_key, iv}) when bit_size(iv) != @iv_bit_length,
do: {:error, @bitlength_error}
# the key must be exactly 128, 192 or 256 bits long; the previous rem-based
# guards also accepted any multiple of those sizes
defp test_key_and_iv_bitlength({key, _iv}) when bit_size(key) in [128, 192, 256], do: nil
defp test_key_and_iv_bitlength({_key, _iv}), do: {:error, @bitlength_error}
@doc """
Returns random characters. Each character represents 6 bits of entropy.
Accepts an `integer` to determine the number of random characters to return.
## Examples
iex> rand_string = ExCrypto.rand_chars(24)
iex> assert(String.length(rand_string) == 24)
true
iex> rand_string = ExCrypto.rand_chars(32)
iex> assert(String.length(rand_string) == 32)
true
iex> rand_string = ExCrypto.rand_chars(44)
iex> assert(String.length(rand_string) == 44)
true
"""
@spec rand_chars(integer) :: String.t()
def rand_chars(num_chars) do
block_bytes = 3
block_chars = 4
block_count = div(num_chars, block_chars)
block_partial = rem(num_chars, block_chars)
block_count =
case block_partial > 0 do
true -> block_count + 1
false -> block_count
end
rand_string = Base.url_encode64(:crypto.strong_rand_bytes(block_count * block_bytes))
String.slice(rand_string, 0, num_chars)
end
@doc """
Returns a random integer between `low` and `high`.
Accepts two `integer` arguments for the `low` and `high` boundaries. The `low` argument
must be less than the `high` argument.
## Examples
iex> rand_int = ExCrypto.rand_int(2, 20)
iex> assert(rand_int > 1)
true
iex> assert(rand_int < 21)
true
iex> rand_int = ExCrypto.rand_int(23, 99)
iex> assert(rand_int > 22)
true
iex> assert(rand_int < 99)
true
iex> rand_int = ExCrypto.rand_int(212, 736)
iex> assert(rand_int > 211)
true
iex> assert(rand_int < 737)
true
"""
@spec rand_int(integer, integer) :: integer
def rand_int(low, high) do
low + :rand.uniform(high - low + 1) - 1
end
@doc """
Returns a string of random bytes where the length is equal to `length`.
## Examples
iex> {:ok, rand_bytes} = ExCrypto.rand_bytes(16)
iex> assert(byte_size(rand_bytes) == 16)
true
iex> assert(bit_size(rand_bytes) == 128)
true
iex> {:ok, rand_bytes} = ExCrypto.rand_bytes(24)
iex> assert(byte_size(rand_bytes) == 24)
true
iex> assert(bit_size(rand_bytes) == 192)
true
iex> {:ok, rand_bytes} = ExCrypto.rand_bytes(32)
iex> assert(byte_size(rand_bytes) == 32)
true
iex> assert(bit_size(rand_bytes) == 256)
true
"""
@spec rand_bytes(integer) :: {:ok, binary} | {:error, binary}
def rand_bytes(length) do
{:ok, :crypto.strong_rand_bytes(length)}
catch
kind, error -> normalize_error(kind, error)
end
@spec rand_bytes!(integer) :: binary
def rand_bytes!(length) do
case rand_bytes(length) do
{:ok, data} -> data
{:error, reason} -> raise reason
end
end
@doc """
Returns an AES key.
Accepts a `key_type` (`:aes_128`|`:aes_192`|`:aes_256`) and `key_format`
(`:base64`|`:bytes`) to determine type of key to produce.
## Examples
iex> {:ok, key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> assert bit_size(key) == 256
true
iex> {:ok, key} = ExCrypto.generate_aes_key(:aes_256, :base64)
iex> assert String.length(key) == 44
true
iex> {:ok, key} = ExCrypto.generate_aes_key(:aes_192, :bytes)
iex> assert bit_size(key) == 192
true
iex> {:ok, key} = ExCrypto.generate_aes_key(:aes_192, :base64)
iex> assert String.length(key) == 32
true
iex> {:ok, key} = ExCrypto.generate_aes_key(:aes_128, :bytes)
iex> assert bit_size(key) == 128
true
iex> {:ok, key} = ExCrypto.generate_aes_key(:aes_128, :base64)
iex> assert String.length(key) == 24
true
"""
@spec generate_aes_key(atom, atom) :: {:ok, binary} | {:error, binary}
def generate_aes_key(key_type, key_format) do
case {key_type, key_format} do
{:aes_128, :base64} -> rand_bytes!(16) |> url_encode64
{:aes_128, :bytes} -> rand_bytes(16)
{:aes_192, :base64} -> rand_bytes!(24) |> url_encode64
{:aes_192, :bytes} -> rand_bytes(24)
{:aes_256, :base64} -> rand_bytes!(32) |> url_encode64
{:aes_256, :bytes} -> rand_bytes(32)
_ -> {:error, "invalid key_type/key_format"}
end
end
defp url_encode64(bytes_to_encode) do
{:ok, Base.url_encode64(bytes_to_encode)}
end
@doc """
Encrypt a `binary` with AES in GCM mode.
Returns a tuple containing the `initialization_vector`, the `cipher_text` and the `cipher_tag`.
At a high level encryption using AES in GCM mode looks like this:
key + init_vec + auth_data + clear_text -> cipher_text + cipher_tag
## Examples
iex> clear_text = "my-clear-text"
iex> auth_data = "my-auth-data"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, iv} = ExCrypto.rand_bytes(16)
iex> {:ok, {_ad, payload}} = ExCrypto.encrypt(aes_256_key, auth_data, iv, clear_text)
iex> {_iv, cipher_text, cipher_tag} = payload
iex> assert(is_bitstring(cipher_text))
true
iex> assert(bit_size(cipher_tag) == 128)
true
"""
@spec encrypt(binary, binary, binary, binary) ::
{:ok, {binary, {binary, binary, binary}}} | {:error, binary}
def encrypt(key, authentication_data, initialization_vector, clear_text) do
_encrypt(key, initialization_vector, {authentication_data, clear_text}, :aes_gcm)
catch
kind, error -> normalize_error(kind, error)
end
@doc """
Encrypt a `binary` with AES in CBC mode.
Returns a tuple containing the `initialization_vector`, and `cipher_text`.
At a high level encryption using AES in CBC mode looks like this:
key + clear_text -> init_vec + cipher_text
## Examples
iex> clear_text = "my-clear-text"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, {_iv, cipher_text}} = ExCrypto.encrypt(aes_256_key, clear_text)
iex> assert(is_bitstring(cipher_text))
true
"""
@spec encrypt(binary, binary) :: {:ok, {binary, binary}} | {:error, binary}
def encrypt(key, clear_text) do
# new 128 bit random initialization_vector
{:ok, initialization_vector} = rand_bytes(16)
_encrypt(key, initialization_vector, pad(clear_text, @aes_block_size), :aes_cbc256)
catch
kind, error ->
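# bindings made in the function body are not visible inside `catch`, so a
# fresh IV is generated here purely for the bit-length check performed by
# `normalize_error/3`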
{:ok, initialization_vector} = rand_bytes(16)
normalize_error(kind, error, {key, initialization_vector})
end
@doc """
Encrypt a `binary` with AES in CBC mode providing explicit IV via map.
Returns a tuple containing the `initialization_vector`, and `cipher_text`.
At a high level encryption using AES in CBC mode looks like this:
key + clear_text + map -> init_vec + cipher_text
## Examples
iex> clear_text = "my-clear-text"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, init_vec} = ExCrypto.rand_bytes(16)
iex> {:ok, {_iv, cipher_text}} = ExCrypto.encrypt(aes_256_key, clear_text, %{initialization_vector: init_vec})
iex> assert(is_bitstring(cipher_text))
true
"""
@spec encrypt(binary, binary, %{initialization_vector: binary}) ::
{:ok, {binary, {binary, binary, binary}}}
| {:ok, {binary, binary}}
| {:error, any}
def encrypt(key, clear_text, %{initialization_vector: initialization_vector}) do
_encrypt(key, initialization_vector, pad(clear_text, @aes_block_size), :aes_cbc256)
catch
kind, error -> normalize_error(kind, error, {key, initialization_vector})
end
@doc """
Same as `encrypt/4` except the `initialization_vector` is automatically generated.
A 128 bit `initialization_vector` is generated automatically by `encrypt/3`. It returns a tuple
containing the `initialization_vector`, the `cipher_text` and the `cipher_tag`.
## Examples
iex> clear_text = "my-clear-text"
iex> auth_data = "my-auth-data"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, {_ad, payload}} = ExCrypto.encrypt(aes_256_key, auth_data, clear_text)
iex> {_init_vec, cipher_text, cipher_tag} = payload
iex> assert(is_bitstring(cipher_text))
true
iex> assert(bit_size(cipher_tag) == 128)
true
"""
@spec encrypt(binary, binary, binary) ::
{:ok, {binary, {binary, binary, binary}}} | {:error, binary}
def encrypt(key, authentication_data, clear_text) do
# new 128 bit random initialization_vector
{:ok, initialization_vector} = rand_bytes(16)
_encrypt(key, initialization_vector, {authentication_data, clear_text}, :aes_gcm)
end
defp _encrypt(key, initialization_vector, encryption_payload, algorithm) do
case :crypto.block_encrypt(algorithm, key, initialization_vector, encryption_payload) do
{cipher_text, cipher_tag} ->
{authentication_data, _clear_text} = encryption_payload
{:ok, {authentication_data, {initialization_vector, cipher_text, cipher_tag}}}
<<cipher_text::binary>> ->
{:ok, {initialization_vector, cipher_text}}
x ->
{:error, x}
end
end
def pad(data, block_size) do
to_add = block_size - rem(byte_size(data), block_size)
data <> to_string(:string.chars(to_add, to_add))
end
def unpad(data) do
to_remove = :binary.last(data)
:binary.part(data, 0, byte_size(data) - to_remove)
end
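# PKCS#7-style padding: the pad length itself is written into every padding
# byte, so `unpad/1` only needs to read the last byte to know how much to
# strip:
#
#     pad("ABC", 8)        #=> "ABC" <> <<5, 5, 5, 5, 5>>
#     unpad(pad("ABC", 8)) #=> "ABC"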
@doc """
Returns a clear-text string decrypted with AES in GCM mode.
At a high level decryption using AES in GCM mode looks like this:
key + init_vec + auth_data + cipher_text + cipher_tag -> clear_text
## Examples
iex> clear_text = "my-clear-text"
iex> auth_data = "my-auth-data"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, {_ad, payload}} = ExCrypto.encrypt(aes_256_key, auth_data, clear_text)
iex> {init_vec, cipher_text, cipher_tag} = payload
iex> {:ok, val} = ExCrypto.decrypt(aes_256_key, auth_data, init_vec, cipher_text, cipher_tag)
iex> assert(val == clear_text)
true
"""
@spec decrypt(binary, binary, binary, binary, binary) :: {:ok, binary} | {:error, :decrypt_failed} | {:error, binary}
def decrypt(key, authentication_data, initialization_vector, cipher_text, cipher_tag) do
_decrypt(key, initialization_vector, {authentication_data, cipher_text, cipher_tag}, :aes_gcm)
end
@doc """
Returns a clear-text string decrypted with AES256 in CBC mode.
At a high level decryption using AES in CBC mode looks like this:
key + init_vec + cipher_text -> clear_text
## Examples
iex> clear_text = "my-clear-text"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, {init_vec, cipher_text}} = ExCrypto.encrypt(aes_256_key, clear_text)
iex> {:ok, val} = ExCrypto.decrypt(aes_256_key, init_vec, cipher_text)
iex> assert(val == clear_text)
true
"""
@spec decrypt(binary, binary, binary) :: {:ok, binary} | {:error, :decrypt_failed} | {:error, binary}
def decrypt(key, initialization_vector, cipher_text) do
{:ok, padded_cleartext} = _decrypt(key, initialization_vector, cipher_text, :aes_cbc256)
{:ok, unpad(padded_cleartext)}
catch
kind, error -> normalize_error(kind, error, {key, initialization_vector})
end
defp _decrypt(key, initialization_vector, cipher_data, algorithm) do
case :crypto.block_decrypt(algorithm, key, initialization_vector, cipher_data) do
:error -> {:error, :decrypt_failed}
plain_text -> {:ok, plain_text}
end
catch
kind, error -> normalize_error(kind, error)
end
@doc """
Join the three parts of an encrypted payload and encode using `Base.url_encode64`.
This produces a Unicode `payload` string like this:
init_vec <> cipher_text <> cipher_tag
[128 bits] <> [?? bits] <> [128 bits]
This format is convenient to include in HTTP request bodies. It can also be used with JSON transport formats.
## Examples
iex> clear_text = "my-clear-text"
iex> auth_data = "my-auth-data"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, {_ad, {init_vec, cipher_text, cipher_tag}}} = ExCrypto.encrypt(aes_256_key, auth_data, clear_text)
iex> {:ok, encoded_payload} = ExCrypto.encode_payload(init_vec, cipher_text, cipher_tag)
iex> assert(String.valid?(encoded_payload))
true
"""
@spec encode_payload(binary, binary, binary) :: {:ok, binary} | {:error, binary}
def encode_payload(initialization_vector, cipher_text, cipher_tag) do
url_encode64(initialization_vector <> cipher_text <> cipher_tag)
end
@doc """
Split and decode the three parts of an encrypted payload and encode using `Base.url_decode64`.
## Examples
iex> clear_text = "my-clear-text"
iex> auth_data = "my-auth-data"
iex> {:ok, aes_256_key} = ExCrypto.generate_aes_key(:aes_256, :bytes)
iex> {:ok, {_ad, {init_vec, cipher_text, cipher_tag}}} = ExCrypto.encrypt(aes_256_key, auth_data, clear_text)
iex> {:ok, encoded_payload} = ExCrypto.encode_payload(init_vec, cipher_text, cipher_tag)
iex> assert(String.valid?(encoded_payload))
true
iex> {:ok, {d_init_vec, d_cipher_text, d_cipher_tag}} = ExCrypto.decode_payload(encoded_payload)
iex> assert(d_init_vec == init_vec)
true
iex> assert(d_cipher_text == cipher_text)
true
iex> assert(d_cipher_tag == cipher_tag)
true
"""
@spec decode_payload(binary) :: {:ok, {binary, binary, binary}} | {:error, binary}
def decode_payload(encoded_parts) do
  case Base.url_decode64(encoded_parts) do
    {:ok, decoded_parts} ->
      decoded_length = byte_size(decoded_parts)
      iv = Kernel.binary_part(decoded_parts, 0, 16)
      cipher_text = Kernel.binary_part(decoded_parts, 16, decoded_length - 32)
      cipher_tag = Kernel.binary_part(decoded_parts, decoded_length, -16)
      {:ok, {iv, cipher_text, cipher_tag}}
    :error ->
      # bad Base64 input used to crash with a MatchError; return the
      # documented error tuple instead
      {:error, "invalid Base64 payload"}
  end
end
@doc false
def universal_time(:unix) do
:calendar.datetime_to_gregorian_seconds(:calendar.universal_time()) - @epoch
end
end
|
lib/ex_crypto.ex
| 0.909068 | 0.630628 |
ex_crypto.ex
|
starcoder
|
defmodule SudokuBoard do
@moduledoc """
Implements a Sudoku board
"""
defstruct size: 9, grid: List.duplicate(0, 81)
@type t :: %SudokuBoard{size: non_neg_integer(), grid: list(non_neg_integer())}
@spec equals?(SudokuBoard.t(), SudokuBoard.t()) :: boolean
def equals?(board1, board2) do
board1.size == board2.size and board1.grid == board2.grid
end
@doc """
Creates a sudoku board from a list. No validation is performed.
## Parameters
- grid: An integer list representing a board. Element 0 is at the top left, element n is at the bottom right.
"""
@spec new(list(non_neg_integer)) :: SudokuBoard.t()
def new(grid) do
size =
grid
|> Enum.count()
|> integer_sqrt
%SudokuBoard{grid: grid, size: size}
end
@doc """
Parses a string representation of a sudoku board.
Each board is a CSV containing digits 0-n where `n` x `n` is the size of the board.
Zeros represent empty spaces.
## Parameters
- board_string: A string representing a board
## Examples
iex> SudokuBoard.parse("0,0,1,2,0,0,0,0,1,2,3,4,0,0,0,0")
{:ok,
%SudokuBoard{grid: [0, 0, 1, 2, 0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 0], size: 4}}
iex> SudokuBoard.parse("0,0,1")
{:error, "Invalid board"}
"""
@spec parse(String.t()) :: {:ok, SudokuBoard.t()} | {:error, String.t()}
def parse(str) do
try do
grid =
str
|> String.split(",")
|> Enum.map(fn elt -> elt |> String.trim() |> Integer.parse() |> elem(0) end)
size =
grid
|> Enum.count()
|> integer_sqrt
board = %SudokuBoard{size: size, grid: grid}
if valid?(board) do
{:ok, board}
else
{:error, "Invalid board"}
end
rescue
_ -> {:error, "Parsing error"}
end
end
@doc """
Checks whether a board is a partial solution. Assumes a well-formed sudoku board.
## Parameters
- board: A sudoku board
"""
@spec partial_solution?(SudokuBoard.t()) :: boolean
def partial_solution?(%SudokuBoard{} = board) do
rows = get_rows(board)
cols = get_columns(board)
boxes = get_boxes(board)
Enum.all?(rows, &unique_list?/1) and Enum.all?(cols, &unique_list?/1) and
Enum.all?(boxes, &unique_list?/1)
end
@doc """
Reads a sudoku board from a file
## Parameters
- file_path: string representing the file path of the file to be loaded
"""
@spec read_file(String.t()) :: {:ok, SudokuBoard.t()} | {:error, String.t()}
def read_file(path) do
case File.read(path) do
{:ok, data} -> parse(data)
{:error, reason} -> {:error, "File error: " <> Atom.to_string(reason)}
end
end
@doc """
Place a number into the sudoku board. Does not ensure that the square is empty.
## Parameters
- board: A sudoku board
- index: An index into the board
- number: The number to be placed into the board
"""
@spec place_number(SudokuBoard.t(), non_neg_integer(), non_neg_integer()) :: SudokuBoard.t()
def place_number(%SudokuBoard{size: size, grid: grid}, idx, number)
when 1 <= number and number <= size do
new_grid = List.replace_at(grid, idx, number)
%SudokuBoard{size: size, grid: new_grid}
end
@doc """
Tests if the board is solved
## Parameters
- board: A valid SudokuBoard.t representing a board
## Examples
iex> SudokuBoard.new([1,2,3,4,
...> 3,4,1,2,
...> 4,1,2,3,
...> 2,3,4,1]) |> SudokuBoard.solved?
true
"""
@spec solved?(SudokuBoard.t()) :: boolean
def solved?(%SudokuBoard{} = board) do
filled?(board) and partial_solution?(board)
end
@doc """
Checks if a sudoku board is well formed.
## Parameters
- board: A SudokuBoard.t representing a board
"""
@spec valid?(SudokuBoard.t()) :: boolean
def valid?(%SudokuBoard{size: size, grid: grid}) do
square?(size) and
Enum.count(grid) == size * size and
Enum.all?(grid, fn element -> 0 <= element and element <= size end)
end
## Private methods
# true if all squares in the board are populated
defp filled?(%SudokuBoard{grid: grid}) do
Enum.all?(grid, fn x -> x != 0 end)
end
@spec unique_list?(list(non_neg_integer())) :: boolean
defp unique_list?(l) do
filled_values = Enum.filter(l, fn x -> x > 0 end)
Enum.count(filled_values) == Enum.count(MapSet.new(filled_values))
end
@spec square?(non_neg_integer()) :: boolean
defp square?(i) do
j = integer_sqrt(i)
j * j == i
end
@spec integer_sqrt(non_neg_integer()) :: integer
defp integer_sqrt(i), do: trunc(:math.sqrt(i))
@spec get_rows(SudokuBoard.t()) :: list(list(non_neg_integer()))
defp get_rows(%SudokuBoard{size: size, grid: grid}) do
Enum.chunk_every(grid, size)
end
@spec get_columns(SudokuBoard.t()) :: list(list(non_neg_integer()))
defp get_columns(%SudokuBoard{size: size, grid: grid}) do
grid
|> Enum.with_index()
|> Enum.sort(fn {_, idx_1}, {_, idx_2} ->
get_col_index(idx_1, size) <= get_col_index(idx_2, size)
end)
|> Enum.map(&elem(&1, 0))
|> Enum.chunk_every(size)
end
@spec get_boxes(SudokuBoard.t()) :: list(list(non_neg_integer()))
defp get_boxes(%SudokuBoard{size: size, grid: grid}) do
grid
|> Enum.with_index()
|> Enum.sort(fn {_, idx_1}, {_, idx_2} ->
get_box_index(idx_1, size) <= get_box_index(idx_2, size)
end)
|> Enum.map(&elem(&1, 0))
|> Enum.chunk_every(size)
end
defp get_row_index(idx, sudoku_size) do
div(idx, sudoku_size)
end
defp get_col_index(idx, sudoku_size) do
rem(idx, sudoku_size)
end
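# Worked example for a 9x9 board (box size 3): the centre square, idx 40,
# sits at row 4, col 4, so its box index is div(4, 3) * 3 + div(4, 3) = 4,
# the middle box.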
defp get_box_index(idx, sudoku_size) do
box_size = integer_sqrt(sudoku_size)
row = get_row_index(idx, sudoku_size)
col = get_col_index(idx, sudoku_size)
div(row, box_size) * box_size + div(col, box_size)
end
end
defimpl String.Chars, for: SudokuBoard do
@spec to_string(SudokuBoard.t()) :: binary()
def to_string(%SudokuBoard{size: size, grid: grid}) do
chunk_size =
size
|> :math.sqrt()
|> trunc
board_string =
grid
|> Enum.map(fn elem -> "#{elem}," end)
|> Enum.chunk_every(size)
|> Enum.with_index()
|> Enum.reduce("", fn {row, idx}, acc ->
extra_rows =
if rem(idx, chunk_size) == 0 do
"\n"
else
""
end
"#{acc}#{extra_rows}\n\t #{format_row(row, chunk_size)}"
end)
|> String.trim()
~s/%SudokuBoard{
size: #{size},
grid: #{board_string}
}/
end
defp format_row(row, chunk_size) do
row
|> Enum.chunk_every(chunk_size)
|> Enum.reduce("", fn x, acc -> "#{acc} #{x}" end)
|> String.trim()
end
end
|
lib/sudoku_board.ex
| 0.848345 | 0.68177 |
sudoku_board.ex
|
starcoder
|
defmodule AWS.Kinesis.Firehose do
@moduledoc """
Amazon Kinesis Firehose API Reference
Amazon Kinesis Firehose is a fully-managed service that delivers real-time
streaming data to destinations such as Amazon Simple Storage Service
(Amazon S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.
"""
@doc """
Creates a delivery stream.
By default, you can create up to 20 delivery streams per region.
This is an asynchronous operation that immediately returns. The initial
status of the delivery stream is `CREATING`. After the delivery stream is
created, its status is `ACTIVE` and it now accepts data. Attempts to send
data to a delivery stream that is not in the `ACTIVE` state cause an
exception. To check the state of a delivery stream, use
`DescribeDeliveryStream`.
A delivery stream is configured with a single destination: Amazon S3,
Amazon Elasticsearch Service, or Amazon Redshift. You must specify only one
of the following destination configuration parameters:
**ExtendedS3DestinationConfiguration**, **S3DestinationConfiguration**,
**ElasticsearchDestinationConfiguration**, or
**RedshiftDestinationConfiguration**.
When you specify **S3DestinationConfiguration**, you can also provide the
following optional values: **BufferingHints**, **EncryptionConfiguration**,
and **CompressionFormat**. By default, if no **BufferingHints** value is
provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever
condition is satisfied first. Note that **BufferingHints** is a hint, so
there are some cases where the service cannot adhere to these conditions
strictly; for example, record boundaries are such that the size is a little
over or under the configured buffering size. By default, no encryption is
performed. We strongly recommend that you enable encryption to ensure
secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
<ul> <li> An Amazon Redshift destination requires an S3 bucket as
intermediate location, as Firehose first delivers data to S3 and then uses
`COPY` syntax to load data into an Amazon Redshift table. This is specified
in the **RedshiftDestinationConfiguration.S3Configuration** parameter.
</li> <li> The compression formats `SNAPPY` or `ZIP` cannot be specified in
**RedshiftDestinationConfiguration.S3Configuration** because the Amazon
Redshift `COPY` operation that reads from the S3 bucket doesn't support
these compression formats.
</li> <li> We strongly recommend that you use the user name and password
you provide exclusively with Firehose, and that the permissions for the
account are restricted for Amazon Redshift `INSERT` permissions.
</li> </ul> Firehose assumes the IAM role that is configured as part of the
destination. The role should allow the Firehose principal to assume the
role, and the role should have permissions that allows the service to
deliver the data. For more information, see [Amazon S3 Bucket
Access](http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
in the *Amazon Kinesis Firehose Developer Guide*.
"""
def create_delivery_stream(client, input, options \\ []) do
request(client, "CreateDeliveryStream", input, options)
end
@doc """
Deletes a delivery stream and its data.
You can delete a delivery stream only if it is in `ACTIVE` or `DELETING`
state, and not in the `CREATING` state. While the deletion request is in
process, the delivery stream is in the `DELETING` state.
To check the state of a delivery stream, use `DescribeDeliveryStream`.
While the delivery stream is `DELETING` state, the service may continue to
accept the records, but the service doesn't make any guarantees with
respect to delivering the data. Therefore, as a best practice, you should
first stop any applications that are sending records before deleting a
delivery stream.
"""
def delete_delivery_stream(client, input, options \\ []) do
request(client, "DeleteDeliveryStream", input, options)
end
@doc """
Describes the specified delivery stream and gets the status. For example,
after your delivery stream is created, call `DescribeDeliveryStream` to see
if the delivery stream is `ACTIVE` and therefore ready for data to be sent
to it.
"""
def describe_delivery_stream(client, input, options \\ []) do
request(client, "DescribeDeliveryStream", input, options)
end
@doc """
Lists your delivery streams.
The number of delivery streams might be too large to return using a single
call to `ListDeliveryStreams`. You can limit the number of delivery streams
returned, using the **Limit** parameter. To determine whether there are
more delivery streams to list, check the value of
**HasMoreDeliveryStreams** in the output. If there are more delivery
streams to list, you can request them by specifying the name of the last
delivery stream returned in the call in the
**ExclusiveStartDeliveryStreamName** parameter of a subsequent call.
"""
def list_delivery_streams(client, input, options \\ []) do
request(client, "ListDeliveryStreams", input, options)
end
@doc """
Writes a single data record into an Amazon Kinesis Firehose delivery
stream. To write multiple data records into a delivery stream, use
`PutRecordBatch`. Applications using these operations are referred to as
producers.
By default, each delivery stream can take in up to 2,000 transactions per
second, 5,000 records per second, or 5 MB per second. Note that if you use
`PutRecord` and `PutRecordBatch`, the limits are an aggregate across these
two operations for each delivery stream. For more information about limits
and how to request an increase, see [Amazon Kinesis Firehose
Limits](http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
You must specify the name of the delivery stream and the data record when
using `PutRecord`. The data record consists of a data blob that can be up
to 1,000 KB in size, and any kind of data, for example, a segment from a
log file, geographic location data, web site clickstream data, etc.
Firehose buffers records before delivering them to the destination. To
disambiguate the data blobs at the destination, a common solution is to use
delimiters in the data, such as a newline (`\n`) or some other character
unique within the data. This allows the consumer application(s) to parse
individual data items when reading the data from the destination.
The `PutRecord` operation returns a **RecordId**, which is a unique string
assigned to each record. Producer applications can use this ID for purposes
such as auditability and investigation.
If the `PutRecord` operation throws a **ServiceUnavailableException**, back
off and retry. If the exception persists, it is possible that the
throughput limits have been exceeded for the delivery stream.
Data records sent to Firehose are stored for 24 hours from the time they
are added to a delivery stream as it attempts to send the records to the
destination. If the destination is unreachable for more than 24 hours, the
data is no longer available.
"""
def put_record(client, input, options \\ []) do
request(client, "PutRecord", input, options)
end
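# Illustrative call; the input map follows the Firehose JSON API, in which
# the record data blob must be Base64-encoded. The `client` map shape is
# whatever `request/4` below expects (region, endpoint, proto, port and
# signing credentials):
#
#     AWS.Kinesis.Firehose.put_record(client, %{
#       "DeliveryStreamName" => "my-stream",
#       "Record" => %{"Data" => Base.encode64("hello\n")}
#     })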
@doc """
Writes multiple data records into a delivery stream in a single call, which
can achieve higher throughput per producer than when writing single
records. To write single data records into a delivery stream, use
`PutRecord`. Applications using these operations are referred to as
producers.
By default, each delivery stream can take in up to 2,000 transactions per
second, 5,000 records per second, or 5 MB per second. Note that if you use
`PutRecord` and `PutRecordBatch`, the limits are an aggregate across these
two operations for each delivery stream. For more information about limits,
see [Amazon Kinesis Firehose
Limits](http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
Each `PutRecordBatch` request supports up to 500 records. Each record in
the request can be as large as 1,000 KB (before Base64 encoding), up to a
limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when
using `PutRecord`. The data record consists of a data blob that can be up
to 1,000 KB in size, and any kind of data, for example, a segment from a
log file, geographic location data, web site clickstream data, and so on.
Firehose buffers records before delivering them to the destination. To
disambiguate the data blobs at the destination, a common solution is to use
delimiters in the data, such as a newline (`\n`) or some other character
unique within the data. This allows the consumer application(s) to parse
individual data items when reading the data from the destination.
The `PutRecordBatch` response includes a count of failed records,
**FailedPutCount**, and an array of responses, **RequestResponses**. Each
entry in the **RequestResponses** array provides additional information
about the processed record, and directly correlates with a record in the
request array using the same ordering, from the top to the bottom. The
response array always includes the same number of records as the request
array. **RequestResponses** includes both successfully and unsuccessfully
processed records. Firehose attempts to process all records in each
`PutRecordBatch` request. A single record failure does not stop the
processing of subsequent records.
A successfully processed record includes a **RecordId** value, which is
unique for the record. An unsuccessfully processed record includes
**ErrorCode** and **ErrorMessage** values. **ErrorCode** reflects the type
of error, and is one of the following values: `ServiceUnavailable` or
`InternalFailure`. **ErrorMessage** provides more detailed information
about the error.
If there is an internal server error or a timeout, the write might have
completed or it might have failed. If **FailedPutCount** is greater than 0,
retry the request, resending only those records that might have failed
processing. This minimizes the possible duplicate records and also reduces
the total bytes sent (and corresponding charges). We recommend that you
handle any duplicates at the destination.
If `PutRecordBatch` throws **ServiceUnavailableException**, back off and
retry. If the exception persists, it is possible that the throughput limits
have been exceeded for the delivery stream.
Data records sent to Firehose are stored for 24 hours from the time they
are added to a delivery stream as it attempts to send the records to the
destination. If the destination is unreachable for more than 24 hours, the
data is no longer available.
"""
def put_record_batch(client, input, options \\ []) do
request(client, "PutRecordBatch", input, options)
end
@doc """
Updates the specified destination of the specified delivery stream.
You can use this operation to change the destination type (for example, to
replace the Amazon S3 destination with Amazon Redshift) or change the
parameters associated with a destination (for example, to change the bucket
name of the Amazon S3 destination). The update might not occur immediately.
The target delivery stream remains active while the configurations are
updated, so data writes to the delivery stream can continue during this
process. The updated configurations are usually effective within a few
minutes.
Note that switching between Amazon ES and other services is not supported.
For an Amazon ES destination, you can only update to another Amazon ES
destination.
If the destination type is the same, Firehose merges the configuration
parameters specified with the destination configuration that already exists
on the delivery stream. If any of the parameters are not specified in the
call, the existing values are retained. For example, in the Amazon S3
destination, if `EncryptionConfiguration` is not specified then the
existing `EncryptionConfiguration` is maintained on the destination.
If the destination type is not the same, for example, changing the
destination from Amazon S3 to Amazon Redshift, Firehose does not merge any
parameters. In this case, all parameters must be specified.
Firehose uses **CurrentDeliveryStreamVersionId** to avoid race conditions
and conflicting merges. This is a required field, and the service updates
the configuration only if the existing configuration has a version ID that
matches. After the update is applied successfully, the version ID is
updated, and can be retrieved using `DescribeDeliveryStream`. You should
use the new version ID to set **CurrentDeliveryStreamVersionId** in the
next call.
"""
def update_destination(client, input, options \\ []) do
request(client, "UpdateDestination", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "firehose"}
host = get_host("firehose", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "Firehose_20150804.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/kinesis_firehose.ex
| 0.925158 | 0.678666 |
kinesis_firehose.ex
|
starcoder
|
defmodule RDF.Dataset do
@moduledoc """
A set of `RDF.Graph`s.
It may have multiple named graphs and at most one unnamed ("default") graph.
`RDF.Dataset` implements:
- Elixir's `Access` behaviour
- Elixir's `Enumerable` protocol
- Elixir's `Inspect` protocol
- the `RDF.Data` protocol
"""
defstruct name: nil, graphs: %{}
@behaviour Access
alias RDF.{Graph, Description, IRI, Statement, PrefixMap, PropertyMap}
import RDF.Statement, only: [coerce_subject: 1, coerce_graph_name: 1]
import RDF.Utils
@type graph_name :: IRI.t() | nil
@type t :: %__MODULE__{
name: graph_name,
graphs: %{graph_name => Graph.t()}
}
@type input :: Graph.input() | t
@type update_graph_fun :: (Graph.t() -> {Graph.t(), input} | :pop)
@doc """
Creates an empty unnamed `RDF.Dataset`.
"""
@spec new :: t
def new, do: %__MODULE__{}
@doc """
Creates an `RDF.Dataset`.
If a keyword list is given an empty dataset is created.
Otherwise an unnamed dataset initialized with the given data is created.
See `new/2` for available arguments and the different ways to provide data.
## Examples
RDF.Dataset.new(name: EX.GraphName)
RDF.Dataset.new(init: {EX.S, EX.p, EX.O})
RDF.Dataset.new({EX.S, EX.p, EX.O})
"""
@spec new(input | keyword) :: t
def new(data_or_opts)
def new(data_or_opts) when is_list(data_or_opts) and data_or_opts != [] do
if Keyword.keyword?(data_or_opts) do
{data, options} = Keyword.pop(data_or_opts, :init)
new(data, options)
else
new(data_or_opts, [])
end
end
def new(data), do: new(data, [])
@doc """
Creates an `RDF.Dataset` initialized with data.
The initial RDF triples can be provided in any form accepted by `add/3`.
Available options:
- `name`: the name of the dataset to be created
- `init`: some data with which the dataset should be initialized; the data can be
provided in any form accepted by `add/3` and above that also with a function returning
the initialization data in any of these forms
"""
@spec new(input, keyword) :: t
def new(data, opts)
def new(%__MODULE__{} = dataset, opts) do
  %__MODULE__{dataset | name: opts |> Keyword.get(:name) |> coerce_graph_name()}
end
def new(data, opts) do
%__MODULE__{}
|> new(opts)
|> init(data, opts)
end
defp init(dataset, nil, _), do: dataset
defp init(dataset, fun, opts) when is_function(fun), do: add(dataset, fun.(), opts)
defp init(dataset, data, opts), do: add(dataset, data, opts)
@doc """
Returns the dataset name IRI of `dataset`.
"""
@spec name(t) :: Statement.graph_name()
def name(%__MODULE__{} = dataset), do: dataset.name
@doc """
Changes the dataset name of `dataset`.
"""
@spec change_name(t, Statement.coercible_graph_name()) :: t
def change_name(%__MODULE__{} = dataset, new_name) do
%__MODULE__{dataset | name: coerce_graph_name(new_name)}
end
defp destination_graph(opts, default \\ nil) do
opts
|> Keyword.get(:graph, default)
|> coerce_graph_name()
end
@doc """
Adds triples and quads to a `RDF.Dataset`.
The triples can be provided in any form accepted by `add/2`.
- as a single statement tuple
- an `RDF.Description`
- an `RDF.Graph`
- an `RDF.Dataset`
- or a list with any combination of the former
The `graph` option lets you set a different destination graph to which the
statements should be added, ignoring the graph context of given quads or the
name of given graphs in `input`.
Note: When the statements to be added are given as another `RDF.Dataset` and
a destination graph is set with the `graph` option, the descriptions of the
subjects in the different graphs are aggregated.
"""
@spec add(t, input, keyword) :: t
def add(dataset, input, opts \\ [])
def add(%__MODULE__{} = dataset, {_, _, _, graph} = quad, opts),
do: do_add(dataset, destination_graph(opts, graph), quad, opts)
def add(%__MODULE__{} = dataset, %Description{} = description, opts),
do: do_add(dataset, destination_graph(opts), description, opts)
def add(%__MODULE__{} = dataset, %Graph{} = graph, opts),
do: do_add(dataset, destination_graph(opts, graph.name), graph, opts)
def add(%__MODULE__{} = dataset, %__MODULE__{} = other_dataset, opts) do
other_dataset
|> graphs()
|> Enum.reduce(dataset, &add(&2, &1, opts))
end
def add(dataset, input, opts)
when is_list(input) or (is_map(input) and not is_struct(input)) do
Enum.reduce(input, dataset, &add(&2, &1, opts))
end
def add(%__MODULE__{} = dataset, input, opts),
do: do_add(dataset, destination_graph(opts), input, opts)
defp do_add(dataset, graph_name, input, opts) do
%__MODULE__{
dataset
| graphs:
lazy_map_update(
dataset.graphs,
graph_name,
# when new:
fn -> Graph.new(input, Keyword.put(opts, :name, graph_name)) end,
# when update:
fn graph -> Graph.add(graph, input, opts) end
)
}
end
@doc """
Adds statements to a `RDF.Dataset` overwriting existing statements with the subjects given in the `input` data.
  The `graph` option allows setting a different destination graph to which the
statements should be added, ignoring the graph context of given quads or the
name of given graphs in `input`.
Note: When the statements to be added are given as another `RDF.Dataset` and
a destination graph is set with the `graph` option, the descriptions of the
subjects in the different graphs are aggregated.
## Examples
iex> dataset = RDF.Dataset.new({EX.S, EX.P1, EX.O1})
...> RDF.Dataset.put(dataset, {EX.S, EX.P2, EX.O2})
RDF.Dataset.new({EX.S, EX.P2, EX.O2})
iex> RDF.Dataset.put(dataset, {EX.S2, EX.P2, EX.O2})
RDF.Dataset.new([{EX.S, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
"""
@spec put(t, input, keyword) :: t
def put(dataset, input, opts \\ [])
def put(%__MODULE__{} = dataset, %__MODULE__{} = input, opts) do
%__MODULE__{
dataset
| graphs:
Enum.reduce(
input.graphs,
dataset.graphs,
fn {graph_name, graph}, graphs ->
Map.update(
graphs,
graph_name,
graph,
fn current -> Graph.put(current, graph, opts) end
)
end
)
}
end
def put(%__MODULE__{} = dataset, input, opts) do
put(dataset, new() |> add(input, opts), opts)
end
@doc """
Adds statements to a `RDF.Dataset` and overwrites all existing statements with the same subject-predicate combinations given in the `input` data.
  The `graph` option allows setting a different destination graph to which the
statements should be added, ignoring the graph context of given quads or the
name of given graphs in `input`.
Note: When the statements to be added are given as another `RDF.Dataset` and
a destination graph is set with the `graph` option, the descriptions of the
subjects in the different graphs are aggregated.
## Examples
iex> dataset = RDF.Dataset.new({EX.S, EX.P1, EX.O1})
...> RDF.Dataset.put_properties(dataset, {EX.S, EX.P1, EX.O2})
RDF.Dataset.new({EX.S, EX.P1, EX.O2})
iex> RDF.Dataset.put_properties(dataset, {EX.S, EX.P2, EX.O2})
RDF.Dataset.new([{EX.S, EX.P1, EX.O1}, {EX.S, EX.P2, EX.O2}])
iex> RDF.Dataset.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
...> |> RDF.Dataset.put_properties([{EX.S1, EX.P2, EX.O3}, {EX.S2, EX.P2, EX.O3}])
RDF.Dataset.new([{EX.S1, EX.P1, EX.O1}, {EX.S1, EX.P2, EX.O3}, {EX.S2, EX.P2, EX.O3}])
"""
@spec put_properties(t, input, keyword) :: t
def put_properties(dataset, input, opts \\ [])
def put_properties(%__MODULE__{} = dataset, %__MODULE__{} = input, opts) do
%__MODULE__{
dataset
| graphs:
Enum.reduce(
input.graphs,
dataset.graphs,
fn {graph_name, graph}, graphs ->
Map.update(
graphs,
graph_name,
graph,
fn current -> Graph.put_properties(current, graph, opts) end
)
end
)
}
end
def put_properties(%__MODULE__{} = dataset, input, opts) do
put_properties(dataset, new() |> add(input, opts), opts)
end
@doc """
Deletes statements from a `RDF.Dataset`.
  The `graph` option allows setting a different destination graph from which the
statements should be deleted, ignoring the graph context of given quads or the
name of given graphs.
Note: When the statements to be deleted are given as another `RDF.Dataset`,
  the dataset name must not match the dataset name of the dataset from which the statements
are deleted. If you want to delete only datasets with matching names, you can
use `RDF.Data.delete/2`.
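
  ## Examples

      iex> RDF.Dataset.new([{EX.S1, EX.P1, EX.O1}, {EX.S2, EX.P2, EX.O2}])
      ...> |> RDF.Dataset.delete({EX.S1, EX.P1, EX.O1})
      RDF.Dataset.new({EX.S2, EX.P2, EX.O2})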
"""
@spec delete(t, input, keyword) :: t
def delete(dataset, input, opts \\ [])
def delete(%__MODULE__{} = dataset, {_, _, _, graph} = quad, opts),
do: do_delete(dataset, destination_graph(opts, graph), quad, opts)
def delete(%__MODULE__{} = dataset, %Description{} = description, opts),
do: do_delete(dataset, destination_graph(opts), description, opts)
def delete(%__MODULE__{} = dataset, %Graph{} = graph, opts),
do: do_delete(dataset, destination_graph(opts, graph.name), graph, opts)
def delete(%__MODULE__{} = dataset, %__MODULE__{} = other_dataset, opts) do
other_dataset
|> graphs()
|> Enum.reduce(dataset, &delete(&2, &1, opts))
end
def delete(dataset, input, opts)
when is_list(input) or (is_map(input) and not is_struct(input)) do
Enum.reduce(input, dataset, &delete(&2, &1, opts))
end
def delete(%__MODULE__{} = dataset, input, opts) when not is_struct(input),
do: do_delete(dataset, destination_graph(opts), input, opts)
defp do_delete(dataset, graph_name, input, opts) do
if existing_graph = dataset.graphs[graph_name] do
new_graph = Graph.delete(existing_graph, input, opts)
%__MODULE__{
dataset
| graphs:
if Graph.empty?(new_graph) do
Map.delete(dataset.graphs, graph_name)
else
Map.put(dataset.graphs, graph_name, new_graph)
end
}
else
dataset
end
end
@doc """
Deletes the given graph.
"""
@spec delete_graph(t, Statement.graph_name() | [Statement.graph_name()] | nil) :: t
  def delete_graph(dataset, graph_names)
def delete_graph(%__MODULE__{} = dataset, graph_names) when is_list(graph_names) do
Enum.reduce(graph_names, dataset, &delete_graph(&2, &1))
end
def delete_graph(%__MODULE__{} = dataset, graph_name) do
%__MODULE__{dataset | graphs: Map.delete(dataset.graphs, coerce_graph_name(graph_name))}
end
@doc """
Deletes the default graph.
"""
@spec delete_default_graph(t) :: t
  def delete_default_graph(%__MODULE__{} = dataset),
    do: delete_graph(dataset, nil)
@doc """
Fetches the `RDF.Graph` with the given name.
  When a graph with the given name cannot be found, `:error` is returned.
## Examples
iex> dataset = RDF.Dataset.new([{EX.S1, EX.P1, EX.O1, EX.Graph}, {EX.S2, EX.P2, EX.O2}])
...> RDF.Dataset.fetch(dataset, EX.Graph)
{:ok, RDF.Graph.new({EX.S1, EX.P1, EX.O1}, name: EX.Graph)}
iex> RDF.Dataset.fetch(dataset, nil)
{:ok, RDF.Graph.new({EX.S2, EX.P2, EX.O2})}
iex> RDF.Dataset.fetch(dataset, EX.Foo)
:error
"""
@impl Access
@spec fetch(t, Statement.graph_name() | nil) :: {:ok, Graph.t()} | :error
def fetch(%__MODULE__{} = dataset, graph_name) do
Access.fetch(dataset.graphs, coerce_graph_name(graph_name))
end
@doc """
Fetches the `RDF.Graph` with the given name.
  When a graph with the given name cannot be found, the optionally
  given default value or `nil` is returned.
## Examples
iex> dataset = RDF.Dataset.new([{EX.S1, EX.P1, EX.O1, EX.Graph}, {EX.S2, EX.P2, EX.O2}])
...> RDF.Dataset.get(dataset, EX.Graph)
RDF.Graph.new({EX.S1, EX.P1, EX.O1}, name: EX.Graph)
iex> RDF.Dataset.get(dataset, nil)
RDF.Graph.new({EX.S2, EX.P2, EX.O2})
iex> RDF.Dataset.get(dataset, EX.Foo)
nil
iex> RDF.Dataset.get(dataset, EX.Foo, :bar)
:bar
"""
@spec get(t, Statement.graph_name() | nil, Graph.t() | nil) :: Graph.t() | nil
def get(%__MODULE__{} = dataset, graph_name, default \\ nil) do
case fetch(dataset, graph_name) do
{:ok, value} -> value
:error -> default
end
end
@doc """
  The graph with the given name.
"""
@spec graph(t, Statement.graph_name() | nil) :: Graph.t()
def graph(%__MODULE__{} = dataset, graph_name) do
Map.get(dataset.graphs, coerce_graph_name(graph_name))
end
@doc """
The default graph of a `RDF.Dataset`.
"""
@spec default_graph(t) :: Graph.t()
def default_graph(%__MODULE__{} = dataset) do
Map.get(dataset.graphs, nil, Graph.new())
end
@doc """
The set of all graphs.
"""
@spec graphs(t) :: [Graph.t()]
def graphs(%__MODULE__{} = dataset), do: Map.values(dataset.graphs)
@doc """
Gets and updates the graph with the given name, in a single pass.
Invokes the passed function on the `RDF.Graph` with the given name;
this function should return either `{graph_to_return, new_graph}` or `:pop`.
If the passed function returns `{graph_to_return, new_graph}`, the
return value of `get_and_update` is `{graph_to_return, new_dataset}` where
`new_dataset` is the input `Dataset` updated with `new_graph` for
the given name.
If the passed function returns `:pop` the graph with the given name is
removed and a `{removed_graph, new_dataset}` tuple gets returned.
## Examples
iex> dataset = RDF.Dataset.new({EX.S, EX.P, EX.O, EX.Graph})
...> RDF.Dataset.get_and_update(dataset, EX.Graph, fn current_graph ->
...> {current_graph, {EX.S, EX.P, EX.NEW}}
...> end)
{RDF.Graph.new({EX.S, EX.P, EX.O}, name: EX.Graph), RDF.Dataset.new({EX.S, EX.P, EX.NEW, EX.Graph})}
"""
@impl Access
@spec get_and_update(t, Statement.graph_name() | nil, update_graph_fun) :: {Graph.t(), input}
def get_and_update(%__MODULE__{} = dataset, graph_name, fun) do
graph_context = coerce_graph_name(graph_name)
case fun.(get(dataset, graph_context)) do
{old_graph, new_graph} ->
{old_graph, put(dataset, new_graph, graph: graph_context)}
:pop ->
pop(dataset, graph_context)
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
@doc """
Pops an arbitrary statement from a `RDF.Dataset`.
"""
@spec pop(t) :: {Statement.t() | nil, t}
def pop(dataset)
def pop(%__MODULE__{graphs: graphs} = dataset)
when graphs == %{},
do: {nil, dataset}
def pop(%__MODULE__{graphs: graphs} = dataset) do
# TODO: Find a faster way ...
[{graph_name, graph}] = Enum.take(graphs, 1)
{{s, p, o}, popped_graph} = Graph.pop(graph)
popped =
if Graph.empty?(popped_graph),
do: graphs |> Map.delete(graph_name),
else: graphs |> Map.put(graph_name, popped_graph)
{
{s, p, o, graph_name},
%__MODULE__{dataset | graphs: popped}
}
end
@doc """
Pops the graph with the given name.
  When a graph with the given name cannot be found, the optionally given default value
  or `nil` is returned.
## Examples
iex> dataset = RDF.Dataset.new([
...> {EX.S1, EX.P1, EX.O1, EX.Graph},
...> {EX.S2, EX.P2, EX.O2}])
...> RDF.Dataset.pop(dataset, EX.Graph)
{RDF.Graph.new({EX.S1, EX.P1, EX.O1}, name: EX.Graph), RDF.Dataset.new({EX.S2, EX.P2, EX.O2})}
iex> RDF.Dataset.pop(dataset, EX.Foo)
{nil, dataset}
"""
@impl Access
@spec pop(t, Statement.coercible_graph_name()) :: {Statement.t() | nil, t}
def pop(%__MODULE__{} = dataset, graph_name) do
case Access.pop(dataset.graphs, coerce_graph_name(graph_name)) do
{nil, _} ->
{nil, dataset}
{graph, new_graphs} ->
{graph, %__MODULE__{dataset | graphs: new_graphs}}
end
end
@doc """
The number of graphs within a `RDF.Dataset`.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3, EX.Graph}])
...> |> RDF.Dataset.graph_count()
2
"""
@spec graph_count(t) :: non_neg_integer
def graph_count(%__MODULE__{} = dataset) do
Enum.count(dataset.graphs)
end
@doc """
The number of statements within a `RDF.Dataset`.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}]) |>
...> RDF.Dataset.statement_count
3
"""
@spec statement_count(t) :: non_neg_integer
def statement_count(%__MODULE__{} = dataset) do
Enum.reduce(dataset.graphs, 0, fn {_, graph}, count ->
count + Graph.triple_count(graph)
end)
end
@doc """
  The set of all subjects used in the statements within all graphs of a `RDF.Dataset`.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}]) |>
...> RDF.Dataset.subjects
MapSet.new([RDF.iri(EX.S1), RDF.iri(EX.S2)])
"""
def subjects(%__MODULE__{} = dataset) do
Enum.reduce(dataset.graphs, MapSet.new(), fn {_, graph}, subjects ->
MapSet.union(subjects, Graph.subjects(graph))
end)
end
@doc """
The set of all properties used in the predicates within all graphs of a `RDF.Dataset`.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}]) |>
...> RDF.Dataset.predicates
MapSet.new([EX.p1, EX.p2])
"""
def predicates(%__MODULE__{} = dataset) do
Enum.reduce(dataset.graphs, MapSet.new(), fn {_, graph}, predicates ->
MapSet.union(predicates, Graph.predicates(graph))
end)
end
@doc """
The set of all resources used in the objects within a `RDF.Dataset`.
  Note: This function only collects IRIs and BlankNodes, not Literals.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p2, EX.O2, EX.Graph},
...> {EX.S3, EX.p1, EX.O2},
...> {EX.S4, EX.p2, RDF.bnode(:bnode)},
...> {EX.S5, EX.p3, "foo"}
...> ]) |> RDF.Dataset.objects
MapSet.new([RDF.iri(EX.O1), RDF.iri(EX.O2), RDF.bnode(:bnode)])
"""
def objects(%__MODULE__{} = dataset) do
Enum.reduce(dataset.graphs, MapSet.new(), fn {_, graph}, objects ->
MapSet.union(objects, Graph.objects(graph))
end)
end
@doc """
The set of all resources used within a `RDF.Dataset`.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p1, EX.O2, EX.Graph},
...> {EX.S2, EX.p2, RDF.bnode(:bnode)},
...> {EX.S3, EX.p1, "foo"}
...> ]) |> RDF.Dataset.resources
MapSet.new([RDF.iri(EX.S1), RDF.iri(EX.S2), RDF.iri(EX.S3),
RDF.iri(EX.O1), RDF.iri(EX.O2), RDF.bnode(:bnode), EX.p1, EX.p2])
"""
def resources(%__MODULE__{} = dataset) do
Enum.reduce(dataset.graphs, MapSet.new(), fn {_, graph}, resources ->
MapSet.union(resources, Graph.resources(graph))
end)
end
@doc """
All statements within all graphs of a `RDF.Dataset`.
When the optional `:filter_star` flag is set to `true` RDF-star statements with
a triple as subject or object will be filtered. The default value is `false`.
## Examples
iex> RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}]) |>
...> RDF.Dataset.statements
[{RDF.iri(EX.S1), RDF.iri(EX.p2), RDF.iri(EX.O3)},
{RDF.iri(EX.S2), RDF.iri(EX.p2), RDF.iri(EX.O2)},
{RDF.iri(EX.S1), RDF.iri(EX.p1), RDF.iri(EX.O1), RDF.iri(EX.Graph)}]
"""
@spec statements(t, keyword) :: [Statement.t()]
def statements(%__MODULE__{} = dataset, opts \\ []) do
Enum.flat_map(dataset.graphs, fn
{nil, graph} ->
Graph.triples(graph, opts)
{name, graph} ->
graph |> Graph.triples(opts) |> Enum.map(fn {s, p, o} -> {s, p, o, name} end)
end)
end
@doc """
  Returns whether the given `dataset` is empty.
Note: You should always prefer this over the use of `Enum.empty?/1` as it is significantly faster.
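
  ## Examples

      iex> RDF.Dataset.new() |> RDF.Dataset.empty?()
      true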
"""
@spec empty?(t) :: boolean
def empty?(%__MODULE__{} = dataset) do
Enum.empty?(dataset.graphs) or dataset |> graphs() |> Enum.all?(&Graph.empty?/1)
end
@doc """
Checks if the given `input` statements exist within `dataset`.
  The `graph` option allows setting a different destination graph in which the
statements should be checked, ignoring the graph context of given quads or the
name of given graphs.
## Examples
iex> dataset = RDF.Dataset.new([
...> {EX.S1, EX.p1, EX.O1, EX.Graph},
...> {EX.S2, EX.p2, EX.O2},
...> {EX.S1, EX.p2, EX.O3}])
...> RDF.Dataset.include?(dataset, {EX.S1, EX.p1, EX.O1, EX.Graph})
true
"""
@spec include?(t, input, keyword) :: boolean
def include?(dataset, input, opts \\ [])
def include?(%__MODULE__{} = dataset, {_, _, _, graph} = quad, opts),
do: do_include?(dataset, destination_graph(opts, graph), quad, opts)
def include?(%__MODULE__{} = dataset, %Description{} = description, opts),
do: do_include?(dataset, destination_graph(opts), description, opts)
def include?(%__MODULE__{} = dataset, %Graph{} = graph, opts),
do: do_include?(dataset, destination_graph(opts, graph.name), graph, opts)
def include?(%__MODULE__{} = dataset, %__MODULE__{} = other_dataset, opts) do
other_dataset
|> graphs()
|> Enum.all?(&include?(dataset, &1, opts))
end
def include?(dataset, input, opts)
when is_list(input) or (is_map(input) and not is_struct(input)) do
Enum.all?(input, &include?(dataset, &1, opts))
end
def include?(dataset, input, opts) when not is_struct(input),
do: do_include?(dataset, destination_graph(opts), input, opts)
defp do_include?(%__MODULE__{} = dataset, graph_name, input, opts) do
if graph = dataset.graphs[graph_name] do
Graph.include?(graph, input, opts)
else
false
end
end
@doc """
Checks if a graph of a `RDF.Dataset` contains statements about the given resource.
## Examples
iex> RDF.Dataset.new([{EX.S1, EX.p1, EX.O1}]) |> RDF.Dataset.describes?(EX.S1)
true
iex> RDF.Dataset.new([{EX.S1, EX.p1, EX.O1}]) |> RDF.Dataset.describes?(EX.S2)
false
"""
@spec describes?(t, Statement.t(), Statement.coercible_graph_name() | nil) :: boolean
def describes?(%__MODULE__{} = dataset, subject, graph_context \\ nil) do
if graph = dataset.graphs[coerce_graph_name(graph_context)] do
Graph.describes?(graph, subject)
else
false
end
end
@doc """
Returns the names of all graphs of a `RDF.Dataset` containing statements about the given subject.
## Examples
iex> dataset = RDF.Dataset.new([
...> {EX.S1, EX.p, EX.O},
...> {EX.S2, EX.p, EX.O},
...> {EX.S1, EX.p, EX.O, EX.Graph1},
...> {EX.S2, EX.p, EX.O, EX.Graph2}])
...> RDF.Dataset.who_describes(dataset, EX.S1)
[nil, RDF.iri(EX.Graph1)]
"""
  @spec who_describes(t, Statement.coercible_subject()) :: [Statement.graph_name() | nil]
def who_describes(%__MODULE__{} = dataset, subject) do
subject = coerce_subject(subject)
dataset.graphs
|> Map.values()
|> Stream.filter(&Graph.describes?(&1, subject))
|> Enum.map(& &1.name)
end
@doc """
Returns a nested map of the native Elixir values of a `RDF.Dataset`.
When a `:context` option is given with a `RDF.PropertyMap`, predicates will
be mapped to the terms defined in the `RDF.PropertyMap`, if present.
## Examples
iex> [
...> {~I<http://example.com/S>, ~I<http://example.com/p>, ~L"Foo", ~I<http://example.com/Graph>},
...> {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.XSD.integer(42), }
...> ]
...> |> RDF.Dataset.new()
...> |> RDF.Dataset.values()
%{
"http://example.com/Graph" => %{
"http://example.com/S" => %{"http://example.com/p" => ["Foo"]}
},
nil => %{
"http://example.com/S" => %{"http://example.com/p" => [42]}
}
}
"""
@spec values(t, keyword) :: map
def values(%__MODULE__{} = dataset, opts \\ []) do
if property_map = PropertyMap.from_opts(opts) do
map(dataset, Statement.default_property_mapping(property_map))
else
map(dataset, &Statement.default_term_mapping/1)
end
end
@doc """
Returns a nested map of a `RDF.Dataset` where each element from its quads is mapped with the given function.
The function `fun` will receive a tuple `{statement_position, rdf_term}` where
`statement_position` is one of the atoms `:subject`, `:predicate`, `:object` or
`:graph_name` while `rdf_term` is the RDF term to be mapped. When the given function
  returns `nil`, this will be interpreted as an error and will become the overall
result of the `map/2` call.
## Examples
iex> [
...> {~I<http://example.com/S>, ~I<http://example.com/p>, ~L"Foo", ~I<http://example.com/Graph>},
...> {~I<http://example.com/S>, ~I<http://example.com/p>, RDF.XSD.integer(42), }
...> ]
...> |> RDF.Dataset.new()
...> |> RDF.Dataset.map(fn
...> {:graph_name, graph_name} ->
...> graph_name
...> {:predicate, predicate} ->
...> predicate
...> |> to_string()
...> |> String.split("/")
...> |> List.last()
...> |> String.to_atom()
...> {_, term} ->
...> RDF.Term.value(term)
...> end)
%{
~I<http://example.com/Graph> => %{
"http://example.com/S" => %{p: ["Foo"]}
},
nil => %{
"http://example.com/S" => %{p: [42]}
}
}
"""
@spec map(t, Statement.term_mapping()) :: map
def map(dataset, fun)
def map(%__MODULE__{} = dataset, fun) do
Map.new(dataset.graphs, fn {graph_name, graph} ->
{fun.({:graph_name, graph_name}), Graph.map(graph, fun)}
end)
end
@doc """
Checks if two `RDF.Dataset`s are equal.
Two `RDF.Dataset`s are considered to be equal if they contain the same triples
and have the same name.
"""
@spec equal?(t | any, t | any) :: boolean
def equal?(dataset1, dataset2)
def equal?(%__MODULE__{} = dataset1, %__MODULE__{} = dataset2) do
clear_metadata(dataset1) == clear_metadata(dataset2)
end
def equal?(_, _), do: false
@doc """
Returns the aggregated prefixes of all graphs of `dataset` as a `RDF.PrefixMap`.
"""
@spec prefixes(t) :: PrefixMap.t() | nil
def prefixes(%__MODULE__{} = dataset) do
dataset
|> RDF.Dataset.graphs()
|> Enum.reduce(RDF.PrefixMap.new(), fn graph, prefixes ->
if graph.prefixes do
RDF.PrefixMap.merge!(prefixes, graph.prefixes, :ignore)
else
prefixes
end
end)
end
defp clear_metadata(%__MODULE__{} = dataset) do
%__MODULE__{
dataset
| graphs:
Map.new(dataset.graphs, fn {name, graph} ->
{name, Graph.clear_metadata(graph)}
end)
}
end
defimpl Enumerable do
alias RDF.Dataset
def member?(dataset, statement), do: {:ok, Dataset.include?(dataset, statement)}
def count(dataset), do: {:ok, Dataset.statement_count(dataset)}
def slice(dataset) do
size = Dataset.statement_count(dataset)
{:ok, size, &Enumerable.List.slice(Dataset.statements(dataset), &1, &2, size)}
end
def reduce(dataset, acc, fun) do
dataset
|> Dataset.statements()
|> Enumerable.List.reduce(acc, fun)
end
end
defimpl Collectable do
alias RDF.Dataset
def into(original) do
collector_fun = fn
dataset, {:cont, list} when is_list(list) ->
Dataset.add(dataset, List.to_tuple(list))
dataset, {:cont, elem} ->
Dataset.add(dataset, elem)
dataset, :done ->
dataset
_dataset, :halt ->
:ok
end
{original, collector_fun}
end
end
end
|
lib/rdf/dataset.ex
| 0.908992 | 0.678939 |
dataset.ex
|
starcoder
|
defmodule Rabbit.SerializerError do
@moduledoc false
defexception [:message]
end
defmodule Rabbit.Serializer do
@moduledoc """
A behaviour to implement serializers.
To create a serializer, you just need to implement the `c:encode/1` and `c:decode/1`
  callbacks.
## Example
defmodule MySerializer do
@behaviour Rabbit.Serializer
@impl true
def encode(data) do
# Create a binary from data
end
@impl true
def decode(binary) do
# Create data from a binary
end
end
## Default Serializers
By default, Rabbit comes with serializers for the following content types:
* `"application/etf"` - built in erlang term format.
* `"application/json"` - requires the `Jason` library to be added.
You can modify the available serializers through application config:
config :rabbit,
serializers: %{
"application/custom-type" => MySerializer
}
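
  ## Usage

  Serializers are usually invoked through the helper functions in this module.
  A minimal sketch using the built-in ETF serializer:

      {:ok, binary} = Rabbit.Serializer.encode(Rabbit.Serializers.ETF, %{hello: "world"})
      {:ok, %{hello: "world"}} = Rabbit.Serializer.decode(Rabbit.Serializers.ETF, binary)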
"""
@type t :: module()
@doc """
Callback invoked to encode the given data to a binary.
"""
@callback encode(any()) :: {:ok, binary()} | {:error, Exception.t()}
@doc """
Callback invoked to decode the given binary to data.
"""
@callback decode(binary()) :: {:ok, any()} | {:error, Exception.t()}
@defaults %{
"application/json" => Rabbit.Serializers.JSON,
"application/etf" => Rabbit.Serializers.ETF
}
@doc false
@spec encode(Rabbit.Serializer.t(), any()) :: {:ok, any()} | {:error, Exception.t()}
def encode(serializer, data) do
do_serialize(serializer, :encode, data)
end
@doc false
@spec encode!(Rabbit.Serializer.t(), any()) :: any
def encode!(serializer, data) do
case encode(serializer, data) do
{:ok, data} -> data
{:error, error} -> raise Rabbit.SerializerError, Exception.message(error)
end
end
@doc false
@spec decode(Rabbit.Serializer.t(), any()) :: {:ok, any()} | {:error, Exception.t()}
def decode(serializer, data) do
do_serialize(serializer, :decode, data)
end
@doc false
@spec decode!(Rabbit.Serializer.t(), any()) :: any
def decode!(serializer, data) do
case decode(serializer, data) do
{:ok, data} -> data
{:error, error} -> raise Rabbit.SerializerError, Exception.message(error)
end
end
@doc false
@spec defaults() :: map()
def defaults do
@defaults
end
defp do_serialize(serializer, fun, data) do
apply(serializer, fun, [data])
end
end
|
lib/rabbit/serializer.ex
| 0.919552 | 0.432303 |
serializer.ex
|
starcoder
|
defmodule TemperatureLogger do
@moduledoc """
Client/Server implementation that allows the client to log temperature via
temperature-sensing hardware. The client can customize...
* the UART port.
* the frequency of readings.
* the destination log file.
Additionally, the client can enumerate the UART ports available.
**Note:** Temperature readings are logged in degrees Celsius and Fahrenheit.
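
  ## Example

  A sketch of typical usage (the port name and period are illustrative):

      {:ok, pid} = TemperatureLogger.start_link([])
      {:ok, ports} = TemperatureLogger.enumerate(pid)
      :ok = TemperatureLogger.start_logging(pid, port: "ttyUSB0", period: 60)
      :ok = TemperatureLogger.stop_logging(pid, port: "ttyUSB0")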
"""
use GenServer
require Logger
alias Nerves.UART
alias Poison.Parser
alias TemperatureLogger.Settings
alias TemperatureLogger.LoggerManager
@uart_pid UART
@on "O"
@off "F"
@uart_open_options [
speed: 9600,
active: true,
framing: {UART.Framing.Line, separator: "\n"}
]
## Client API
@spec start_link(keyword()) :: {:ok, pid()} | {:error, term()}
def start_link(opts) do
GenServer.start_link(__MODULE__, :ok, opts)
end
@spec enumerate(pid()) :: map()
def enumerate(server) do
GenServer.call(server, {:enumerate})
end
@spec start_logging(pid(), keyword()) :: :ok | {:error, term()}
def start_logging(server, opts \\ []) do
GenServer.call(server, {:start_logging, opts})
end
@spec stop_logging(pid(), keyword()) :: :ok | {:error, term()}
def stop_logging(server, opts \\ []) do
GenServer.call(server, {:stop_logging, opts})
end
## Server Callbacks
def init(:ok) do
{:ok, _pid} = UART.start_link(name: uart_pid())
state = %{}
{:ok, state}
end
def handle_call({:enumerate}, _from, state) do
ports = UART.enumerate()
{:reply, {:ok, ports}, state}
end
def handle_call({:start_logging, opts}, _from, state) do
port = Keyword.get(opts, :port, Settings.default_port)
if Map.has_key?(state, port) do
{:reply, {:error, :eagain}, state}
else
log_path = Path.expand(Keyword.get(opts, :log_path, Settings.default_log_path))
period = Keyword.get(opts, :period, Settings.default_period)
case set_up(port, log_path, period) do
{:ok, settings} ->
new_state = Map.put(state, port, settings)
{:reply, :ok, new_state}
{:error, error} ->
{:reply, {:error, error}, state}
end
end
end
def handle_call({:stop_logging, opts}, _from, state) do
port = Keyword.get(opts, :port, Settings.default_port)
if Map.has_key?(state, port) do
case teardown(Map.get(state, port)) do
:ok ->
new_state = Map.delete(state, port)
{:reply, :ok, new_state}
{:error, error} ->
{:reply, {:error, error}, state}
end
else
{:reply, {:error, :ebadf}, state}
end
end
def handle_info({:nerves_uart, port, {:error, :einval}}, state) do
if Map.has_key?(state, port) do
LoggerManager.remove_backend(Map.get(state, port).log_path)
new_state = Map.delete(state, port)
{:noreply, new_state}
else
{:noreply, state}
end
end
def handle_info({:nerves_uart, _port, {:error, _err}}, state) do
{:noreply, state}
end
def handle_info({:nerves_uart, port, raw_message}, state) do
if Map.has_key?(state, port) do
{point_type, new_settings} = Settings.next(Map.get(state, port))
if point_type == :crest or point_type == :trough do
with {:ok, data} <- Parser.parse(String.trim(raw_message)),
do: Logger.info(message_from_data(data))
end
new_state = Map.put(state, port, new_settings)
{:noreply, new_state}
else
{:noreply, state}
end
end
def handle_info(msg, state) do
IO.puts("Received message: #{inspect(msg)}")
{:noreply, state}
end
defp uart_pid do
@uart_pid
end
defp set_up(port, log_path, period) do
with {:ok, settings} <- Settings.generate(log_path, period),
:ok <- UART.open(uart_pid(), port, @uart_open_options),
:ok <- UART.flush(uart_pid()),
:ok <- UART.write(uart_pid(), @on),
:ok <- UART.drain(uart_pid()),
:ok <- LoggerManager.add_backend(log_path),
do: {:ok, settings}
end
defp teardown(settings) do
with :ok <- UART.write(uart_pid(), @off),
:ok <- UART.drain(uart_pid()),
:ok <- UART.close(uart_pid()),
do: LoggerManager.remove_backend(settings.log_path)
end
defp message_from_data(data) do
Enum.join([
Map.get(data, "celsius"),
Map.get(data, "fahrenheit")
], ", ")
end
end
|
temperature_logger_umbrella/apps/temperature_logger/lib/temperature_logger.ex
| 0.794425 | 0.425904 |
temperature_logger.ex
|
starcoder
|
defmodule Ecto.Validator do
@moduledoc """
Validates a given struct or dict given a set of predicates.
Ecto.Validator.struct(user,
name: present() when on_create?(user),
age: present(message: "must be present"),
age: greater_than(18),
also: validate_other
)
Validations are passed as the second argument in the attribute-predicate
format. Each predicate can be filtered via the `when` operator. Note `when`
here is not limited to only guard expressions.
The predicates above are going to receive the attribute being validated
and its current value as argument. For example, the `present` predicate
above is going to be called as:
present(:name, user.name)
present(:age, user.age, message: "must be present")
The validator also handles a special key `:also`, which is used to pipe
to predicates without a particular attribute. Instead, such predicates
receive the struct as argument. In this example, `validate_other` will
be invoked as:
validate_other(user)
Note all predicates must return a keyword list, with the attribute error
as key and the validation message as value.
A handful of predicates can be found at `Ecto.Validator.Predicates`.
"""
@doc """
Validates a given dict given a set of predicates.
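
  A usage sketch (`params` is any dict; the predicates are assumed to be
  imported from `Ecto.Validator.Predicates`):

      Ecto.Validator.dict(params,
        name: present(),
        age: present(message: "must be present")
      )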
"""
@spec dict(Macro.t, Keyword.t) :: Macro.t
defmacro dict(value, opts) when is_list(opts) do
process opts, value, fn var, attr ->
quote do: Dict.get(unquote(var), unquote(attr))
end
end
@doc """
Validates a given dict, with binary keys, given a set of predicates.
"""
@spec bin_dict(Macro.t, Keyword.t) :: Macro.t
defmacro bin_dict(value, opts) when is_list(opts) do
process opts, value, fn var, attr ->
quote do: Dict.get(unquote(var), unquote(atom_to_binary(attr)))
end
end
@doc """
Validates a given struct given a set of predicates.
"""
@spec struct(Macro.t, Keyword.t) :: Macro.t
defmacro struct(value, opts) when is_list(opts) do
process opts, value, fn var, attr ->
quote do: Map.get(unquote(var), unquote(attr))
end
end
defp process([], _value, _getter), do: []
defp process(opts, value, getter) do
var = quote do: var
validations =
opts
|> Enum.map(&process_each(&1, var, getter))
|> concat
quote do
unquote(var) = unquote(value)
unquote(validations)
end
end
defp concat(predicates) do
Enum.reduce(predicates, fn i, acc ->
quote do: unquote(acc) ++ unquote(i)
end)
end
defp process_each({:also, function}, var, _getter) do
handle_ops function, fn call -> Macro.pipe(var, call, 0) end
end
defp process_each({attr, function}, var, getter) do
handle_ops function, fn call ->
Macro.pipe(attr, Macro.pipe(getter.(var, attr), call, 0), 0)
end
end
defp handle_ops({:when, _, [left, right]}, callback) do
quote do
if unquote(right), do: unquote(concat(handle_and(left, callback))), else: []
end
end
defp handle_ops(other, callback) do
concat(handle_and(other, callback))
end
defp handle_and({:and, _, [left, right]}, callback) do
handle_and(left, callback) ++ [callback.(right)]
end
defp handle_and(other, callback) do
[callback.(other)]
end
end
|
lib/ecto/validator.ex
| 0.921145 | 0.589835 |
validator.ex
|
starcoder
|
defmodule Shared.Ecto.Interval do
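  @moduledoc """
  An `Ecto.Type` that maps the Postgres `interval` type to `Timex.Duration` structs.

  `cast/1` accepts `Timex.Duration` structs, floats (interpreted as hours),
  strings containing a float, and maps with `"megaseconds"`, `"microseconds"`
  and `"seconds"` keys.

  A minimal usage sketch (the schema and field names are illustrative):

      schema "shifts" do
        field :duration, Shared.Ecto.Interval
      end
  """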
@behaviour Ecto.Type
def type, do: :interval
def cast(%Timex.Duration{} = duration) do
{:ok, duration}
end
def cast(duration_as_binary) when is_binary(duration_as_binary) do
case Float.parse(duration_as_binary) do
{duration, ""} -> cast(duration)
_ -> cast(:error)
end
end
def cast(duration) when is_float(duration) do
duration
|> float_hours_to_seconds
|> Timex.Duration.from_seconds()
|> cast()
end
def cast(%{"megaseconds" => megaseconds, "microseconds" => microseconds, "seconds" => seconds}) do
{:ok, %Timex.Duration{megaseconds: megaseconds, microseconds: microseconds, seconds: seconds}}
end
def cast(_) do
:error
end
def dump(%Timex.Duration{
megaseconds: megaseconds,
microseconds: microseconds,
seconds: remainder_seconds
})
when is_integer(remainder_seconds) and is_integer(microseconds) and is_integer(megaseconds) do
seconds =
megaseconds_to_seconds(megaseconds) + microseconds_to_seconds(microseconds) +
remainder_seconds
{days, new_remainder_seconds} = seconds_to_days_and_remainder_seconds(seconds)
{:ok, %Postgrex.Interval{months: 0, days: days, secs: new_remainder_seconds}}
end
def dump(duration) when is_float(duration) do
{:ok, timex_duration} =
duration
|> cast
dump(timex_duration)
end
def dump(_) do
:error
end
def load(%Postgrex.Interval{months: 0, days: days, secs: seconds}) do
{:ok, Timex.Duration.from_seconds(days * seconds_per_day() + seconds)}
end
def load(_) do
:error
end
def embed_as(_), do: :self
def equal?(interval1, interval2), do: interval1 == interval2
defp megaseconds_to_seconds(megaseconds), do: megaseconds * 1_000_000
defp microseconds_to_seconds(microseconds), do: round(microseconds / 1_000_000)
defp seconds_per_day, do: 60 * 60 * 24
defp seconds_to_days_and_remainder_seconds(seconds) do
{div(seconds, seconds_per_day()), rem(seconds, seconds_per_day())}
end
defp float_hours_to_seconds(hours) do
round(hours * 60 * 60)
end
end
if Code.ensure_loaded?(Jason) do
defimpl Jason.Encoder, for: Timex.Duration do
def encode(duration, opts) do
Jason.Encode.map(Map.take(duration, [:megaseconds, :microseconds, :seconds]), opts)
end
end
end
|
lib/ecto/interval.ex
| 0.59843 | 0.435001 |
interval.ex
|
starcoder
|
defmodule Grizzly.ZWave.NodeIdList do
@moduledoc false
# This module contains helpers for parsing and encoding a list of node ids
# into a binary with bytes that are bitmasks of the node ids contained in the
# list. This is common in network commands that contain a list of node ids in
# the Z-Wave network.
@node_ids_list_len 29
@doc """
Parse a binary mask of node ids
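
  For example, a 29-byte mask with bits 1 and 3 set in the first byte:

      iex> Grizzly.ZWave.NodeIdList.parse(<<0b00000101, 0::224>>)
      [1, 3]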
"""
@spec parse(binary()) :: [Grizzly.ZWave.node_id()]
def parse(node_ids) when byte_size(node_ids) == @node_ids_list_len do
unmask(node_ids, &node_id_modifier/2)
end
def parse(<<node_ids::binary-size(@node_ids_list_len), 0x00::16, _rest::binary>>) do
unmask(node_ids, &node_id_modifier/2)
end
def parse(
<<node_ids::binary-size(@node_ids_list_len), extended_node_ids_list_len::16,
extended_node_ids_list::binary-size(extended_node_ids_list_len)>>
) do
unmask(node_ids, &node_id_modifier/2) ++
unmask(extended_node_ids_list, &node_id_extended_modifier/2)
end
defp unmask(node_ids_bin, modifier) do
unmask(node_ids_bin, 0, [], modifier)
end
defp unmask(<<>>, _byte_offset, node_ids_list, _modifier) do
Enum.sort(node_ids_list)
end
defp unmask(<<masked_byte::binary-size(1), rest::binary>>, byte_offset, node_id_list, modifier) do
new_node_id_list = add_node_ids_in_byte(masked_byte, node_id_list, byte_offset, modifier)
unmask(rest, byte_offset + 1, new_node_id_list, modifier)
end
defp add_node_ids_in_byte(byte, node_id_list, byte_offset, modifier) do
<<eight::1, seven::1, six::1, five::1, four::1, three::1, two::1, one::1>> = byte
node_ids = [
{one, 1},
{two, 2},
{three, 3},
{four, 4},
{five, 5},
{six, 6},
{seven, 7},
{eight, 8}
]
Enum.reduce(node_ids, node_id_list, fn
{0, _id}, ids -> ids
{1, id}, ids -> [modifier.(id, byte_offset) | ids]
end)
end
defp node_id_modifier(node_id, byte_offset), do: node_id + byte_offset * 8
defp node_id_extended_modifier(node_id, byte_offset),
do: node_id_modifier(node_id, byte_offset) + 255
@typedoc """
  Options for how to encode the node list into the binary mask

  * `:extended` - whether or not the node list contains extended ids
    (default `true`). For some command classes that predate Z-Wave long range
    the node list binary only contains 29 bytes. Command class versions
    that support 16 bit node ids produce a binary list of at minimum 31
    bytes: 29 for the 8 bit node ids and 2 bytes for the byte size of the
    extended node id binary. If there are no extended node ids then the
    byte size bytes will be `0x0000`.
"""
@type to_binary_opt() :: {:extended, boolean()}
@doc """
Make a list of node ids into the binary node id list mask
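
  For example, node ids 1 and 3 without the extended node id section:

      iex> Grizzly.ZWave.NodeIdList.to_binary([1, 3], extended: false)
      <<0b00000101, 0::224>>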
"""
@spec to_binary([Grizzly.ZWave.node_id()], [to_binary_opt()]) :: binary()
def to_binary(node_id_list, opts \\ []) do
contains_extended? = Keyword.get(opts, :extended, true)
{node_ids, node_ids_extended} = :lists.partition(fn id -> id < 256 end, node_id_list)
node_ids_binary = node_ids_to_binary(node_ids)
if contains_extended? do
extended_node_ids_binary = extended_node_ids_to_binary(node_ids_extended)
extended_node_id_list_len = byte_size(extended_node_ids_binary)
<<node_ids_binary::binary, <<extended_node_id_list_len::16>>,
extended_node_ids_binary::binary>>
else
node_ids_binary
end
end
defp node_ids_to_binary(node_ids, opts \\ []) do
number_bytes = opts[:bytes] || 29
offset = opts[:offset] || 0
node_id_map = Enum.reduce(node_ids, %{}, fn id, hash -> Map.put(hash, id, id) end)
for byte_index <- 0..(number_bytes - 1), into: <<>> do
for bit_index <- 8..1, into: <<>> do
node_id = byte_index * 8 + bit_index + offset
if node_id_map[node_id], do: <<1::size(1)>>, else: <<0::size(1)>>
end
end
end
defp extended_node_ids_to_binary([]) do
<<>>
end
defp extended_node_ids_to_binary(node_ids) do
max = Enum.max(node_ids)
    # Subtract 31 because the extended node ids start after the 31st
    # byte of a node list binary mask, so only the bytes beyond that
    # offset need to be generated here.
num_bytes = floor(max / 8) - 31
node_ids_to_binary(node_ids, offset: 255, bytes: num_bytes)
end
end
|
lib/grizzly/zwave/node_id_list.ex
| 0.639061 | 0.438184 |
node_id_list.ex
|
starcoder
|
defmodule Video.Timestamp do
@second_in_ms 1000
@minute_in_ms 60 * @second_in_ms
@hour_in_ms 60 * @minute_in_ms
@expected_length 12
defguardp looks_valid(str) when is_binary(str) and byte_size(str) == @expected_length
# 12*8=96
@type t :: <<_::96>>
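  @doc """
  Checks if the given value is a valid ffmpeg timestamp in `HH:MM:SS.mmm` format.

      iex> Video.Timestamp.valid?("00:00:01.337")
      true

      iex> Video.Timestamp.valid?("1.337")
      false
  """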
def valid?(str) do
looks_valid(str) && Regex.match?(~r/^\d{2}:\d{2}:\d{2}\.\d{3}$/, str)
end
def zero(), do: "00:00:00.000"
@doc """
Takes a duration in millisecond and returns it as an ffmpeg formatted timestamp
iex> Video.Timestamp.from_milliseconds(1337)
"00:00:01.337"
"""
@spec from_milliseconds(integer()) :: t()
def from_milliseconds(duration_in_ms) do
hours = div(duration_in_ms, @hour_in_ms)
duration_in_ms = rem(duration_in_ms, @hour_in_ms)
minutes = div(duration_in_ms, @minute_in_ms)
duration_in_ms = rem(duration_in_ms, @minute_in_ms)
    seconds = div(duration_in_ms, @second_in_ms)
milliseconds = rem(duration_in_ms, @second_in_ms)
"#{pad_left(hours)}:#{pad_left(minutes)}:#{pad_left(seconds)}.#{pad_left(milliseconds, 3)}"
end
@doc """
Takes a timed point and returns the ffmpeg timestamp
iex> %Video.TimedPoint{time_offset_ms: 1337, lat: 0, lon: 0}
...> |> Video.Timestamp.from_timed_point()
"00:00:01.337"
"""
@spec from_timed_point(Video.TimedPoint.t()) :: t()
def from_timed_point(%Video.TimedPoint{time_offset_ms: duration_in_ms}),
do: from_milliseconds(duration_in_ms)
@doc """
Takes an ffmpeg timestamp and returns the value in milliseconds
iex> Video.Timestamp.in_milliseconds("00:00:01.337")
1337
"""
@spec in_milliseconds(t()) :: integer()
def in_milliseconds(timestamp) when looks_valid(timestamp) do
<<hours::binary-size(2), ":", minutes::binary-size(2), ":", seconds::binary-size(2), ".",
milliseconds::binary-size(3)>> = timestamp
String.to_integer(hours) * @hour_in_ms +
String.to_integer(minutes) * @minute_in_ms +
String.to_integer(seconds) * @second_in_ms +
String.to_integer(milliseconds)
end
@spec add_milliseconds(t() | :start | :end, integer()) :: t()
@doc """
Add milliseconds to the timestamp in ffmpeg format, and return it again
in ffmpeg format. Negative milliseconds are allowed, but the time will
be clamped to 0.
iex> Video.Timestamp.add_milliseconds("00:00:01.337", 42)
"00:00:01.379"
iex> Video.Timestamp.add_milliseconds("00:00:01.337", -31337)
"00:00:00.000"
"""
def add_milliseconds(:start, ms_to_add), do: add_milliseconds(zero(), ms_to_add)
def add_milliseconds(:end, ms_to_add) when ms_to_add < 0 do
"-" <> add_milliseconds(zero(), -1 * ms_to_add)
end
def add_milliseconds(timestamp, ms_to_add) when is_integer(ms_to_add) do
timestamp
|> in_milliseconds()
|> Kernel.+(ms_to_add)
|> Kernel.max(0)
|> from_milliseconds()
end
defp pad_left(int, num \\ 2), do: String.pad_leading("#{int}", num, "0")
@doc """
Try to parse a timestamp. It returns the timestamp on success, or nil
otherwise. The 2nd argument will be printed on error for identification
purposes.
iex> Video.Timestamp.valid_or_nil("00:00:01.337", "wat")
"00:00:01.337"
"""
@spec valid_or_nil(any(), any()) :: t() | nil
def valid_or_nil(nil, _ident), do: nil
def valid_or_nil(timestamp, ident) do
if valid?(timestamp) do
timestamp
else
IO.puts(
:stderr,
"Warning: timestamp '#{timestamp}' given, but it is not a valid ffmpeg timestamp (#{inspect(ident)})"
)
nil
end
end
end
|
lib/video/timestamp.ex
| 0.827166 | 0.472988 |
timestamp.ex
|
starcoder
|
defmodule KittenBlue.JWT do
@moduledoc """
This module provides JWT Claims handling functions.
* verify_claims : Verify claims in Payload
"""
@doc """
Claims verification in Payload
This function validates the basic claims defined in RFC7519.
```
valid_claims = %{iss: "https://accounts.google.com", aud: "your_oidc_client_id", nonce: "12345"}
:ok = KittenBlue.JWT.verify_claims(payload, valid_claims)
```
  This function supports the following claims:

  * `iss` : Checked for an exact match.
  * `aud` : Checked for an exact match, or for membership when the payload value is a list.
  * `exp` : Valid when greater than or equal to the specified value.
  * `nbf` : Valid when less than or equal to the specified value.
"""
@spec verify_claims(payload :: map, valid_claims :: map) ::
:ok | {:error, :invalid_payload, claim_name :: String.t()}
def verify_claims(payload, valid_claims) when is_map(payload) and is_map(valid_claims) do
with :ok <- validate_iss(payload["iss"], valid_claims["iss"]),
:ok <- validate_aud(payload["aud"], valid_claims["aud"]),
:ok <- validate_exp(payload["exp"], valid_claims["exp"]),
:ok <- validate_nbf(payload["nbf"], valid_claims["nbf"]) do
:ok
end
end
defp validate_iss(_, nil), do: :ok
defp validate_iss(iss, valid_iss) when iss == valid_iss, do: :ok
defp validate_iss(_, _), do: {:error, :invalid_payload, "iss"}
defp validate_aud(_, nil), do: :ok
defp validate_aud(aud, valid_aud) when is_list(aud) do
if valid_aud in aud do
:ok
else
{:error, :invalid_payload, "aud"}
end
end
defp validate_aud(aud, valid_aud) when aud == valid_aud, do: :ok
defp validate_aud(_, _), do: {:error, :invalid_payload, "aud"}
defp validate_exp(_, nil), do: :ok
defp validate_exp(exp, valid_exp)
when is_integer(exp) and is_integer(valid_exp) and exp >= valid_exp,
do: :ok
defp validate_exp(_, _), do: {:error, :invalid_payload, "exp"}
defp validate_nbf(_, nil), do: :ok
defp validate_nbf(nbf, valid_nbf)
when is_integer(nbf) and is_integer(valid_nbf) and nbf <= valid_nbf,
do: :ok
defp validate_nbf(_, _), do: {:error, :invalid_payload, "nbf"}
end
|
lib/kitten_blue/jwt.ex
| 0.826257 | 0.915997 |
jwt.ex
|
starcoder
|
defmodule Ello.V2.ImageView do
use Ello.V2.Web, :view
@moduledoc """
Serializes an image and image metadata.
Usage:
render(Ello.V2.ImageView, :image, [
image: user.avatar_struct, # An %Ello.Core.Image{} struct
conn: conn # The conn - for determining pixelation.
])
"""
def render("image.json", %{conn: conn, image: image}) do
image.versions
|> Enum.reduce(%{}, &render_version(&1, &2, image, conn))
|> Map.put("original", %{url: image_url(image.path, image.filename)})
end
defp render_version(version, results, image, conn) do
Map.put(results, version.name, %{
url: image_url(image.path, filename(version, image, conn)),
metadata: metadata(version)
})
end
# content nsfw + no nsfw = pixellated
defp filename(version,
%{user: %{settings: %{posts_adult_content: true}}},
%{assigns: %{allow_nsfw: false}}), do: version.pixellated_filename
# content nudity + no nudity = pixellated
defp filename(version,
%{user: %{settings: %{posts_nudity: true}}},
%{assigns: %{allow_nudity: false}}), do: version.pixellated_filename
# _ + _ = normal
defp filename(version, _, _), do: version.filename
defp metadata(%{height: nil, width: nil, size: nil, type: nil}), do: nil
defp metadata(version) do
Map.take(version, [:height, :width, :size, :type])
end
@doc """
Return a full URI given an image path and file name.
Handles domain sharding.
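
  For example (the host depends on the `:asset_host` config):

      image_url("/uploads/user/avatar/1", "ello-small.png")
      #=> "https://<asset host>/uploads/user/avatar/1/ello-small.png"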
"""
def image_url(_, nil), do: ""
def image_url(_, ""), do: ""
def image_url(path, filename) do
filename
|> asset_host
|> URI.merge(path <> "/" <> filename)
|> URI.to_string
end
defp asset_host(filename) do
asset_host = "https://" <> Application.get_env(:ello_v2, :asset_host)
if String.contains?(asset_host, "%d") do
String.replace(asset_host, "%d", asset_host_number(filename))
else
asset_host
end
end
  defp asset_host_number(filename) do
    # Shard deterministically across hosts based on the CRC32 of the filename.
    filename
    |> :erlang.crc32()
    |> Integer.mod(3)
    |> Integer.to_string
  end
end
|
apps/ello_v2/web/views/image_view.ex
| 0.705988 | 0.439326 |
image_view.ex
|
starcoder
|
defmodule Cashtrail.Banking.Institution do
@moduledoc """
This is an `Ecto.Schema` struct that represents a financial institution of the entity.
## Definition
  According to [Investopedia](https://www.investopedia.com/terms/f/financialinstitution.asp),
  a financial institution is a company engaged in the business of dealing with financial
  and monetary transactions such as deposits, loans, investments, and currency exchange.
  Financial institutions can be banks, brokers, investment dealers, or currency exchanges.
## Fields
* `:id` - The unique id of the institution.
* `:country` - The country where the institution is located.
  * `:bank_code` - The code of the institution in the country where the institution
    is located.
* `:swift` - The SWIFT code that identifies a particular bank worldwide.
* `:logo_url` - The url with the logo of the institution.
  * `:contact_id` - The unique id of the contact to which the institution refers. As an
    institution is a contact, this id must be provided. See `Cashtrail.Contacts.Contact`
    to learn more about contacts.
  * `:contact` - The contact to which the institution refers.
* `:inserted_at` - When the institution was inserted at the first time.
* `:updated_at` - When the institution was updated at the last time.
"""
use Ecto.Schema
import Ecto.Changeset
alias Cashtrail.Contacts
@type t :: %Cashtrail.Banking.Institution{
id: Ecto.UUID.t() | nil,
country: String.t() | nil,
bank_code: String.t() | nil,
swift: String.t() | nil,
logo_url: String.t() | nil,
contact_id: Ecto.UUID.t() | nil,
contact: Cashtrail.Contacts.Contact.t() | Ecto.Association.NotLoaded.t() | nil,
inserted_at: NaiveDateTime.t() | nil,
updated_at: NaiveDateTime.t() | nil,
__meta__: Ecto.Schema.Metadata.t()
}
@primary_key {:id, :binary_id, autogenerate: true}
@foreign_key_type :binary_id
schema "institutions" do
field :country, :string
field :bank_code, :string
field :logo_url, :string
field :swift, :string
belongs_to :contact, Contacts.Contact, on_replace: :update
timestamps()
end
@url_regex ~r/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&\/\/=]*)/
@swift_regex ~r/[A-Z]{6}[A-Z0-9]{2}([A-Z0-9]{3})?/i
@doc false
@spec changeset(t(), map()) :: Ecto.Changeset.t()
def changeset(institution, attrs) do
institution
|> cast(attrs, [:country, :bank_code, :swift, :logo_url, :contact_id])
|> validate_format(:swift, @swift_regex, message: "is not a valid swift code")
|> validate_format(:logo_url, @url_regex, message: "is not a valid url")
|> ensure_associated_contact()
end
defp ensure_associated_contact(changeset) do
if Ecto.Changeset.get_field(changeset, :contact_id) do
changeset
else
cast_assoc(changeset, :contact, required: true)
end
end
end
|
apps/cashtrail/lib/cashtrail/banking/institution.ex
| 0.810028 | 0.683056 |
institution.ex
|
starcoder
|
defmodule Elastic.HTTP do
@moduledoc ~S"""
Used to make raw calls to Elastic Search.
Each function returns a tuple indicating whether or not the request
succeeded or failed (`:ok` or `:error`), the status code of the response,
and then the processed body of the response.
For example, a request like this:
```elixir
Elastic.HTTP.get(Elastic.base_url() <> "/answer/_search")
```
Would return a response like this:
```
{:ok, 200,
%{"_shards" => %{"failed" => 0, "successful" => 5, "total" => 5},
"hits" => %{"hits" => [%{"_id" => "1", "_index" => "answer", "_score" => 1.0,
"_source" => %{"text" => "I like using Elastic Search"}, "_type" => "answer"}],
"max_score" => 1.0, "total" => 1}, "timed_out" => false, "took" => 7}}
```
"""
alias Elastic.AWS
alias Elastic.ResponseHandler
alias Tesla.Env
@type method ::
:head
| :get
| :delete
| :trace
| :options
| :post
| :put
| :patch
@type url :: binary() | URI.t()
@doc """
Makes a request using the GET HTTP method, and can take a body.
```
Elastic.HTTP.get(Elastic.base_url() <> "/answer/_search", body: %{query: ...})
```
"""
@spec get(url(), Keyword.t()) :: ResponseHandler.result()
def get(url, options \\ []) do
request(:get, url, options)
end
@doc """
Makes a request using the POST HTTP method, and can take a body.
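
  ```
  Elastic.HTTP.post(Elastic.base_url() <> "/answers/answer/1", body: %{
    text: "I like using Elastic Search"
  })
  ```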
"""
@spec post(url(), Keyword.t()) :: ResponseHandler.result()
def post(url, options \\ []) do
request(:post, url, options)
end
@doc """
Makes a request using the PUT HTTP method:
```
Elastic.HTTP.put("/answers/answer/1", body: %{
text: "I like using Elastic Search"
})
```
"""
@spec put(url(), Keyword.t()) :: ResponseHandler.result()
def put(url, options \\ []) do
request(:put, url, options)
end
@doc """
Makes a request using the DELETE HTTP method:
```
Elastic.HTTP.delete("/answers/answer/1")
```
"""
@spec delete(url(), Keyword.t()) :: ResponseHandler.result()
def delete(url, options \\ []) do
request(:delete, url, options)
end
@doc """
Makes a request using the HEAD HTTP method:
```
Elastic.HTTP.head("/answers")
```
"""
@spec head(url(), Keyword.t()) :: ResponseHandler.result()
def head(url, options \\ []) do
request(:head, url, options)
end
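  @doc """
  Makes a request against the `_bulk` endpoint.

  The body must be in the newline-delimited JSON format expected by
  Elasticsearch; a trailing newline is appended automatically. A sketch:

  ```
  body = Enum.join([
    ~s({"index": {"_index": "answers", "_id": "1"}}),
    ~s({"text": "I like using Elastic Search"})
  ], "\\n")

  Elastic.HTTP.bulk(body: body)
  ```
  """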
@spec bulk(Keyword.t()) :: ResponseHandler.result()
def bulk(options) do
body = Keyword.get(options, :body, "") <> "\n"
options = Keyword.put(options, :body, body)
url = Elastic.base_url() <> "/_bulk"
request(:post, url, options)
end
@spec request(method(), url(), Keyword.t()) :: ResponseHandler.result()
defp request(method, url, options) do
body = Keyword.get(options, :body, []) |> encode_body
timeout = Application.get_env(:elastic, :timeout, 30_000)
options =
options
|> Keyword.put_new(:headers, Keyword.new())
|> Keyword.put(:body, body)
|> Keyword.put(:timeout, timeout)
|> Keyword.put(:method, method)
|> Keyword.put(:url, url)
|> add_content_type_header
|> add_aws_header(method, url, body)
middlewares =
Keyword.get(options, :middlewares, []) ++
[
basic_auth_middleware(options)
]
client = Tesla.client(middlewares, Tesla.Adapter.Hackney)
Tesla.request(client, options) |> process_response
end
@spec add_content_type_header(Keyword.t()) :: Keyword.t()
defp add_content_type_header(options) do
headers = Keyword.put(options[:headers], :"Content-Type", "application/json")
Keyword.put(options, :headers, headers)
end
@spec add_aws_header(Keyword.t(), method, url(), binary()) :: Keyword.t()
def add_aws_header(options, method, url, body) do
if AWS.enabled?() do
headers =
AWS.authorization_headers(
method,
url,
options[:headers],
body
)
|> Enum.reduce(options[:headers], fn {header, value}, headers ->
Keyword.put(headers, String.to_atom(header), value)
end)
Keyword.put(options, :headers, headers)
else
options
end
end
@spec basic_auth_middleware(Keyword.t()) :: {atom(), map()}
def basic_auth_middleware(options) do
{username, password} = Keyword.get(options, :basic_auth, Elastic.basic_auth())
{Tesla.Middleware.BasicAuth, %{username: username, password: password}}
end
@spec kibana_middleware() :: {atom(), list({binary(), binary()})}
def kibana_middleware do
{Tesla.Middleware.Headers, [{"kbn-xsrf", "true"}]}
end
@spec process_response(Env.result()) :: ResponseHandler.result()
defp process_response(response) do
ResponseHandler.process(response)
end
@spec encode_body(any()) :: binary()
defp encode_body(body) when is_binary(body) do
body
end
defp encode_body(body) when is_map(body) and body != %{} do
{:ok, encoded_body} = Jason.encode(body)
encoded_body
end
defp encode_body(_body) do
""
end
end
|
lib/elastic/http.ex
| 0.906439 | 0.772187 |
http.ex
|
starcoder
|
defmodule Decimal.Context do
import Decimal.Macros
alias Decimal.Context
@moduledoc """
The context is kept in the process dictionary. It can be accessed with
`get/0` and `set/1`.
The default context has a precision of 28, the rounding algorithm is
`:half_up`. The set trap enablers are `:invalid_operation` and
`:division_by_zero`.
## Fields
* `precision` - maximum number of decimal digits in the coefficient. If an
operation result has more digits it will be rounded to `precision`
digits with the rounding algorithm in `rounding`.
  * `rounding` - the rounding algorithm used when the coefficient's number of
    digits exceeds `precision`. The strategies are explained below.
  * `flags` - a list of signals for which the flag is set. When an
exceptional condition is signalled its flag is set. The flags are sticky
and will be set until explicitly cleared.
* `traps` - a list of set trap enablers for signals. When a signal's trap
enabler is set the condition causes `Decimal.Error` to be raised.
## Rounding algorithms
* `:down` - round toward zero (truncate). Discarded digits are ignored,
result is unchanged.
* `:half_up` - if the discarded digits is greater than or equal to half of
the value of a one in the next left position then the coefficient will be
incremented by one (rounded up). Otherwise (the discarded digits are less
than half) the discarded digits will be ignored.
* `:half_even` - also known as "round to nearest" or "banker's rounding". If
the discarded digits is greater than half of the value of a one in the
next left position then the coefficient will be incremented by one
(rounded up). If they represent less than half discarded digits will be
ignored. Otherwise (exactly half), the coefficient is not altered if it's
even, or incremented by one (rounded up) if it's odd (to make an even
number).
* `:ceiling` - round toward +Infinity. If all of the discarded digits are
zero or the sign is negative the result is unchanged. Otherwise, the
coefficient will be incremented by one (rounded up).
* `:floor` - round toward -Infinity. If all of the discarded digits are zero
or the sign is positive the result is unchanged. Otherwise, the sign is
negative and coefficient will be incremented by one.
* `:half_down` - if the discarded digits is greater than half of the value
of a one in the next left position then the coefficient will be
incremented by one (rounded up). Otherwise (the discarded digits are half
or less) the discarded digits are ignored.
* `:up` - round away from zero. If all discarded digits are zero the
coefficient is not changed, otherwise it is incremented by one (rounded
up).
This table shows the results of rounding operations for all the rounding
algorithms:
Rounding algorithm | 5.5 | 2.5 | 1.6 | 1.1 | 1.0 | -1.0 | -1.1 | -1.6 | -2.5 | -5.5
:----------------- | :-- | :-- | :-- | :-- | :-- | :--- | :--- | :--- | :--- | :---
`:up` | 6 | 3 | 2 | 2 | 1 | -1 | -2 | -2 | -3 | -6
`:down` | 5 | 2 | 1 | 1 | 1 | -1 | -1 | -1 | -2 | -5
`:ceiling` | 6 | 3 | 2 | 2 | 1 | -1 | -1 | -1 | -2 | -5
`:floor` | 5 | 2 | 1 | 1 | 1 | -1 | -2 | -2 | -3 | -6
`:half_up` | 6 | 3 | 2 | 1 | 1 | -1 | -1 | -2 | -3 | -6
`:half_down` | 5 | 2 | 2 | 1 | 1 | -1 | -1 | -2 | -2 | -5
`:half_even` | 6 | 2 | 2 | 1 | 1 | -1 | -1 | -2 | -2 | -6
"""
@type t :: %__MODULE__{
precision: pos_integer,
rounding: Decimal.rounding(),
flags: [Decimal.signal()],
traps: [Decimal.signal()]
}
defstruct precision: 28,
rounding: :half_up,
flags: [],
traps: [:invalid_operation, :division_by_zero]
@context_key :"$decimal_context"
@doc """
Runs function with given context.
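
      iex> Decimal.Context.with(%Decimal.Context{precision: 9}, fn ->
      ...>   Decimal.Context.get().precision
      ...> end)
      9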
"""
doc_since("1.9.0")
@spec with(t(), (() -> x)) :: x when x: var
def with(%Context{} = context, fun) when is_function(fun, 0) do
old = Process.put(@context_key, context)
try do
fun.()
after
set(old || %Context{})
end
end
@doc """
Gets the process' context.
"""
doc_since("1.9.0")
@spec get() :: t()
def get() do
Process.get(@context_key, %Context{})
end
@doc """
Set the process' context.
"""
doc_since("1.9.0")
@spec set(t()) :: :ok
def set(%Context{} = context) do
Process.put(@context_key, context)
:ok
end
@doc """
Update the process' context.
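
      iex> Decimal.Context.update(&%{&1 | rounding: :half_even})
      :ok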
"""
doc_since("1.9.0")
@spec update((t() -> t())) :: :ok
def update(fun) when is_function(fun, 1) do
get() |> fun.() |> set()
end
end
|
lib/decimal/context.ex
| 0.801159 | 0.789964 |
context.ex
|
starcoder
|
defmodule ExPlasma.Transaction.Type.PaymentV1.Validator do
@moduledoc """
Contain stateless validation logic for Payment V1 transactions
"""
alias ExPlasma.Output
alias ExPlasma.Transaction.TypeMapper
@empty_tx_data 0
@output_limit 4
@output_type TypeMapper.output_type_for(:output_payment_v1)
@type inputs_validation_error() :: {:inputs, :duplicate_inputs} | {:inputs, :cannot_exceed_maximum_value}
@type outputs_validation_error() ::
{:outputs, :cannot_exceed_maximum_value}
| {:outputs, :cannot_subceed_minimum_value}
| {:outputs, :invalid_output_type_for_transaction}
defmacro is_metadata(metadata) do
quote do
is_binary(unquote(metadata)) and byte_size(unquote(metadata)) == 32
end
end
@spec validate_inputs(list(Output)) :: :ok | {:error, inputs_validation_error()}
def validate_inputs(inputs) do
with :ok <- validate_generic_output(inputs),
:ok <- validate_unique_inputs(inputs),
:ok <- validate_outputs_count(:inputs, inputs, 0) do
:ok
end
end
@spec validate_outputs(list(Output)) :: :ok | {:error, outputs_validation_error()}
def validate_outputs(outputs) do
with :ok <- validate_generic_output(outputs),
:ok <- validate_outputs_count(:outputs, outputs, 1),
:ok <- validate_outputs_type(outputs) do
:ok
end
end
@spec validate_tx_data(any()) :: :ok | {:error, {:tx_data, :malformed_tx_data}}
# txData is required to be zero in the contract
def validate_tx_data(@empty_tx_data), do: :ok
def validate_tx_data(_), do: {:error, {:tx_data, :malformed_tx_data}}
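  @doc """
  Validates that the metadata is a 32-byte binary.

      iex> ExPlasma.Transaction.Type.PaymentV1.Validator.validate_metadata(<<0::256>>)
      :ok
  """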
@spec validate_metadata(<<_::256>>) :: :ok | {:error, {:metadata, :malformed_metadata}}
def validate_metadata(metadata) when is_metadata(metadata), do: :ok
def validate_metadata(_), do: {:error, {:metadata, :malformed_metadata}}
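# Illustrative checks (a sketch; `String.duplicate/2` builds a 32-byte binary
# that satisfies the `is_metadata/1` guard):
#
#     :ok = validate_tx_data(0)
#     :ok = validate_metadata(String.duplicate("a", 32))
#     {:error, {:metadata, :malformed_metadata}} = validate_metadata("too short")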
defp validate_generic_output([output | rest]) do
case Output.validate(output) do
:ok -> validate_generic_output(rest)
error -> error
end
end
defp validate_generic_output([]), do: :ok
defp validate_unique_inputs(inputs) do
case inputs == Enum.uniq(inputs) do
true -> :ok
false -> {:error, {:inputs, :duplicate_inputs}}
end
end
defp validate_outputs_count(field, list, _min_limit) when length(list) > @output_limit do
{:error, {field, :cannot_exceed_maximum_value}}
end
defp validate_outputs_count(field, list, min_limit) when length(list) < min_limit do
{:error, {field, :cannot_subceed_minimum_value}}
end
defp validate_outputs_count(_field, _list, _min_limit), do: :ok
defp validate_outputs_type(outputs) do
case Enum.all?(outputs, &(&1.output_type == @output_type)) do
true -> :ok
false -> {:error, {:outputs, :invalid_output_type_for_transaction}}
end
end
end
|
lib/ex_plasma/transaction/type/payment_v1/payment_v1_validator.ex
| 0.829112 | 0.40389 |
payment_v1_validator.ex
|
starcoder
|
defmodule DataMiner.Eclat do
@moduledoc """
Documentation for `Eclat` Algorithm Implementation.
"""
@transactions_file Path.expand("../data/transactions_items.txt")
@result_save_file Path.expand("../results/eclat_frequents.txt")
@doc """
Main function to run the algorithm with a minimum support.
This function takes the minimum support as input.
This number is expressed as a percentage.
At the end of the function, the result of the `Eclat` algorithm will
be saved to a file.
"""
def main(min_supp) do
transactions = import_transactions()
itemsets =
transactions
|> transactions_to_eclat_form()
start = Time.utc_now()
eclat(itemsets, [], min_supp, length(transactions))
|> List.flatten()
|> export_frequents()
endt = Time.utc_now()
IO.inspect("total time: #{Time.diff(endt, start)}s")
end
@doc """
Export frequents will export all frequent itemsets to a file.
"""
def export_frequents(frequents) do
{:ok, file} = File.open(@result_save_file, [:write])
Enum.each(frequents, fn {itemset, transactions} ->
itemset
|> Enum.each(fn item ->
IO.write(file, "#{item} | ")
end)
IO.write(file, "#{MapSet.size(transactions)}\n")
end)
end
@doc """
Implementation of the Eclat algorithm; this function returns all frequent itemsets.
## Examples
iex> DataMiner.Eclat.eclat([{[:a], MapSet.new([2])}, {[:b], MapSet.new([2])}], [], 0.1, 3)
[[{[:b, :a], #MapSet<[2]>}], [{[:a], #MapSet<[2]>}, {[:b], #MapSet<[2]>}]]
"""
def eclat([], frequents, _, _) do
IO.inspect("ends")
frequents
end
def eclat(itemsets, frequents, min_supp, transactions_length) do
IO.inspect("eclat!")
supported_itemsets = remove_low_frequencies(itemsets, min_supp, transactions_length)
IO.inspect("supported")
supported_itemsets
|> merge_itemsets()
|> eclat([supported_itemsets | frequents], min_supp, transactions_length)
end
@doc """
This function will merge a list of itemsets to a list of sub itemsets.
So input is a list of itemsets and output is a list of merged itemsets.
`note: Commented codes are a parallel code for merging.`
## Examples
iex> DataMiner.Eclat.merge_itemsets([{[2, 1], MapSet.new([2])}, {[3, 1], MapSet.new([2])}])
[{[3, 2, 1], #MapSet<[2]>}]
"""
def merge_itemsets(itemsets) do
IO.inspect("merging #{length(itemsets)}")
# itemsets
# |> Stream.with_index(1)
# |> Flow.from_enumerable()
# |> Flow.partition()
# |> Flow.flat_map(fn {{[base_item | tail_base_itemset], base_transactions}, index} ->
# itemsets
# |> Stream.drop(index)
# |> Stream.filter(fn {[_ | tail_itemset], _} -> tail_itemset == tail_base_itemset end)
# |> Enum.map(fn {[item | _], transactions} ->
# {[item | [base_item | tail_base_itemset]],
# MapSet.intersection(base_transactions, transactions)}
# end)
# end)
# |> Enum.to_list()
itemsets
|> Stream.with_index(1)
|> Stream.flat_map(fn {{[base_item | tail_base_itemset], base_transactions}, index} ->
itemsets
|> Stream.drop(index)
|> Stream.filter(fn {[_ | tail_itemset], _} -> tail_itemset == tail_base_itemset end)
|> Stream.map(fn {[item | _], transactions} ->
{[item | [base_item | tail_base_itemset]],
MapSet.intersection(base_transactions, transactions)}
end)
|> Enum.to_list()
end)
|> Enum.to_list()
end
@doc """
This function will merge an itemset with another itemset.
What does it mean to merge itemsets into a sub itemset?
If we have `a = [1, 2, 4]` and `b = [1, 2, 5]`
then their merge will be: `result = [1, 2, 4, 5]`
`a` and `b` can be merged because the (0 .. k-1)th items in their lists are identical.
In this module, to avoid list overhead, we merge lists by their (1 .. k)th items. If `a = [2, 1]` and `b = [3, 1]`
then their merge under this algorithm will be: `result = [3, 2, 1]`
## Examples
iex> DataMiner.Eclat.merge_itemsets([{[2, 1], MapSet.new([2])}, {[3, 1], MapSet.new([2])}])
[{[3, 2, 1], #MapSet<[2]>}]
"""
def merger({base_item, base_transactions}, {item, transactions}, group_tail) do
{[item | [base_item | group_tail]], MapSet.intersection(base_transactions, transactions)}
end
@doc """
When itemsets are merged successfully, we should pipe them into `remove_low_frequencies`,
which removes every itemset whose transaction-set size yields a support below the minimum support.
This is the downward-closure property!
## Examples
iex> DataMiner.Eclat.remove_low_frequencies([{[:a], MapSet.new([1, 2, 3])}, {[:b], MapSet.new([1])}], 50, 3)
[{[:a], #MapSet<[1, 2, 3]>}]
"""
def remove_low_frequencies(itemsets, min_supp, transactions_length) do
itemsets
|> Enum.filter(fn {_item, transactions} ->
support(MapSet.size(transactions), transactions_length) >= min_supp
end)
|> IO.inspect()
end
@doc """
support will calculate support of an itemset by its frequency
"""
def support(item_frequency, transactions_length) do
item_frequency / transactions_length * 100
end
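# e.g. an itemset contained in 3 of 6 transactions has support(3, 6) == 50.0,
# so it survives a minimum support threshold of 50 (percent).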
@doc """
This function will take the main transactions and return their Eclat form.
The Eclat form is a `map` from each item to the set of transactions that contain it!
## Examples
iex> DataMiner.Eclat.transactions_to_eclat_form([["1", "2"], ["2", "4"], ["1", "5"], ["1", "6", "7", "3", "1", "2", "9"]])
%{[:"1"] => #MapSet<[0, 2, 3]>, [:"2"] => #MapSet<[0, 1, 3]>, [:"3"] => #MapSet<[3]>, [:"4"] => #MapSet<[1]>, [:"5"] => #MapSet<[2]>, [:"6"] => #MapSet<[3]>, [:"7"] => #MapSet<[3]>, [:"9"] => #MapSet<[3]>}
"""
def transactions_to_eclat_form(transactions) do
transactions
|> Stream.with_index()
|> Enum.reduce(%{}, fn {transaction, index}, items ->
Enum.frequencies(transaction)
|> Map.new(fn {item, _} -> {[String.to_atom(item)], MapSet.new([index])} end)
|> Map.merge(items, fn _k, v1, v2 -> MapSet.union(v1, v2) end)
end)
end
@doc """
import transactions file.
"""
def import_transactions do
Path.expand(@transactions_file)
|> import_file()
|> Enum.to_list()
end
@doc """
Import file.
"""
def import_file(file_address) do
File.stream!(file_address)
|> Stream.map(&String.trim/1)
|> Stream.map(fn line -> String.split(line, "|") |> Enum.filter(fn word -> word != "" end) end)
|> Stream.drop(1)
end
end
|
data_miner/lib/eclat.ex
| 0.834609 | 0.592637 |
eclat.ex
|
starcoder
|
defmodule Conversion.Time do
@moduledoc """
This module is in charge of converting from one time value to another, e.g. seconds to minutes
"""
@year_const 365.25
@typedoc """
- `:value` - Atom representation of time measurement. e.g. :seconds, :minutes, :hours, :days, :weeks, :years
"""
@type value :: :milliseconds | :seconds | :minutes | :hours | :days | :weeks | :years
@doc """
Given a time `time_value` along with two measurements, e.g. `:milliseconds` and `:hours`,
converts from one measurement to the other. The first unit is the one we are converting
from and the latter is the unit we are converting to.
## Examples
iex> Conversion.Time.convert(1, :seconds, :milliseconds)
1_000
iex> Conversion.Time.convert(1, :hours, :seconds)
3_600
"""
@spec convert(time_value :: float(), from :: value(), to :: value()) :: float()
def convert(time_value, :milliseconds, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: time_value
def convert(time_value, :milliseconds, :seconds)
when is_float(time_value) or is_integer(time_value),
do: time_value / 1000
def convert(time_value, :milliseconds, :minutes)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :milliseconds, :seconds) / 60
def convert(time_value, :milliseconds, :hours)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :milliseconds, :minutes) / 60
def convert(time_value, :milliseconds, :days)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :milliseconds, :hours) / 24
def convert(time_value, :milliseconds, :weeks)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :milliseconds, :days) / 7
def convert(time_value, :milliseconds, :years)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :milliseconds, :days) / @year_const
def convert(time_value, :seconds, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: time_value * 1000
def convert(time_value, :seconds, :seconds) when is_float(time_value) or is_integer(time_value),
do: time_value
def convert(time_value, :seconds, :minutes) when is_float(time_value) or is_integer(time_value),
do: time_value / 60
def convert(time_value, :seconds, :hours) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :seconds, :minutes) / 60
def convert(time_value, :seconds, :days) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :seconds, :hours) / 24
def convert(time_value, :seconds, :weeks) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :seconds, :days) / 7
def convert(time_value, :seconds, :years) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :seconds, :days) / @year_const
def convert(time_value, :minutes, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :minutes, :seconds) * 1000
def convert(time_value, :minutes, :seconds) when is_float(time_value) or is_integer(time_value),
do: time_value * 60
def convert(time_value, :minutes, :minutes) when is_float(time_value) or is_integer(time_value),
do: time_value
def convert(time_value, :minutes, :hours) when is_float(time_value) or is_integer(time_value),
do: time_value / 60
def convert(time_value, :minutes, :days) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :minutes, :hours) / 24
def convert(time_value, :minutes, :weeks) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :minutes, :days) / 7
def convert(time_value, :minutes, :years) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :minutes, :days) / @year_const
def convert(time_value, :hours, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :hours, :seconds) * 1000
def convert(time_value, :hours, :seconds) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :hours, :minutes) * 60
def convert(time_value, :hours, :minutes) when is_float(time_value) or is_integer(time_value),
do: time_value * 60
def convert(time_value, :hours, :hours) when is_float(time_value) or is_integer(time_value),
do: time_value
def convert(time_value, :hours, :days) when is_float(time_value) or is_integer(time_value),
do: time_value / 24
def convert(time_value, :hours, :weeks) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :hours, :days) / 7
def convert(time_value, :hours, :years) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :hours, :days) / @year_const
def convert(time_value, :days, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :days, :seconds) * 1000
def convert(time_value, :days, :seconds) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :days, :minutes) * 60
def convert(time_value, :days, :minutes) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :days, :hours) * 60
def convert(time_value, :days, :hours) when is_float(time_value) or is_integer(time_value),
do: time_value * 24
def convert(time_value, :days, :days) when is_float(time_value) or is_integer(time_value),
do: time_value
def convert(time_value, :days, :weeks) when is_float(time_value) or is_integer(time_value),
do: time_value / 7
def convert(time_value, :days, :years) when is_float(time_value) or is_integer(time_value),
do: time_value / @year_const
def convert(time_value, :weeks, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :weeks, :seconds) * 1000
def convert(time_value, :weeks, :seconds) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :weeks, :minutes) * 60
def convert(time_value, :weeks, :minutes) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :weeks, :hours) * 60
def convert(time_value, :weeks, :hours) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :weeks, :days) * 24
def convert(time_value, :weeks, :days) when is_float(time_value) or is_integer(time_value),
do: time_value * 7
def convert(time_value, :weeks, :weeks) when is_float(time_value) or is_integer(time_value),
do: time_value
def convert(time_value, :weeks, :years) when is_float(time_value) or is_integer(time_value),
do: time_value / (@year_const / 7)
def convert(time_value, :years, :milliseconds)
when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :years, :seconds) * 1000
def convert(time_value, :years, :seconds) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :years, :minutes) * 60
def convert(time_value, :years, :minutes) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :years, :hours) * 60
def convert(time_value, :years, :hours) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :years, :days) * 24
def convert(time_value, :years, :days) when is_float(time_value) or is_integer(time_value),
do: convert(time_value, :years, :weeks) * 7
def convert(time_value, :years, :weeks) when is_float(time_value) or is_integer(time_value),
do: time_value * (@year_const / 7)
def convert(time_value, :years, :years) when is_float(time_value) or is_integer(time_value),
do: time_value
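# Sanity check (a sketch; exact for these values, since every step is a clean
# multiplication or division):
#
#     86_400.0 = Conversion.Time.convert(1.0, :days, :seconds)
#     1.0 = Conversion.Time.convert(86_400.0, :seconds, :days)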
end
|
lib/conversion/time.ex
| 0.908303 | 0.864939 |
time.ex
|
starcoder
|
require Logger
defmodule FileSystem.Backends.FSMac do
@moduledoc """
File system backend for MacOS.
The built-in executable file will be compiled upon first use.
This file is a fork from https://github.com/synrc/fs.
## Backend Options
* `:latency` (float, default: 0.5), latency period.
* `:no_defer` (bool, default: false), enable no-defer latency modifier.
Works with latency parameter.
See `FSEvent` API documents
https://developer.apple.com/documentation/coreservices/kfseventstreamcreateflagnodefer.
* `:watch_root` (bool, default: false), watch for when the root path has changed.
Set the flag `true` to monitor events when watching `/tmp/fs/dir` and run
`mv /tmp/fs /tmp/fx`.
See `FSEvent` API documents
https://developer.apple.com/documentation/coreservices/kfseventstreamcreateflagwatchroot.
* recursive watching is enabled by default and it can't be disabled for now.
## Executable File Path
Useful when running `:file_system` with escript.
The default listener executable file is `priv/mac_listener` within the folder of
`:file_system` application.
Two ways to customize the executable file path:
* Module config with `config.exs`:
```elixir
config :file_system, :fs_mac,
executable_file: "YOUR_EXECUTABLE_FILE_PATH"
```
* System environment variable:
```
export FILESYSTEM_FSMAC_EXECUTABLE_FILE="YOUR_EXECUTABLE_FILE_PATH"
```
"""
use GenServer
@behaviour FileSystem.Backend
@default_exec_file "mac_listener"
def bootstrap do
exec_file = executable_path()
if not is_nil(exec_file) and File.exists?(exec_file) do
:ok
else
Logger.error "Can't find executable `mac_listener`"
{:error, :fs_mac_bootstrap_error}
end
end
def supported_systems do
[{:unix, :darwin}]
end
def known_events do
[ :mustscansubdirs, :userdropped, :kerneldropped, :eventidswrapped, :historydone,
:rootchanged, :mount, :unmount, :created, :removed, :inodemetamod, :renamed, :modified,
:finderinfomod, :changeowner, :xattrmod, :isfile, :isdir, :issymlink, :ownevent,
]
end
defp executable_path do
executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) || executable_path(:priv)
end
defp executable_path(:config) do
Application.get_env(:file_system, :fs_mac)[:executable_file]
end
defp executable_path(:system_env) do
System.get_env("FILESYSTEM_FSMAC_EXECUTABLE_FILE")
end
defp executable_path(:system_path) do
System.find_executable(@default_exec_file)
end
defp executable_path(:priv) do
case :code.priv_dir(:file_system) do
{:error, _} ->
Logger.error "`priv` dir for `:file_system` application is not available in the current runtime, specify the executable file with `config.exs` or the `FILESYSTEM_FSMAC_EXECUTABLE_FILE` env variable."
nil
dir when is_list(dir) ->
Path.join(dir, @default_exec_file)
end
end
def parse_options(options) do
case Keyword.pop(options, :dirs) do
{nil, _} ->
Logger.error "required argument `dirs` is missing"
{:error, :missing_dirs_argument}
{dirs, rest} ->
args = ['-F' | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1)]
parse_options(rest, args)
end
end
defp parse_options([], result), do: {:ok, result}
defp parse_options([{:latency, latency} | t], result) do
result =
if is_float(latency) or is_integer(latency) do
['--latency=#{latency / 1}' | result]
else
Logger.error "latency should be integer or float, got `#{inspect latency}`, ignore"
result
end
parse_options(t, result)
end
defp parse_options([{:no_defer, true} | t], result) do
parse_options(t, ['--no-defer' | result])
end
defp parse_options([{:no_defer, false} | t], result) do
parse_options(t, result)
end
defp parse_options([{:no_defer, value} | t], result) do
Logger.error "unknown value `#{inspect value}` for no_defer, ignore"
parse_options(t, result)
end
defp parse_options([{:watch_root, true} | t], result) do
parse_options(t, ['--with-root' | result])
end
defp parse_options([{:watch_root, false} | t], result) do
parse_options(t, result)
end
defp parse_options([{:watch_root, value} | t], result) do
Logger.error "unknown value `#{inspect value}` for watch_root, ignore"
parse_options(t, result)
end
defp parse_options([h | t], result) do
Logger.error "unknown option `#{inspect h}`, ignore"
parse_options(t, result)
end
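# Illustrative result of the clauses above (a sketch):
#
#     parse_options(dirs: ["/tmp/fs"], latency: 0.25)
#     #=> {:ok, ['--latency=0.25', '-F', '/tmp/fs']}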
def start_link(args) do
GenServer.start_link(__MODULE__, args, [])
end
def init(args) do
{worker_pid, rest} = Keyword.pop(args, :worker_pid)
case parse_options(rest) do
{:ok, port_args} ->
port = Port.open(
{:spawn_executable, to_charlist(executable_path())},
[:stream, :exit_status, {:line, 16384}, {:args, port_args}, {:cd, System.tmp_dir!()}]
)
Process.link(port)
Process.flag(:trap_exit, true)
{:ok, %{port: port, worker_pid: worker_pid}}
{:error, _} ->
:ignore
end
end
def handle_info({port, {:data, {:eol, line}}}, %{port: port}=state) do
{file_path, events} = line |> parse_line
send(state.worker_pid, {:backend_file_event, self(), {file_path, events}})
{:noreply, state}
end
def handle_info({port, {:exit_status, _}}, %{port: port}=state) do
send(state.worker_pid, {:backend_file_event, self(), :stop})
{:stop, :normal, state}
end
def handle_info({:EXIT, port, _reason}, %{port: port}=state) do
send(state.worker_pid, {:backend_file_event, self(), :stop})
{:stop, :normal, state}
end
def handle_info(_, state) do
{:noreply, state}
end
def parse_line(line) do
[_, _, events, path] = line |> to_string |> String.split(["\t", "="], parts: 4)
{path, events |> String.split(["[", ",", "]"], trim: true) |> Enum.map(&String.to_existing_atom/1)}
end
end
|
lib/file_system/backends/fs_mac.ex
| 0.713831 | 0.731155 |
fs_mac.ex
|
starcoder
|
defmodule Bella.Controller do
@moduledoc """
`Bella.Controller` defines controller behaviours and generates boilerplate for generating Kubernetes manifests.
> A custom controller is a controller that users can deploy and update on a running cluster, independently of the cluster’s own lifecycle. Custom controllers can work with any kind of resource, but they are especially effective when combined with custom resources. The Operator pattern is one example of such a combination. It allows developers to encode domain knowledge for specific applications into an extension of the Kubernetes API.
Controllers allow for simple `add`, `modify`, `delete`, and `reconcile` handling of custom resources in the Kubernetes API.
"""
@callback add(map()) :: :ok | :error
@callback modify(map()) :: :ok | :error
@callback delete(map()) :: :ok | :error
@callback reconcile(map()) :: :ok | :error
@callback operation() :: K8s.Operation.t()
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Bella.Controller
# Trigger __before_compile__/1 below so the WatchServer and ReconcileServer
# child modules referenced in init/1 are generated for the using module.
@before_compile Bella.Controller
@client opts[:client] || K8s.Client
use Supervisor
def start_link(_) do
Supervisor.start_link(__MODULE__, %{}, name: __MODULE__)
end
@impl true
def init(_init_arg) do
children = [
{__MODULE__.WatchServer, name: __MODULE__.WatchServer},
{__MODULE__.ReconcileServer, name: __MODULE__.ReconcileServer}
]
Supervisor.init(children, strategy: :one_for_one)
end
@doc false
@spec client() :: any()
def client, do: @client
end
end
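# Illustrative usage (hypothetical resource kind and K8s operation):
#
#     defmodule MyOperator.WidgetController do
#       use Bella.Controller
#
#       @impl true
#       def operation, do: K8s.Client.list("example.com/v1", "Widget")
#
#       @impl true
#       def add(_resource), do: :ok
#       @impl true
#       def modify(_resource), do: :ok
#       @impl true
#       def delete(_resource), do: :ok
#       @impl true
#       def reconcile(_resource), do: :ok
#     end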
@doc false
defmacro __before_compile__(env) do
controller = env.module
quote bind_quoted: [controller: controller] do
defmodule WatchServer do
@moduledoc "Controller watcher implementation"
use Bella.Server.Watcher
@impl Bella.Server.Watcher
defdelegate add(resource), to: controller
@impl Bella.Server.Watcher
defdelegate modify(resource), to: controller
@impl Bella.Server.Watcher
defdelegate delete(resource), to: controller
@impl Bella.Server.Watcher
defdelegate watch_operation(), to: controller, as: :operation
end
defmodule ReconcileServer do
@moduledoc "Controller reconciler implementation"
use Bella.Server.Reconciler, frequency: 30
@impl Bella.Server.Reconciler
defdelegate reconcile(resource), to: controller
@impl Bella.Server.Reconciler
defdelegate reconcile_operation(), to: controller, as: :operation
end
end
end
end
|
lib/bella/controller.ex
| 0.891221 | 0.444203 |
controller.ex
|
starcoder
|
defmodule PixelFont.GlyphSource do
require PixelFont.RectilinearShape, as: RectilinearShape
require PixelFont.RectilinearShape.Path, as: Path
import PixelFont.DSL.MacroHelper
alias PixelFont.Glyph
alias PixelFont.Glyph.{BitmapData, CompositeData, VariationSequence}
@type source_options :: [based_on: module()]
@spec __using__(keyword()) :: Macro.t()
defmacro __using__(_options) do
quote do
import unquote(__MODULE__), only: [glyph_source: 2, glyph_source: 3]
end
end
@spec glyph_source(module(), do: Macro.t()) :: Macro.t()
@spec glyph_source(module(), source_options(), do: Macro.t()) :: Macro.t()
defmacro glyph_source(name, options \\ [], do: do_block) do
{exprs, _block} = get_exprs(do_block)
map_expr =
quote do
unquote(exprs)
|> List.flatten()
|> unquote(__MODULE__).__make_contours__()
end
|> handle_based_on(options[:based_on])
quote do
defmodule unquote(name) do
import unquote(__MODULE__), only: [bmp_glyph: 2, composite_glyph: 2]
@glyph_map unquote(map_expr)
@glyph_list @glyph_map |> Map.values() |> Enum.sort(&(&1.id <= &2.id))
def __glyph_map__, do: @glyph_map
def glyphs, do: @glyph_list
IO.puts("#{inspect(__MODULE__)}: Exported #{length(@glyph_list)} glyphs.")
end
end
end
defp handle_based_on(map_expr, expr)
defp handle_based_on(map_expr, nil), do: map_expr
defp handle_based_on(map_expr, module) when is_atom(module) do
quote(do: Map.merge(unquote(module).__glyph_map__(), unquote(map_expr)))
end
defp handle_based_on(map_expr, {:__aliases__, _, _} = alias_expr) do
quote(do: Map.merge(unquote(alias_expr).__glyph_map__(), unquote(map_expr)))
end
defp handle_based_on(_map_expr, x) do
raise "expected the value of :based_on keyword to be known " <>
"as an atom or an alias in compilation time, got: #{inspect(x)}"
end
defmacro bmp_glyph(id, do: block) do
{glyph_exprs, block} = get_exprs(block, expected: ~w(variations)a)
{bmp_data_exprs, _block} = get_exprs(block, expected: ~w(advance bounds data)a, warn: true)
data_expr =
quote do
struct!(
BitmapData,
[{:contours, []} | List.flatten(unquote(bmp_data_exprs))]
)
end
quote do
if true do
import unquote(__MODULE__), only: [advance: 1, bounds: 2, data: 1, variations: 2]
{unquote(id), unquote(glyph_expr(id, glyph_exprs, data_expr))}
end
end
end
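# Illustrative usage (a sketch with a hypothetical glyph; the `data` rows are
# later scanned by `__make_contours__/1` into rectilinear contour paths):
#
#     bmp_glyph 0x2E do
#       advance 4
#       bounds 0..3, 0..3
#       data """
#       .#.
#       ###
#       .#.
#       """
#     end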
Enum.each(~w(advance data)a, fn key ->
@spec unquote(key)(Macro.t()) :: Macro.t()
defmacro unquote(key)(expr), do: {unquote(key), expr}
end)
@spec bounds(Macro.t(), Macro.t()) :: Macro.t()
defmacro bounds(x_bounds, y_bounds) do
{:.., _, [xmin, xmax]} = x_bounds
{:.., _, [ymin, ymax]} = y_bounds
[xmin: xmin, xmax: xmax, ymin: ymin, ymax: ymax]
end
def __make_contours__(glyphs) do
glyphs
|> Task.async_stream(fn
{id, %Glyph{data: %BitmapData{} = data} = glyph} ->
contours =
data.data
|> String.split(~r/\r?\n/, trim: true)
|> Enum.map(&to_charlist/1)
|> RectilinearShape.from_bmp()
|> Path.transform({{1, 0}, {0, -1}}, {data.xmin, data.ymax})
{id, %Glyph{glyph | data: %BitmapData{data | contours: contours}}}
{id, %Glyph{} = glyph} ->
{id, glyph}
end)
|> Map.new(&elem(&1, 1))
end
defmacro composite_glyph(id, do: block) do
{glyph_exprs, block} = get_exprs(block, expected: ~w(variations)a)
{composite_data_exprs, _block} = get_exprs(block)
data_expr =
quote do
%CompositeData{
components: Enum.reject(unquote(composite_data_exprs), &is_nil/1)
}
end
quote do
if true do
import unquote(__MODULE__), only: [component: 3, component: 4, variations: 2]
{unquote(id), unquote(glyph_expr(id, glyph_exprs, data_expr))}
end
end
end
defmacro component(glyph_id, x_off, y_off) do
handle_component(glyph_id, x_off, y_off, [])
end
defmacro component(glyph_id, x_off, y_off, opts) do
handle_component(glyph_id, x_off, y_off, opts)
end
defp handle_component(glyph_id, x_off, y_off, opts) do
quote do
%{
glyph_id: unquote(glyph_id),
glyph: nil,
x_offset: unquote(x_off),
y_offset: unquote(y_off),
flags: unquote(opts)[:flags] || []
}
end
end
defp glyph_expr(id, glyph_exprs, data_expr) do
quote do
struct!(
Glyph,
[{:id, unquote(id)}, {:data, unquote(data_expr)} | unquote(glyph_exprs)]
)
end
end
defmacro variations([default: default_vs], do: block) when default_vs in 1..256 do
non_default_map_expr =
block
|> Map.new(fn
{:->, _, [[vs], target_glyph_id]} when vs in 1..256 ->
{vs, target_glyph_id}
end)
|> Macro.escape()
quote do
{:variations,
%VariationSequence{
default: unquote(default_vs),
non_default: unquote(non_default_map_expr)
}}
end
end
end
|
lib/pixel_font/glyph_source.ex
| 0.816516 | 0.501526 |
glyph_source.ex
|
starcoder
|
defmodule Automaton.Types.DECPOMDP do
@moduledoc """
Implements the Decentralized Partially Observable Markov Decision Process
(DEC-POMDP) state space representation for multi-agent control and prediction.
Each agent is goal-oriented, i.e. associated with a distinct, high-level goal
which it attempts to achieve.
At every stage the environment is in a particular state. This state emits a
joint observation according to the observation model from which each agent
observes its individual perception. Then each agent selects an action,
together forming the joint action, which leads to a state transition
according to the transition model. This means that in a Dec-POMDP, communication
has no special semantics.
An agent focuses on planning over a finite horizon, for which the
(undiscounted) expected cumulative reward is the commonly used optimality
criterion. The planning problem thus amounts to finding a tuple of policies,
called a joint policy, that maximizes the expected cumulative reward.
At each stage, each agent takes an action and receives:
• A local observation for local decision making
• A joint immediate reward
A Dec-POMDP can be defined with the tuple: { I, S, {A_i}, T, R, {Omega_i}, O, h }
• I, a finite set of agents
• S, a finite set of states with designated initial distribution b^0
• A_i, each agents finite set of actions
• T, the state transition model P(s'|s,a->), giving the probability distribution
over next states; depends on all agents
• R, the reward model, depends on all agents
• Omega_i, each agents finite set of observations
• O, the observation model: P(o|s',a->), depends on all agents
• h, horizon or discount factor
DECPOMDP
• considers outcome, sensory, and communication uncertainty in a single
framework
• Can model any multi-agent coordination problem
• Macro-actions provide an abstraction to improve scalability
• Learning methods can remove the need to generate a detailed multi-agent model
• Methods also apply when less uncertainty
• Has begun demonstrating scalability and quality in a number of domains, but a lot
of great open questions remain to be solved
"""
# alias Automaton.Types.DECPOMDP.Config.Parser
defmacro __using__(_opts) do
end
end
|
lib/automata/automaton_types/reinforcement_learning/decpomdp/decpomdp.ex
| 0.811825 | 0.888807 |
decpomdp.ex
|
starcoder
|
defmodule Cldr.LocaleDisplay do
@moduledoc """
Implements the [CLDR locale display name algorithm](https://unicode-org.github.io/cldr/ldml/tr35-general.html#locale_display_name_algorithm) to format
`t:Cldr.LanguageTag` structs for presentation uses.
"""
@doc false
def cldr_backend_provider(config) do
Cldr.LocaleDisplay.Backend.define_locale_display_module(config)
end
import Cldr.LanguageTag, only: [empty?: 1]
alias Cldr.LanguageTag
@basic_tag_order [:language, :script, :territory, :language_variants]
@extension_order [:transform, :locale, :extensions]
@omit_script_if_only_one false
@type display_options :: [
{:compound_locale, boolean()},
{:prefer, atom()},
{:locale, Cldr.Locale.locale_name() | Cldr.LanguageTag.t()},
{:backend, Cldr.backend()}
]
@doc """
Returns a localised display name for a
locale.
UI applications often have a requirement
to present locale choices to an end user.
This function takes a `t:Cldr.LanguageTag`
and using the [CLDR locale display name algorithm](https://unicode-org.github.io/cldr/ldml/tr35-general.html#locale_display_name_algorithm)
produces a string suitable for presentation.
## Arguments
* `language_tag` is any `t:Cldr.LanguageTag` or
a binary locale name.
* `options` is a keyword list of options.
## Options
* `:compound_locale` is a boolean indicating
if the combination of language, script and territory
should be used to resolve a language name.
The default is `true`.
* `:prefer` signals the preferred name for
a subtag when there are alternatives.
The default is `:default`. Few subtags
provide alternative renderings. Some of
the alternative preferences are`:short`,
`:long`, `:menu` and `:variant`.
* `:locale` is a `t:Cldr.LanguageTag` or any valid
locale name returned by `Cldr.known_locale_names/1`.
* `:backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module. The default is `Cldr.default_backend!/0`.
## Returns
* `{:ok, string}` representing a name
suitable for presentation purposes or
* `{:error, {exception, reason}}`
## Examples
iex> Cldr.LocaleDisplay.display_name "en"
{:ok, "English"}
iex> Cldr.LocaleDisplay.display_name "en-US"
{:ok, "American English"}
iex> Cldr.LocaleDisplay.display_name "en-US", compound_locale: false
{:ok, "English (United States)"}
iex> Cldr.LocaleDisplay.display_name "en-US-u-ca-gregory-cu-aud"
{:ok, "American English (Gregorian Calendar, Currency: A$)"}
iex> Cldr.LocaleDisplay.display_name "en-US-u-ca-gregory-cu-aud", locale: "fr"
{:ok, "anglais américain (calendrier grégorien, devise : A$)"}
iex> Cldr.LocaleDisplay.display_name "nl-BE"
{:ok, "Flemish"}
iex> Cldr.LocaleDisplay.display_name "nl-BE", compound_locale: false
{:ok, "Dutch (Belgium)"}
"""
@spec display_name(Cldr.Locale.locale_name() | Cldr.LanguageTag.t(), display_options()) ::
{:ok, String.t()} | {:error, {module(), String.t()}}
def display_name(language_tag, options \\ [])
def display_name(language_tag, options) when is_binary(language_tag) do
{_in_locale, backend} = Cldr.locale_and_backend_from(options)
options = Keyword.put_new(options, :add_likely_subtags, false)
with {:ok, locale} <- Cldr.Locale.canonical_language_tag(language_tag, backend, options) do
display_name(locale, options)
end
end
def display_name(%LanguageTag{} = language_tag, options) do
{in_locale, backend} = Cldr.locale_and_backend_from(options)
compound_locale? = !!Keyword.get(options, :compound_locale, true)
prefer = Keyword.get(options, :prefer, :default)
with {:ok, in_locale} <- Cldr.validate_locale(in_locale, backend) do
options = Keyword.put(options, :locale, in_locale)
{:ok, display_names} =
Module.concat(in_locale.backend, :LocaleDisplay).display_names(in_locale)
match_fun = &language_match_fun(&1, &2, display_names.language)
{language_name, matched_tags} =
first_match(language_tag, match_fun, @omit_script_if_only_one, compound_locale?, prefer)
subtag_names =
language_tag
|> subtag_names(@basic_tag_order -- matched_tags, display_names, prefer)
|> List.flatten()
|> join_subtags(display_names)
language_tag = merge_extensions_and_private_use(language_tag)
extension_names =
@extension_order
|> Enum.map(&Cldr.DisplayName.display_name(Map.fetch!(language_tag, &1), options))
|> Enum.reject(&empty?/1)
|> join_subtags(display_names)
{:ok, format_display_name(language_name, subtag_names, extension_names, display_names)}
end
end
@doc """
Returns a localised display name for a
locale.
UI applications often have a requirement
to present locale choices to an end user.
This function takes a `t:Cldr.LanguageTag`
and using the [CLDR locale display name algorithm](https://unicode-org.github.io/cldr/ldml/tr35-general.html#locale_display_name_algorithm)
produces a string suitable for presentation.
## Arguments
* `language_tag` is any `t:Cldr.LanguageTag` or
a binary locale name.
* `options` is a keyword list of options.
## Options
* `:compound_locale` is a boolean indicating
if the combination of language, script and territory
should be used to resolve a language name.
The default is `true`.
* `:prefer` signals the preferred name for
a subtag when there are alternatives.
The default is `:default`. Few subtags
provide alternative renderings. Some of
the alternative preferences are`:short`,
`:long`, `:menu` and `:variant`.
* `:locale` is a `t:Cldr.LanguageTag` or any valid
locale name returned by `Cldr.known_locale_names/1`.
* `:backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module. The default is `Cldr.default_backend!/0`.
## Returns
* a string representation of the language tag
suitable for presentation purposes or
* raises an exception.
## Examples
iex> Cldr.LocaleDisplay.display_name! "en"
"English"
iex> Cldr.LocaleDisplay.display_name! "en-US"
"American English"
iex> Cldr.LocaleDisplay.display_name! "en-US", compound_locale: false
"English (United States)"
iex> Cldr.LocaleDisplay.display_name! "en-US-u-ca-gregory-cu-aud"
"American English (Gregorian Calendar, Currency: A$)"
iex> Cldr.LocaleDisplay.display_name! "en-US-u-ca-gregory-cu-aud", locale: "fr"
"anglais américain (calendrier grégorien, devise : A$)"
"""
@spec display_name!(Cldr.Locale.locale_name() | Cldr.LanguageTag.t(), display_options()) ::
String.t() | no_return()
def display_name!(language_tag, options \\ []) do
case display_name(language_tag, options) do
{:ok, locale} -> locale
{:error, {exception, reason}} -> raise exception, reason
end
end
defp merge_extensions_and_private_use(%LanguageTag{private_use: []} = language_tag) do
language_tag
end
defp merge_extensions_and_private_use(%LanguageTag{} = language_tag) do
extensions = Map.put_new(language_tag.extensions, "x", language_tag.private_use)
Map.put(language_tag, :extensions, extensions)
end
# If matching on the compound locale then we
# don't need to take any action
defp first_match(language_tag, match_fun, omit_script_if_only_one?, true, prefer) do
{language_name, matched_tags} =
Cldr.Locale.first_match(language_tag, match_fun, omit_script_if_only_one?)
{get_display_preference(language_name, prefer), matched_tags}
end
# If we don't want a compound language then we need to omit
# the territory when matching but restore it afterwards so
# it's generated as a subtag
@reinstate_subtags [:territory, :script]
defp first_match(language_tag, match_fun, omit_script_if_only_one?, false, prefer) do
language_tag = Enum.reduce(@reinstate_subtags, language_tag, fn key, tag ->
Map.put(tag, key, nil)
end)
{language_name, matched_tags} =
Cldr.Locale.first_match(language_tag, match_fun, omit_script_if_only_one?)
{get_display_preference(language_name, prefer), matched_tags -- @reinstate_subtags}
end
defp format_display_name(language_name, [], [], _display_names) do
language_name
end
defp format_display_name(language_name, subtag_names, extension_names, display_names) do
locale_pattern = get_in(display_names, [:locale_display_pattern, :locale_pattern])
subtags =
[subtag_names, extension_names]
|> Enum.reject(&empty?/1)
|> join_subtags(display_names)
[language_name, subtags]
|> Cldr.Substitution.substitute(locale_pattern)
|> :erlang.iolist_to_binary()
end
defp subtag_names(_locale, [], _display_names, _prefer) do
[]
end
defp subtag_names(locale, subtags, display_names, prefer) do
subtags
|> Enum.map(&get_display_name(locale, display_names, &1, prefer))
|> Enum.reject(&empty?/1)
end
defp get_display_name(locale, display_names, subtag, prefer) do
case Map.fetch!(locale, subtag) do
[_ | _] = subtags ->
Enum.map(subtags, fn value -> get_in(display_names, [subtag, value]) end)
|> Enum.sort()
subtag_value ->
get_in(display_names, [subtag, subtag_value])
end
|> get_display_preference(prefer)
end
@doc false
def get_display_preference(nil, _preference) do
nil
end
def get_display_preference(value, _preference) when is_binary(value) do
value
end
def get_display_preference(value, _preference) when is_atom(value) do
to_string(value)
end
def get_display_preference(values, preference) when is_list(values) do
Enum.map(values, &get_display_preference(&1, preference))
end
def get_display_preference(values, preference) when is_map(values) do
Map.get(values, preference) || Map.fetch!(values, :default)
end
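# e.g. get_display_preference(%{default: "Traditional", short: "Trad."}, :short)
# returns "Trad."; a preference missing from the map falls back to the :default entry.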
defp join_subtags([], _display_names) do
[]
end
defp join_subtags([field], _display_names) do
[field]
end
defp join_subtags(fields, display_names) do
join_pattern = get_in(display_names, [:locale_display_pattern, :locale_separator])
Enum.reduce(fields, &Cldr.Substitution.substitute([&2, &1], join_pattern))
end
defp language_match_fun(locale_name, matched_tags, language_names) do
if display_name = Map.get(language_names, locale_name) do
{display_name, matched_tags}
else
nil
end
end
@doc false
def replace_parens_with_brackets(value) when is_binary(value) do
value
|> String.replace("(", "[")
|> String.replace(")", "]")
end
# Joins field values together using the
# localised format
@doc false
def join_field_values([], _display_names) do
[]
end
def join_field_values(fields, display_names) do
join_pattern = get_in(display_names, [:locale_display_pattern, :locale_separator])
Enum.reduce(fields, &Cldr.Substitution.substitute([&2, &1], join_pattern))
end
defimpl Cldr.DisplayName, for: Cldr.LanguageTag do
def display_name(language_tag, options) do
Cldr.LocaleDisplay.display_name!(language_tag, options)
end
end
defimpl Cldr.DisplayName, for: Map do
def display_name(map, _options) when map == %{} do
""
end
def display_name(map, options) do
Cldr.LocaleDisplay.Extension.display_name(map, options)
end
end
end
|
lib/cldr/locale_display.ex
| 0.895443 | 0.590248 |
locale_display.ex
|
starcoder
|
defmodule Vault.Engine.Generic do
@moduledoc """
A generic Vault.Engine adapter. Most of the vault secret engines don't use a
wildly different API, and can be handled with a single adapter.
## Request Details
By default, `read` runs a GET request, `write` does a POST, `list` does a GET
with an appended `?list=true`, and `delete` runs a DELETE. The options below
should give you additional flexibility.
### Request Options:
- :method - one of :get, :put, :post, :options, :patch, :head
- :full_response - if `true`, returns the full response body on success, rather than just the `data` key. Defaults to `false`,
- :query_params - query params for the request. Defaults to `%{}` (no params)
- :body - body to be sent along with the request. Defaults to `%{}` (no body) on read, or the passed in `value` on write
## Examples
Create a generic vault client:
{:ok, vault } =
Vault.new(
host: System.get_env("VAULT_ADDR"),
auth: Vault.Auth.Token,
engine: Vault.Engine.Generic,
http: Vault.HTTP.Tesla,
) |> Vault.auth(%{token: "token"})
Read/Write from the cubbyhole secret engine.
{:ok, _data} = Vault.write(vault, "cubbyhole/hello", %{"foo" => "bar"})
{:ok, %{"foo" => "bar"}} = Vault.read(vault, "cubbyhole/hello")
Read/Write from the ssh secret engine.
# create a key
{:ok, _} = Vault.write(vault, "ssh/keys/test", %{key: key})
# create a role for that key
{:ok, _} =
Vault.write(vault, "ssh/roles/test", %{
key: "test",
key_type: "dynamic",
default_user: "tester",
admin_user: "admin_tester"
})
# read a role, and return the full response
{:ok, %{ "data" => data } } =
Vault.read(vault, "ssh-client-signer/roles/test", full_response: true)
"""
@behaviour Vault.Engine.Adapter
@type vault :: Vault.t()
@type path :: String.t()
@type options :: Keyword.t()
@type token :: String.t()
@type value :: map()
@type errors :: list()
@doc """
Gets a value from vault. Defaults to a GET request against the current path.
See `option` details above for full configuration.
"""
@impl true
def read(vault, path, options \\ []) do
options = Keyword.merge([method: :get], options)
request(vault, path, %{}, options)
end
@doc """
Puts a value in vault. Defaults to a POST request against the provided path.
See `options` details above for full configuration.
"""
@impl true
def write(vault, path, value, options \\ []) do
options = Keyword.merge([method: :post], options)
request(vault, path, value, options)
end
@doc """
Lists secrets at a path. Defaults to a GET request against the provided path,
with a query param of ?list=true.
See `options` details above for full configuration.
## Examples
```
{:ok, %{
"keys"=> ["foo", "foo/"]
}
} = Vault.Engine.Generic.list(vault, "path/to/list/", [full_response: true])
```
With the full Response:
```
{:ok, %{
"data" => %{
"keys"=> ["foo", "foo/"]
},
}
} = Vault.Engine.Generic.list(vault, "path/to/list/", [full_response: true])
```
"""
@impl true
def list(vault, path, options \\ []) do
options = Keyword.merge([method: :get, query_params: %{list: true}], options)
request(vault, path, %{}, options)
end
@impl true
def delete(vault, path, options \\ []) do
options = Keyword.merge([method: :delete], options)
request(vault, path, nil, options)
end
defp request(client, path, value, options) do
method = Keyword.get(options, :method, :post)
body = Keyword.get(options, :body, value)
query_params = Keyword.get(options, :query_params, [])
full_response = Keyword.get(options, :full_response, false)
with {:ok, body} <-
Vault.HTTP.request(client, method, path, body: body, query_params: query_params) do
case body do
nil ->
{:ok, %{}}
"" ->
{:ok, %{}}
%{"errors" => []} ->
{:error, ["Key not found"]}
%{"errors" => messages} ->
{:error, messages}
%{} = data when full_response == true ->
{:ok, data}
%{"data" => data} ->
{:ok, data}
otherwise ->
{:error, ["Unknown response from vault", inspect(otherwise)]}
end
else
{:error, reason} ->
{:error, ["Http Adapter error", inspect(reason)]}
end
end
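# A sketch of the normalization above: nil or "" bodies become {:ok, %{}},
# a body with an "errors" key becomes {:error, messages}, and otherwise either
# the full body or just its "data" key is returned, depending on :full_response.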
end
|
lib/vault/engine/generic.ex
| 0.891274 | 0.720762 |
generic.ex
|
starcoder
|
defmodule Faker.Lorem do
import Faker, only: [sampler: 2]
alias Faker.Util
@moduledoc """
Functions for generating Lorem Ipsum data
"""
@doc """
Returns a random word from @data
## Examples
iex> Faker.Lorem.word()
"aliquam"
iex> Faker.Lorem.word()
"ut"
iex> Faker.Lorem.word()
"sint"
iex> Faker.Lorem.word()
"deleniti"
"""
@spec word() :: String.t()
sampler(:word, [
"alias",
"consequatur",
"aut",
"perferendis",
"sit",
"voluptatem",
"accusantium",
"doloremque",
"aperiam",
"eaque",
"ipsa",
"quae",
"ab",
"illo",
"inventore",
"veritatis",
"et",
"quasi",
"architecto",
"beatae",
"vitae",
"dicta",
"sunt",
"explicabo",
"aspernatur",
"aut",
"odit",
"aut",
"fugit",
"sed",
"quia",
"consequuntur",
"magni",
"dolores",
"eos",
"qui",
"ratione",
"voluptatem",
"sequi",
"nesciunt",
"neque",
"dolorem",
"ipsum",
"quia",
"dolor",
"sit",
"amet",
"consectetur",
"adipisci",
"velit",
"sed",
"quia",
"non",
"numquam",
"eius",
"modi",
"tempora",
"incidunt",
"ut",
"labore",
"et",
"dolore",
"magnam",
"aliquam",
"quaerat",
"voluptatem",
"ut",
"enim",
"ad",
"minima",
"veniam",
"quis",
"nostrum",
"exercitationem",
"ullam",
"corporis",
"nemo",
"enim",
"ipsam",
"voluptatem",
"quia",
"voluptas",
"sit",
"suscipit",
"laboriosam",
"nisi",
"ut",
"aliquid",
"ex",
"ea",
"commodi",
"consequatur",
"quis",
"autem",
"vel",
"eum",
"iure",
"reprehenderit",
"qui",
"in",
"ea",
"voluptate",
"velit",
"esse",
"quam",
"nihil",
"molestiae",
"et",
"iusto",
"odio",
"dignissimos",
"ducimus",
"qui",
"blanditiis",
"praesentium",
"laudantium",
"totam",
"rem",
"voluptatum",
"deleniti",
"atque",
"corrupti",
"quos",
"dolores",
"et",
"quas",
"molestias",
"excepturi",
"sint",
"occaecati",
"cupiditate",
"non",
"provident",
"sed",
"ut",
"perspiciatis",
"unde",
"omnis",
"iste",
"natus",
"error",
"similique",
"sunt",
"in",
"culpa",
"qui",
"officia",
"deserunt",
"mollitia",
"animi",
"id",
"est",
"laborum",
"et",
"dolorum",
"fuga",
"et",
"harum",
"quidem",
"rerum",
"facilis",
"est",
"et",
"expedita",
"distinctio",
"nam",
"libero",
"tempore",
"cum",
"soluta",
"nobis",
"est",
"eligendi",
"optio",
"cumque",
"nihil",
"impedit",
"quo",
"porro",
"quisquam",
"est",
"qui",
"minus",
"id",
"quod",
"maxime",
"placeat",
"facere",
"possimus",
"omnis",
"voluptas",
"assumenda",
"est",
"omnis",
"dolor",
"repellendus",
"temporibus",
"autem",
"quibusdam",
"et",
"aut",
"consequatur",
"vel",
"illum",
"qui",
"dolorem",
"eum",
"fugiat",
"quo",
"voluptas",
"nulla",
"pariatur",
"at",
"vero",
"eos",
"et",
"accusamus",
"officiis",
"debitis",
"aut",
"rerum",
"necessitatibus",
"saepe",
"eveniet",
"ut",
"et",
"voluptates",
"repudiandae",
"sint",
"et",
"molestiae",
"non",
"recusandae",
"itaque",
"earum",
"rerum",
"hic",
"tenetur",
"a",
"sapiente",
"delectus",
"ut",
"aut",
"reiciendis",
"voluptatibus",
"maiores",
"doloribus",
"asperiores",
"repellat"
])
@doc """
Returns a character list of the given length.
If a range is provided, the length of the list is random in between the
specified range.
Defaults to a range between 15 and 255 (inclusive).
## Examples
iex> Faker.Lorem.characters()
'ppkQqaIfGqxsjFoNITNnu6eXyJicLJNth88PrhGDhwp4LNQMt5pCFh7XGEZUiBOjqwcnSUTH94vu8a9XKUwNAs48lHzPITbFXSfTS0pHfBSmHkbj9kOsd7qRuGeXKTgCgI1idI3uwENwTqc'
iex> Faker.Lorem.characters(3..5)
'EFbv'
iex> Faker.Lorem.characters(2)
'vx'
iex> Faker.Lorem.characters(7)
'jycADSd'
"""
@spec characters(integer | Range.t) :: [char]
def characters(range_or_length \\ 15..255)
def characters(first..last) do
characters(Faker.random_between(first, last))
end
def characters(num) do
char = &character/0
char
|> Stream.repeatedly()
|> Enum.take(num)
end
@doc """
Returns a string with a given amount of sentences.
If a range is provided, the number of sentences is random in between the
specified range.
Defaults to a range between 2 and 5 (inclusive).
## Examples
iex> Faker.Lorem.paragraph()
"Deleniti consequatur et qui vitae et. Sit aut expedita cumque est necessitatibus beatae ex sunt! Soluta asperiores qui vitae animi et id et vitae. Quisquam corporis quisquam ab harum!"
iex> Faker.Lorem.paragraph(1..2)
"Numquam maxime ut aut inventore eius rerum beatae. Qui officia vel quaerat expedita."
iex> Faker.Lorem.paragraph(1)
"Perspiciatis rerum nam repellendus inventore nihil."
iex> Faker.Lorem.paragraph(2)
"Sequi ducimus qui voluptates magni quisquam sed odio. Vel error non impedit tempora minus."
"""
@spec paragraph(integer | Range.t()) :: String.t()
def paragraph(range \\ 2..5)
def paragraph(first..last) do
paragraph(Faker.random_between(first, last))
end
def paragraph(num) do
Enum.join(sentences(num), " ")
end
@doc """
Returns a list with a given amount of paragraphs.
If a range is provided, the number of paragraphs is random in between the
specified range.
Defaults to a range between 2 and 5 (inclusive)
## Examples
iex> Faker.Lorem.paragraphs()
["Consequatur et qui vitae? Et sit aut expedita cumque est necessitatibus beatae ex. Possimus soluta asperiores qui vitae.", "Et vitae vitae ut quisquam corporis quisquam ab harum ipsa. Numquam maxime ut aut inventore eius rerum beatae. Qui officia vel quaerat expedita. Perspiciatis rerum nam repellendus inventore nihil. Sequi ducimus qui voluptates magni quisquam sed odio.", "Error non impedit tempora minus voluptatem qui fugit. Ab consectetur harum earum possimus. Provident quisquam modi accusantium eligendi numquam illo voluptas. Est non id quibusdam qui omnis?", "Dicta dolores at ut delectus magni atque eos beatae nulla. Laudantium qui dolorem pariatur voluptatibus sed et enim?"]
iex> Faker.Lorem.paragraphs(2..3)
["Voluptate reiciendis repellat et praesentium quia sed nemo. Vero repellat cumque nihil similique repudiandae corrupti rerum? Accusamus suscipit perspiciatis cum et sint dolore et ut. Eos reprehenderit cupiditate omnis et doloremque omnis.", "Quo et est culpa eum ex et veniam aut aut! Labore fuga tenetur alias est provident?", "Illo consequatur maiores illum et quia culpa sunt! Cumque porro ut eum porro est id maxime dolorum animi. Deserunt ipsa consequuntur eveniet asperiores. Quia numquam voluptas vitae repellat tempore."]
iex> Faker.Lorem.paragraphs(1)
["Voluptas harum modi omnis quam dolor a aliquam officiis. Neque voluptas consequatur sed cupiditate dolorum pariatur et."]
iex> Faker.Lorem.paragraphs(2)
["Voluptatem natus amet eius eos non dolorum quaerat dolores pariatur. Aliquam rerum ab voluptatem exercitationem nobis enim delectus tempore eos. Ex enim dolore ut consequuntur eaque expedita dicta eius totam. A eveniet ab magni rerum enim consequatur.", "Nihil laudantium ea veniam necessitatibus qui. Minus ad omnis quaerat quidem impedit sint. Id ut repellat qui repudiandae!"]
"""
@spec paragraphs(integer | Range.t()) :: list(String.t())
def paragraphs(range \\ 2..5)
def paragraphs(first..last) do
paragraphs(Faker.random_between(first, last))
end
def paragraphs(num) do
paragraph = &paragraph/0
paragraph
|> Stream.repeatedly()
|> Enum.take(num)
end
@doc """
Returns a string with a given amount of words.
If a range is provided, the number of words is random in between the
specified range.
Defaults to a range between 4 and 10 (inclusive).
## Examples
iex> Faker.Lorem.sentence()
"Sint deleniti consequatur et qui vitae et quibusdam et sit."
iex> Faker.Lorem.sentence(2..3)
"Cumque est?"
iex> Faker.Lorem.sentence(3)
"Beatae ex sunt."
iex> Faker.Lorem.sentence(5)
"Possimus soluta asperiores qui vitae."
"""
@spec sentence(integer | Range.t()) :: String.t()
def sentence(range \\ 4..10)
def sentence(first..last) do
Faker.random_between(first, last)
|> sentence(Util.pick([".", ".", ".", "!", "?"]))
end
def sentence(num) do
sentence(num, Util.pick([".", ".", ".", "!", "?"]))
end
@doc """
Returns a string with an amount of words equal to the parameter provided,
concatenating the specified mark
## Examples
iex> Faker.Lorem.sentence(7, "...")
"Aliquam ut sint deleniti consequatur et qui..."
iex> Faker.Lorem.sentence(1, "?")
"Vitae?"
iex> Faker.Lorem.sentence(5, ".")
"Et quibusdam et sit aut."
iex> Faker.Lorem.sentence(3, ";")
"Expedita cumque est;"
"""
@spec sentence(integer, binary) :: String.t()
def sentence(num, mark) when is_integer(num) and is_binary(mark) do
tmp =
num
|> words()
|> Enum.join(" ")
|> String.capitalize()
tmp <> mark
end
@doc """
Returns a list of strings of the given length, each representing a sentence.
If a range is provided, the length of the list is random in between the
specified range.
Defaults to a range between 2 and 5 (inclusive).
## Examples
iex> Faker.Lorem.sentences()
["Deleniti consequatur et qui vitae et.", "Sit aut expedita cumque est necessitatibus beatae ex sunt!", "Soluta asperiores qui vitae animi et id et vitae.", "Quisquam corporis quisquam ab harum!"]
iex> Faker.Lorem.sentences(3..4)
["Numquam maxime ut aut inventore eius rerum beatae.", "Qui officia vel quaerat expedita.", "Perspiciatis rerum nam repellendus inventore nihil.", "Sequi ducimus qui voluptates magni quisquam sed odio."]
iex> Faker.Lorem.sentences(4)
["Vel error non impedit tempora minus.", "Fugit cupiditate fuga ab consectetur harum earum possimus totam.", "Quisquam modi accusantium eligendi numquam.", "Quod blanditiis est non id quibusdam qui omnis alias!"]
iex> Faker.Lorem.sentences(3)
["Dicta dolores at ut delectus magni atque eos beatae nulla.", "Laudantium qui dolorem pariatur voluptatibus sed et enim?", "Minima laudantium voluptate reiciendis repellat."]
"""
@spec sentences(integer | Range.t()) :: [String.t()]
def sentences(range \\ 2..5)
def sentences(first..last) do
sentences(Faker.random_between(first, last))
end
def sentences(num) do
sentence = &sentence/0
sentence
|> Stream.repeatedly()
|> Enum.take(num)
end
@doc """
Returns a list of strings of the given length, each representing a word.
If a range is provided, the length of the list is random in between the
provided range.
Defaults to a range between 3 and 6.
## Examples
iex> Faker.Lorem.words()
["ut", "sint", "deleniti", "consequatur", "et"]
iex> Faker.Lorem.words(1..2)
["vitae"]
iex> Faker.Lorem.words(2)
["et", "quibusdam"]
iex> Faker.Lorem.words(6)
["et", "sit", "aut", "expedita", "cumque", "est"]
"""
@spec words(integer | Range.t()) :: [String.t()]
def words(range \\ 3..6)
def words(first..last) do
words(Faker.random_between(first, last))
end
def words(num) do
word = &word/0
word
|> Stream.repeatedly()
|> Enum.take(num)
end
defp character do
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
Enum.at(alphabet, Faker.random_between(0, Enum.count(alphabet) - 1))
end
end
|
lib/faker/lorem.ex
| 0.623835 | 0.461805 |
lorem.ex
|
starcoder
|
defmodule ExRabbitMQ.Connection do
@moduledoc """
A `GenServer` implementing a long running connection to a RabbitMQ server.
Consumers and producers share connections and when a connection reaches the limit of
`65535` channels, a new connection is established.
To correctly monitor the open channels, users must not open channels manually (e.g., in the provided hooks).
Internally, a connection `GenServer` uses [`:pg2`](http://erlang.org/doc/man/pg2.html) and [`:ets`](http://erlang.org/doc/man/ets.html) to handle local subscriptions of consumers and producers.
[`:pg2`](http://erlang.org/doc/man/pg2.html) is used to name the pool of connections to RabbitMQ.
Only local members are considered so clustering cannot cause problems with local subscriptions.
[`:ets`](http://erlang.org/doc/man/ets.html) is used to hold the subscriptions of consumers and producers that are using the connection `GenServer` instance holding the table.
"""
@module __MODULE__
use GenServer
require Logger
alias ExRabbitMQ.Connection
alias ExRabbitMQ.Connection.Config, as: ConnectionConfig
alias ExRabbitMQ.Constants
defstruct [
:connection,
:connection_pid,
:ets_consumers,
config: %ConnectionConfig{},
stale?: false
]
@doc false
def start_link(%ConnectionConfig{} = config) do
GenServer.start_link(@module, config)
end
@doc false
def init(config) do
Process.flag(:trap_exit, true)
:ok = :pg2.create(Constants.connection_pids_group_name())
:ok = :pg2.join(Constants.connection_pids_group_name(), self())
ets_consumers =
Constants.connection_pids_group_name() |> String.to_atom() |> :ets.new([:private])
Process.send(self(), :connect, [])
schedule_cleanup()
{:ok, %Connection{config: config, ets_consumers: ets_consumers}}
end
@doc """
Returns the connection held by this process, if it holds a usable connection
to RabbitMQ.
`connection_pid` is the GenServer pid implementing the called `ExRabbitMQ.Connection`.
"""
@spec get(pid) :: {:ok, AMQP.Connection.t()} | {:error, any}
def get(connection_pid) do
case connection_pid do
nil ->
{:error, :nil_connection_pid}
connection_pid ->
try do
GenServer.call(connection_pid, :get)
catch
:exit, reason ->
{:error, reason}
end
end
end
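# Illustrative call (assuming `connection_pid` is a live `ExRabbitMQ.Connection`):
#
#     case ExRabbitMQ.Connection.get(connection_pid) do
#       {:ok, %AMQP.Connection{} = connection} -> connection
#       {:error, _reason} -> nil
#     end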
@doc """
Subscribes a consumer process, via `self()`, to the managed ETS table.
If the ETS table already contains 65535 consumers, and thus the maximum allowed 65535 channels,
then the subscription is not allowed so that a new connection can be created.
`connection_pid` is the GenServer pid implementing the called `ExRabbitMQ.Connection`
`connection_config` is the connection config that the `ExRabbitMQ.Connection` has to be using
in order to allow the subscription
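## Example
A minimal sketch, assuming `connection_pid` points at a running connection
process and `connection_config` matches that connection's own config:
case ExRabbitMQ.Connection.subscribe(connection_pid, connection_config) do
  true -> :subscribed
  false -> :channel_limit_reached_or_config_mismatch
end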
"""
@spec subscribe(pid, term) :: true | false
def subscribe(connection_pid, connection_config) do
GenServer.call(connection_pid, {:subscribe, self(), connection_config})
end
@doc """
Gracefully closes the RabbitMQ connection and terminates its GenServer handler identified by `connection_pid`.
"""
@spec close(pid) :: :ok
def close(connection_pid) do
GenServer.cast(connection_pid, :close)
end
@doc false
def handle_call(:get, _from, %Connection{connection: connection} = state) do
reply = if connection === nil, do: {:error, :nil_connection_pid}, else: {:ok, connection}
{:reply, reply, state}
end
@doc false
def handle_call(
{:subscribe, consumer_pid, connection_config},
_from,
%Connection{config: config, ets_consumers: ets_consumers} = state
) do
result =
if config === connection_config do
case :ets.info(ets_consumers)[:size] do
65_535 ->
false
_ ->
:ets.insert_new(ets_consumers, {consumer_pid})
Process.monitor(consumer_pid)
true
end
else
false
end
new_state = %{state | stale?: false}
{:reply, result, new_state}
end
@doc false
def handle_cast(
:close,
%Connection{
ets_consumers: ets_consumers,
connection: connection,
connection_pid: connection_pid
} = state
) do
if connection === nil do
{:stop, :normal, state}
else
Process.unlink(connection_pid)
AMQP.Connection.close(connection)
publish(ets_consumers, {:xrmq_connection, {:closed, nil}})
new_state = %{state | connection: nil, connection_pid: nil}
{:stop, :normal, new_state}
end
end
@doc false
def handle_info(:connect, %Connection{config: config, ets_consumers: ets_consumers} = state) do
Logger.debug("connecting to RabbitMQ")
case AMQP.Connection.open(
username: config.username,
password: config.password,
host: config.host,
port: config.port,
virtual_host: config.vhost,
heartbeat: config.heartbeat
) do
{:ok, %AMQP.Connection{pid: connection_pid} = connection} ->
Logger.debug("connected to RabbitMQ")
Process.link(connection_pid)
publish(ets_consumers, {:xrmq_connection, {:open, connection}})
new_state = %{state | connection: connection, connection_pid: connection_pid}
{:noreply, new_state}
{:error, reason} ->
Logger.error("failed to connect to RabbitMQ: #{inspect(reason)}")
Process.send_after(self(), :connect, config.reconnect_after)
new_state = %{state | connection: nil, connection_pid: nil}
{:noreply, new_state}
end
end
@doc false
def handle_info(
{:EXIT, pid, _reason},
%Connection{config: config, connection_pid: connection_pid, ets_consumers: ets_consumers} =
state
)
when pid === connection_pid do
publish(ets_consumers, {:xrmq_connection, {:closed, nil}})
Logger.error("disconnected from RabbitMQ")
Process.send_after(self(), :connect, config.reconnect_after)
new_state = %{state | connection: nil, connection_pid: nil}
{:noreply, new_state}
end
@doc false
def handle_info(
{:DOWN, _ref, :process, consumer_pid, _reason},
%Connection{ets_consumers: ets_consumers} = state
) do
:ets.delete(ets_consumers, consumer_pid)
{:noreply, state}
end
@doc false
def handle_info(:cleanup, %{ets_consumers: ets_consumers, stale?: stale?} = state) do
if stale? do
{:stop, :normal, state}
else
new_state =
case :ets.info(ets_consumers)[:size] do
0 -> %{state | stale?: true}
_ -> state
end
schedule_cleanup()
{:noreply, new_state}
end
end
@doc false
def handle_info(_, state) do
{:noreply, state}
end
defp publish(ets_consumers, what) do
ets_consumers
|> :ets.select([{:_, [], [:"$_"]}])
|> Enum.split_with(fn {consumer_pid} ->
if Process.alive?(consumer_pid) do
send(consumer_pid, what)
else
:ets.delete(ets_consumers, consumer_pid)
end
end)
end
defp schedule_cleanup() do
Process.send_after(self(), :cleanup, 5000)
end
end
|
lib/ex_rabbit_m_q/connection.ex
| 0.837088 | 0.567997 |
connection.ex
|
starcoder
|
defmodule Sebex.ElixirAnalyzer.SourceAnalysis.Dependency do
alias Sebex.ElixirAnalyzer.SourceAnalysis.Parser
alias Sebex.ElixirAnalyzer.Span
@type t :: %__MODULE__{
name: atom,
version_spec: String.t() | map(),
version_spec_span: Span.t()
}
@derive Jason.Encoder
@enforce_keys [:name, :version_spec, :version_spec_span]
defstruct @enforce_keys
@spec extract(Macro.t()) :: list(t())
def extract(ast) do
ast
|> extract_deps_list()
|> Enum.map(&process_dep/1)
end
@spec extract_deps_list(Macro.t()) :: list(Macro.t())
defp extract_deps_list(ast) do
{_, result} =
Bunch.Macro.prewalk_while(ast, :not_found, fn
t, {:found, _} = acc ->
{:skip, t, acc}
{kw_def, _,
[
{:deps, _, args},
[
{
{:literal, _, [:do]},
{:literal, _, [deps_list]}
}
]
]} = t,
:not_found
when kw_def in [:def, :defp] and is_list(deps_list) and args in [[], nil] ->
{:skip, t, {:found, deps_list}}
t, :not_found ->
{:enter, t, :not_found}
end)
case result do
{:found, l} -> l
:not_found -> []
end
end
@spec process_dep(Macro.t()) :: t()
defp process_dep(
{:literal, _,
[
{
{:literal, _, [name]},
{:literal, _, [version_spec]} = version_literal
}
]}
)
when is_atom(name) and is_binary(version_spec) do
%__MODULE__{
name: name,
version_spec: version_spec,
version_spec_span: Span.literal(version_literal)
}
end
defp process_dep(
{:{}, _,
[
{:literal, _, [name]},
{:literal, _, [version_spec]} = version_literal,
_
]}
)
when is_atom(name) and is_binary(version_spec) do
%__MODULE__{
name: name,
version_spec: version_spec,
version_spec_span: Span.literal(version_literal)
}
end
defp process_dep(
{:literal, tuple_meta,
[
{
{:literal, _, [name]},
[{first_key, _} | _] = kw
}
]}
)
when is_atom(name) do
%__MODULE__{
name: name,
version_spec:
kw
|> Parser.decode_literal()
|> Enum.into(%{}),
version_spec_span:
first_key
|> Span.literal()
|> Span.set(Keyword.fetch!(tuple_meta, :closing), :end)
}
end
end
|
sebex_elixir_analyzer/lib/sebex_elixir_analyzer/source_analysis/dependency.ex
| 0.6973 | 0.44083 |
dependency.ex
|
starcoder
|
defmodule ExAmp.Provision.Readme do
@moduledoc false
use ExAmp.Task
alias ExAmp.Project
alias ExAmp.Provision.CI
@path "README.md"
embed_template(:content, """
# <%= @name %>
**<%= @description %>**
<% if @package? do %>
## Installation
Add `<%= @app %>` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:<%= @app %>, "~> <%= @version %>"}
]
end
```
## Documentation
The docs can be found at [https://hexdocs.pm/<%= @app %>](https://hexdocs.pm/<%= @app %>).
<% end %>
""")
def gen do
project_config = Mix.Project.config()
app = Keyword.fetch!(project_config, :app)
name = Keyword.get(project_config, :name, app)
description = Keyword.get(project_config, :description, "TODO: Add description")
package? = Keyword.has_key?(project_config, :package)
version = Keyword.fetch!(project_config, :version)
unless package? do
log_info([:yellow, "No package config - please add `package: [...]` to project config"])
end
content =
content_template(
app: app,
name: name,
description: description,
package?: package?,
version: version
)
create_file(@path, content)
add_badge(:license)
add_badge(:ci)
add_badge(:hex)
end
def add_badge(:license) do
if repo_path = Project.github_repo_path() do
image_url = "https://img.shields.io/github/license/#{repo_path}.svg"
details_url = "https://github.com/#{repo_path}/blob/master/LICENSE.md"
label = "license"
do_add_badge(image_url, details_url, label)
end
end
def add_badge(:hex) do
if Project.package?() do
app = Project.app()
image_url = "https://img.shields.io/hexpm/v/#{app}.svg"
details_url = "https://hex.pm/packages/#{app}"
label = "Hex version"
do_add_badge(image_url, details_url, label)
end
end
def add_badge(:ci) do
with true <- CI.present?(),
repo_path when not is_nil(repo_path) <- Project.github_repo_path() do
image_url = "https://img.shields.io/circleci/project/github/#{repo_path}/master.svg"
details_url = "https://circleci.com/gh/surgeventures/#{repo_path}/tree/master"
label = "build status"
do_add_badge(image_url, details_url, label)
end
end
defp do_add_badge(image_url, details_url, label) do
log_step(:green, "adding readme badge", label)
with {_, {:ok, content}} <- {:read_config, File.read(@path)},
lines = String.split(content, "\n"),
{_, {:ok, badges, insert_at}} <- {:parse_badges, parse_badges(lines)},
{_, false} <- {:already_added, Enum.member?(badges, label)} do
badge_text = "[](#{details_url})"
lines =
if insert_at do
List.insert_at(lines, insert_at + 1, badge_text)
else
lines
|> List.insert_at(1, "")
|> List.insert_at(2, badge_text)
end
new_content = Enum.join(lines, "\n")
File.write!(@path, new_content)
{:ok, :added}
else
  {:already_added, _} -> {:ok, :already_added}
  {:parse_badges, _} -> {:error, :parse_badges}
  # a failed file read would otherwise raise a WithClauseError
  {:read_config, _} -> {:error, :parse_badges}
end
|> case do
{:error, :parse_badges} ->
log_error("Unable to modify readme - please add #{label} manually")
_ ->
nil
end
end
defp parse_badges(lines) do
lines
|> Enum.with_index()
|> Enum.reduce(:top, fn
{"#" <> _, _}, :top ->
:title
{_, _}, :top ->
:error
{"", _}, :title ->
{:badges, [], nil}
{"[![" <> label_eol, ends_at}, {:badges, badges, _} ->
label = label_eol |> String.split("]") |> List.first()
{:badges, [label | badges], ends_at}
{_, _}, {:badges, badges, ends_at} ->
{:ok, Enum.reverse(badges), ends_at}
_, acc ->
acc
end)
|> case do
{:ok, _, _} = acc -> acc
_ -> :error
end
end
end
|
lib/ex_amp/provision/readme.ex
| 0.604165 | 0.726668 |
readme.ex
|
starcoder
|
defmodule EmojiMap.TwitterStream do
@moduledoc """
We get a Twitter Stream from locations within a bounding box. The
bounding box is so large that the tweets can come from all over the world.
This way we make sure that we only receive geo-tagged tweets. The reason for
this is that it isn't possible to search for hundreds of different keywords at
once, and location cannot be combined with keywords as a filter: the stream yields
tweets either from the location OR matching a keyword, so we have to filter ourselves.
"""
@doc """
The main function of this module. Gets the stream, filters for empty
coordinates, filters for emojis and then returns a simpler map.
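## Example
A hedged sketch, assuming ExTwitter is configured with valid API credentials:
EmojiMap.TwitterStream.get_emoji_stream()
|> Stream.take(1)
|> Enum.to_list()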
"""
def get_emoji_stream do
# Sometimes the coordinates are still empty. Probably when a place was selected.
# Could be refined
ExTwitter.stream_filter([locations: "-168.8,-57.3,174.4,84.1"], :infinity)
|> Stream.filter(fn(x) -> x.coordinates != nil || x.place != nil end)
|> Stream.filter(&has_emoji(&1.text))
|> Stream.map(fn(x) ->
coordinates = cond do
x.coordinates != nil ->
x.coordinates |> Map.fetch!(:coordinates)
|> coord_arr_to_str
x.place != nil ->
x.place |> get_coordinates_from_place
|> coord_arr_to_str
true -> nil
end
emoji = first_emoji(x.text)
%{emoji: emoji, text: x.text, screen_name: x.user.screen_name,
profile_image_url: x.user.profile_image_url, coordinates: coordinates}
end)
end
@doc """
Checks wether or not an emoji is present.
"""
def has_emoji(tweet) do
case(Exmoji.Scanner.scan(tweet) |> Enum.count) do
0 -> false
_ -> true
end
end
@doc """
Returns the first emoji.
"""
def first_emoji(tweet) do
Exmoji.Scanner.scan(tweet)
|> hd
|> Exmoji.EmojiChar.render
end
@doc """
Turn array with float numbers from Twitter API into String
"""
def coord_arr_to_str(input_arr) do
input_arr
|> Enum.map(fn(x) -> "#{x}" end)
|> Enum.join(",")
end
@doc """
Places from the Twitter API are not a single point but a bouding box. For easy
use we will just use the first coordinates. To find out more check here:
https://dev.twitter.com/overview/api/tweets
"""
def get_coordinates_from_place(%{bounding_box: %{coordinates: [bounding_box]}}) do
bounding_box |> hd
end
end
|
backend/lib/emoji_map/twitter_stream.ex
| 0.853898 | 0.502075 |
twitter_stream.ex
|
starcoder
|
defprotocol ICalendar.Value do
@fallback_to_any true
def to_ics(data)
end
alias ICalendar.Value
defimpl Value, for: BitString do
def to_ics(x) do
x
|> String.replace(~S"\n", ~S"\\n")
|> String.replace("\n", ~S"\n")
end
end
defimpl Value, for: Tuple do
defmacro elem2(x, i1, i2) do
quote do
unquote(x) |> elem(unquote(i1)) |> elem(unquote(i2))
end
end
@doc """
This macro is used to establish whether a tuple is in the Erlang Timestamp
format (`{{year, month, day}, {hour, minute, second}}`).
"""
defmacro is_datetime_tuple(x) do
quote do
# bounds checks, in order: year, month, day, hour, minute, second
unquote(x) |> elem2(0, 0) |> is_integer and
unquote(x) |> elem2(0, 1) |> is_integer and
unquote(x) |> elem2(0, 1) <= 12 and
unquote(x) |> elem2(0, 1) >= 1 and
unquote(x) |> elem2(0, 2) |> is_integer and
unquote(x) |> elem2(0, 2) <= 31 and
unquote(x) |> elem2(0, 2) >= 1 and
unquote(x) |> elem2(1, 0) |> is_integer and
unquote(x) |> elem2(1, 0) <= 23 and
unquote(x) |> elem2(1, 0) >= 0 and
unquote(x) |> elem2(1, 1) |> is_integer and
unquote(x) |> elem2(1, 1) <= 59 and
unquote(x) |> elem2(1, 1) >= 0 and
unquote(x) |> elem2(1, 2) |> is_integer and
unquote(x) |> elem2(1, 2) <= 60 and
unquote(x) |> elem2(1, 2) >= 0
end
end
@doc """
This function converts Erlang timestamp tuples into DateTimes.
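## Example
A sketch using the tuple shape returned by `:calendar.universal_time/0`:
iex> Value.to_ics({{2024, 1, 15}, {9, 30, 0}})
"20240115T093000"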
"""
def to_ics(timestamp) when is_datetime_tuple(timestamp) do
timestamp
|> Timex.to_datetime()
|> Value.to_ics()
end
def to_ics(x), do: x
end
defimpl Value, for: DateTime do
use Timex
@doc """
This function converts DateTimes to UTC timezone and then into Strings in the
iCal format.
"""
def to_ics(%DateTime{} = timestamp) do
format_string = "{YYYY}{0M}{0D}T{h24}{m}{s}"
{:ok, result} =
timestamp
|> Timex.format(format_string)
result
end
end
defimpl Value, for: Date do
use Timex
@doc """
This function converts DateTimes to UTC timezone and then into Strings in the
iCal format.
"""
def to_ics(%Date{} = timestamp) do
format_string = "{YYYY}{0M}{0D}"
{:ok, result} =
timestamp
|> Timex.format(format_string)
result
end
end
defimpl Value, for: Any do
def to_ics(x), do: x
end
|
lib/icalendar/value.ex
| 0.732687 | 0.429968 |
value.ex
|
starcoder
|
defmodule GrowthBook.Context do
@moduledoc """
Stores feature and experiment context.
Holds the state of features, experiment overrides, attributes and other "global" state. The
context works similar to `%Plug.Conn{}`, as it is created for each request and passed along
when working with features and experiments.
"""
alias GrowthBook.Feature
alias GrowthBook.ExperimentOverride
@typedoc """
Context
**Context** struct. Has a number of optional properties:
- **`enabled?`** (`boolean()`) - Switch to globally disable all experiments. Default `true`.
- **`attributes`** (`t:attributes/0`) - Map of user attributes that are used
to assign variations
- **`url`** (`String.t()`) - The URL of the current page
- **`features`** (`t:features/0`) - Feature definitions (usually pulled from an API or cache)
- **`forced_variations`** (`t:forced_variations/0`) - Force specific experiments to always assign
a specific variation (used for QA)
- **`qa_mode?`** (`boolean()`) - If `true`, random assignment is disabled and only explicitly
forced variations are used.
"""
@type t() :: %__MODULE__{
attributes: attributes(),
features: features(),
overrides: experiment_overrides(),
forced_variations: forced_variations(),
url: String.t() | nil,
enabled?: boolean(),
qa_mode?: boolean()
}
@typedoc """
Attributes
**Attributes** are an arbitrary JSON map containing user and request attributes. Here's an example:
```
%{
"id" => "123",
"anonId" => "abcdef",
"company" => "growthbook",
"url" => "/pricing",
"country" => "US",
"browser" => "firefox",
"age" => 25,
"beta" => true,
"account" => %{
"plan" => "team",
"seats" => 10
}
}
```
"""
@type attributes() :: %{required(String.t()) => term()}
@typedoc """
Experiment overrides
A map with feature names as keys and `%ExperimentOverride{}` struct as values.
"""
@type experiment_overrides() :: %{required(GrowthBook.feature_key()) => ExperimentOverride.t()}
@typedoc """
Features
A map of `%Feature{}` structs. Keys are string ids for the features.
```
%{
"feature-1" => %Feature{
default_value: false
},
"my_other_feature" => %Feature{
default_value: 1,
rules: [
%FeatureRule{
force: 2
}
]
}
}
```
"""
@type features() :: %{required(GrowthBook.feature_key()) => Feature.t()}
@typedoc """
Forced variations map
A hash or map that forces an `GrowthBook.Experiment` to always assign a specific variation.
Useful for QA.
Keys are the experiment key, values are the list index of the variation. For example:
```
%{
"my-test" => 0,
"other-test" => 1
}
```
"""
@type forced_variations() :: %{required(GrowthBook.feature_key()) => integer()}
defstruct attributes: %{},
features: %{},
overrides: %{},
forced_variations: %{},
url: nil,
enabled?: true,
qa_mode?: false
end
|
lib/growth_book/context.ex
| 0.919769 | 0.844794 |
context.ex
|
starcoder
|
defmodule InvoiceTracker do
@moduledoc """
Track invoices and payments.
"""
alias InvoiceTracker.{Invoice, Repo, TimeSummary, TimeTracker}
@doc """
Return a list of all invoices
"""
@spec all() :: [Invoice.t()]
def all, do: Repo.all()
@doc """
Return a list of all unpaid invoices
"""
@spec unpaid() :: [Invoice.t()]
def unpaid do
all()
|> Enum.reject(&Invoice.paid?/1)
end
@doc """
Return a list of all invoices that were active after a given date.
Active means:
* Unpaid as of that date
* Issued since that date
* Paid since that date
"""
@spec active_since(Date.t()) :: [Invoice.t()]
def active_since(date) do
all()
|> Enum.filter(&Invoice.active_since?(&1, date))
end
@doc """
Find an invoice by its number.
"""
@spec lookup(Invoice.key()) :: Invoice.t()
def lookup(number) do
{:ok, invoice} = Repo.find(number)
invoice
end
@doc """
Find the earliest invoice that hasn't yet been paid.
"""
@spec oldest_unpaid_invoice() :: Invoice.t()
def oldest_unpaid_invoice do
unpaid()
|> Enum.sort_by(&Map.get(&1, :date), &Timex.before?/2)
|> List.first()
end
@doc """
Return the next available invoice number.
"""
@spec next_invoice_number() :: Invoice.key()
def next_invoice_number do
1 + highest_invoice_number()
end
defp highest_invoice_number do
all()
|> Enum.map(& &1.number)
|> Enum.max(fn -> 0 end)
end
@doc """
Record an invoice.
"""
@spec record(Invoice.t()) :: :ok
def record(invoice), do: Repo.store(invoice)
@doc """
Mark an invoice as paid.
"""
@spec pay(Invoice.key(), Date.t()) :: :ok
def pay(number, date) do
Repo.update(number, &Invoice.pay(&1, date))
end
@doc """
Provide a time entry summary for an invoice.
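## Example
A hedged sketch; `tracker`, the workspace id and the client id are placeholder
values for an already-configured time tracker:
InvoiceTracker.time_summary(tracker,
  invoice_date: ~D[2024-02-01],
  workspace_id: "ws-123",
  client_id: "client-456"
)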
"""
@type option ::
{:invoice_date, Date.t()}
| {:workspace_id, String.t()}
| {:client_id, String.t()}
@spec time_summary(TimeTracker.tracker(), [option]) :: TimeSummary.t()
def time_summary(
time_tracker,
invoice_date: invoice_date,
workspace_id: workspace_id,
client_id: client_id
) do
TimeTracker.summary(
time_tracker,
start_date: invoice_start_date(invoice_date),
end_date: invoice_end_date(invoice_date),
workspace_id: workspace_id,
client_id: client_id
)
end
defp invoice_start_date(invoice_date) do
end_date = invoice_end_date(invoice_date)
if end_date.day >= 16 do
%{end_date | day: 16}
else
%{end_date | day: 1}
end
end
defp invoice_end_date(invoice_date) do
Timex.shift(invoice_date, days: -1)
end
end
|
lib/invoice_tracker.ex
| 0.790854 | 0.410018 |
invoice_tracker.ex
|
starcoder
|
defmodule DateTimeParser.Parser.Epoch do
@moduledoc """
Parses a Unix Epoch timestamp. Parsing is gated by the number of digits present:
the value must contain 10 or 11 digits of seconds, with an optional subsecond
portion of up to 10 digits. Negative epoch timestamps are supported.
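## Example
A hedged sketch via the top-level parser; `"1546300800"` has the required 10
digits and decodes as 2019-01-01T00:00:00Z:
iex> DateTimeParser.parse_datetime("1546300800")
{:ok, ~U[2019-01-01 00:00:00Z]}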
"""
@behaviour DateTimeParser.Parser
@max_subsecond_digits 6
@one_second_in_microseconds (1 * :math.pow(10, 6)) |> trunc()
@epoch_regex ~r|\A(?<sign>-)?(?<seconds>\d{10,11})(?:\.(?<subseconds>\d{1,10}))?\z|
@impl DateTimeParser.Parser
def preflight(%{string: string} = parser) do
case Regex.named_captures(@epoch_regex, string) do
nil -> {:error, :not_compatible}
results -> {:ok, %{parser | preflight: results}}
end
end
@impl DateTimeParser.Parser
def parse(%{preflight: preflight} = parser) do
%{"sign" => sign, "seconds" => raw_seconds, "subseconds" => raw_subseconds} = preflight
is_negative = sign == "-"
has_subseconds = raw_subseconds != ""
with {:ok, seconds} <- parse_seconds(raw_seconds, is_negative, has_subseconds),
{:ok, subseconds} <- parse_subseconds(raw_subseconds, is_negative) do
from_tokens(parser, {seconds, subseconds})
end
end
@spec parse_seconds(String.t(), boolean(), boolean()) ::
{:ok, integer()}
defp parse_seconds(raw_seconds, is_negative, has_subseconds)
defp parse_seconds(raw_seconds, true, true) do
with {:ok, seconds} <- parse_seconds(raw_seconds, true, false) do
{:ok, seconds - 1}
end
end
defp parse_seconds(raw_seconds, true, false) do
with {:ok, seconds} <- parse_seconds(raw_seconds, false, false) do
{:ok, seconds * -1}
end
end
defp parse_seconds(raw_seconds, false, _) do
with {seconds, ""} <- Integer.parse(raw_seconds) do
{:ok, seconds}
end
end
@spec parse_subseconds(String.t(), boolean()) :: {:ok, {integer(), integer()}}
defp parse_subseconds("", _), do: {:ok, {0, 0}}
defp parse_subseconds(raw_subseconds, true) do
with {:ok, {truncated_microseconds, number_of_subsecond_digits}} <-
parse_subseconds(raw_subseconds, false) do
negative_truncated_microseconds =
if truncated_microseconds > 0 do
@one_second_in_microseconds - truncated_microseconds
else
truncated_microseconds
end
{:ok, {negative_truncated_microseconds, number_of_subsecond_digits}}
end
end
defp parse_subseconds(raw_subseconds, false) do
with {subseconds, ""} <- Float.parse("0.#{raw_subseconds}") do
microseconds = (subseconds * :math.pow(10, 6)) |> trunc()
precision = min(String.length(raw_subseconds), @max_subsecond_digits)
truncated_microseconds =
microseconds
|> Integer.digits()
|> Enum.take(@max_subsecond_digits)
|> Integer.undigits()
{:ok, {truncated_microseconds, precision}}
end
end
defp from_tokens(%{context: context}, {seconds, {microseconds, precision}}) do
truncated_microseconds =
microseconds
|> Integer.digits()
|> Enum.take(@max_subsecond_digits)
|> Integer.undigits()
with {:ok, datetime} <- DateTime.from_unix(seconds) do
for_context(context, %{datetime | microsecond: {truncated_microseconds, precision}})
end
end
defp for_context(:datetime, datetime), do: {:ok, datetime}
defp for_context(:time, datetime), do: {:ok, DateTime.to_time(datetime)}
defp for_context(:date, datetime), do: {:ok, DateTime.to_date(datetime)}
defp for_context(context, result) do
{:error, "cannot convert #{inspect(result)} to context #{context}"}
end
end
|
lib/parser/epoch.ex
| 0.886473 | 0.551876 |
epoch.ex
|
starcoder
|
defmodule Chacha20 do
@moduledoc """
Chacha20 symmetric stream cipher
https://tools.ietf.org/html/rfc7539
The calling semantics are still sub-optimal and no performance tuning has been done.
"""
import Bitwise
defp rotl(x, r), do: rem(x <<< r ||| x >>> (32 - r), 0x100000000)
defp sum(x, y), do: rem(x + y, 0x100000000)
@typedoc """
The shared encryption key.
"""
@type key :: binary
@typedoc """
The shared per-session nonce.
By spec, this nonce may be used to encrypt a stream of up to 256GiB
An eight-byte nonce is compatible with the original reference implementation.
"""
@type nonce :: binary
@typedoc """
The parameters and state of the current session
* The shared key
* The session nonce
* The next block number
* The unused portion of the current block
To start from block 0, the initial state is `{k,n,0,""}`
"""
@type chacha_parameters :: {key, nonce, non_neg_integer, binary}
# Many functions below are public but undocumented.
# This is to allow for testing vs the spec, without confusing consumers.
@doc false
def quarterround([a, b, c, d]) do
a = sum(a, b)
d = rotl(d ^^^ a, 16)
c = sum(c, d)
b = rotl(b ^^^ c, 12)
a = sum(a, b)
d = rotl(d ^^^ a, 8)
c = sum(c, d)
b = rotl(b ^^^ c, 7)
[a, b, c, d]
end
@doc false
def diaground([y0, y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13, y14, y15]) do
[z0, z5, z10, z15] = quarterround([y0, y5, y10, y15])
[z1, z6, z11, z12] = quarterround([y1, y6, y11, y12])
[z2, z7, z8, z13] = quarterround([y2, y7, y8, y13])
[z3, z4, z9, z14] = quarterround([y3, y4, y9, y14])
[z0, z1, z2, z3, z4, z5, z6, z7, z8, z9, z10, z11, z12, z13, z14, z15]
end
@doc false
def columnround([x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15]) do
[y0, y4, y8, y12] = quarterround([x0, x4, x8, x12])
[y1, y5, y9, y13] = quarterround([x1, x5, x9, x13])
[y2, y6, y10, y14] = quarterround([x2, x6, x10, x14])
[y3, y7, y11, y15] = quarterround([x3, x7, x11, x15])
[y0, y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13, y14, y15]
end
@doc false
def doubleround(x), do: x |> columnround |> diaground
@doc false
def doublerounds(x, 0), do: x
def doublerounds(x, n), do: x |> doubleround |> doublerounds(n - 1)
@doc false
def littleendian_inv(i), do: i |> :binary.encode_unsigned(:little) |> pad(4)
defp pad(s, n) when rem(byte_size(s), n) == 0, do: s
defp pad(s, n), do: pad(s <> <<0>>, n)
@doc """
Return an arbitrary block
This is probably most useful in fast-forwarding a stream.
"""
@spec block(key, nonce, non_neg_integer) :: binary
def block(k, n, b) when byte_size(k) == 32 do
xs = expand(k, n, b)
xs |> doublerounds(10) |> Enum.zip(xs)
|> Enum.reduce(<<>>, fn {z, x}, acc -> acc <> littleendian_inv(sum(x, z)) end)
end
defp words_as_ints(<<>>, acc), do: acc |> Enum.reverse()
defp words_as_ints(<<word::unsigned-little-integer-size(32), rest::binary>>, acc),
do: words_as_ints(rest, [word | acc])
@doc false
def expand(k, n, b) when byte_size(n) == 12 do
cs = "expand 32-byte k"
words_as_ints(cs <> k <> littleendian_inv(b) <> n, [])
end
def expand(k, n, b) when byte_size(n) == 8, do: expand(k, <<0, 0, 0, 0>> <> n, b)
@doc """
The crypt function suitable for a complete message.
This is a convenience wrapper when the full message is ready for processing.
The operations are symmetric, so if `crypt(m,k,n) = c`, then `crypt(c,k,n) = m`
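## Example
A round-trip sketch; the all-zero key and nonce below are placeholders, any
32-byte key and 8- or 12-byte nonce will do:
iex> k = :binary.copy(<<0>>, 32)
iex> n = :binary.copy(<<0>>, 8)
iex> c = Chacha20.crypt("attack at dawn", k, n)
iex> Chacha20.crypt(c, k, n)
"attack at dawn"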
"""
@spec crypt(binary, key, nonce, non_neg_integer) :: binary
def crypt(m, k, n, c \\ 0) do
{s, _p} = crypt_bytes(m, {k, n, c, ""}, [])
s
end
@doc """
The crypt function suitable for streaming
Use an initial state of `{k,n,0,""}`
The returned parameters can be used for the next available bytes.
Any previous emitted binary can be included in the `acc`, if desired.
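## Example
A streaming sketch; the returned parameters carry the unused key stream into
the next call:
iex> k = :binary.copy(<<0>>, 32)
iex> n = :binary.copy(<<0>>, 8)
iex> {chunk1, state} = Chacha20.crypt_bytes("hello ", {k, n, 0, ""}, [])
iex> {_chunk2, _state} = Chacha20.crypt_bytes("world", state, [])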
"""
@spec crypt_bytes(binary, chacha_parameters, [binary]) :: {binary, chacha_parameters}
def crypt_bytes(<<>>, p, acc), do: {acc |> Enum.reverse() |> Enum.join(), p}
def crypt_bytes(m, {k, n, u, <<>>}, acc), do: crypt_bytes(m, {k, n, u + 1, block(k, n, u)}, acc)
def crypt_bytes(<<m, restm::binary>>, {k, n, u, <<b, restb::binary>>}, acc),
do: crypt_bytes(restm, {k, n, u, restb}, [<<bxor(m, b)>> | acc])
end
|
lib/chacha20.ex
| 0.848847 | 0.769903 |
chacha20.ex
|
starcoder
|
defmodule AWS.Marketplace.Metering do
@moduledoc """
AWS Marketplace Metering Service
This reference provides descriptions of the low-level AWS Marketplace
Metering Service API.
AWS Marketplace sellers can use this API to submit usage data for custom
usage dimensions.
**Submitting Metering Records**
<ul> <li> *MeterUsage*- Submits the metering record for a Marketplace
product. MeterUsage is called from an EC2 instance.
</li> <li> *BatchMeterUsage*- Submits the metering record for a set of
customers. BatchMeterUsage is called from a software-as-a-service (SaaS)
application.
</li> </ul> **Accepting New Customers**
<ul> <li> *ResolveCustomer*- Called by a SaaS application during the
registration process. When a buyer visits your website during the
registration process, the buyer submits a Registration Token through the
browser. The Registration Token is resolved through this API to obtain a
CustomerIdentifier and Product Code.
</li> </ul> **Entitlement and Metering for Paid Container Products**
<ul> <li> Paid container software products sold through AWS Marketplace
must integrate with the AWS Marketplace Metering Service and call the
RegisterUsage operation for software entitlement and metering. Calling
RegisterUsage from containers running outside of Amazon Elastic Container
Service (Amazon ECR) isn't supported. Free and BYOL products for ECS aren't
required to call RegisterUsage, but you can do so if you want to receive
usage data in your seller reports. For more information on using the
RegisterUsage operation, see [Container-Based
Products](https://docs.aws.amazon.com/marketplace/latest/userguide/container-based-products.html).
</li> </ul> BatchMeterUsage API calls are captured by AWS CloudTrail. You
can use Cloudtrail to verify that the SaaS metering records that you sent
are accurate by searching for records with the eventName of
BatchMeterUsage. You can also use CloudTrail to audit records over time.
For more information, see the * [AWS CloudTrail User
Guide](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html)
*.
"""
@doc """
BatchMeterUsage is called from a SaaS application listed on the AWS
Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with
the same records or a subset of the input records.
Every request to BatchMeterUsage is for one product. If you need to meter
usage for multiple products, you must make multiple calls to
BatchMeterUsage.
BatchMeterUsage can process up to 25 UsageRecords at a time.
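## Example
A hedged sketch; `client` is an `%AWS.Client{}`-style struct with credentials,
and the field names follow the BatchMeterUsage request shape:
input = %{
  "ProductCode" => "my-product-code",
  "UsageRecords" => [
    %{
      "CustomerIdentifier" => "customer-1",
      "Dimension" => "users",
      "Quantity" => 3,
      "Timestamp" => 1_546_300_800
    }
  ]
}
{:ok, result, _response} = AWS.Marketplace.Metering.batch_meter_usage(client, input)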
"""
def batch_meter_usage(client, input, options \\ []) do
request(client, "BatchMeterUsage", input, options)
end
@doc """
API to emit metering records. For identical requests, the API is
idempotent. It simply returns the metering record ID.
MeterUsage is authenticated on the buyer's AWS account, generally when
running from an EC2 instance on the AWS Marketplace.
"""
def meter_usage(client, input, options \\ []) do
request(client, "MeterUsage", input, options)
end
@doc """
Paid container software products sold through AWS Marketplace must
integrate with the AWS Marketplace Metering Service and call the
RegisterUsage operation for software entitlement and metering. Calling
RegisterUsage from containers running outside of ECS is not currently
supported. Free and BYOL products for ECS aren't required to call
RegisterUsage, but you may choose to do so if you would like to receive
usage data in your seller reports. The sections below explain the behavior
of RegisterUsage. RegisterUsage performs two primary functions: metering
and entitlement.
<ul> <li> *Entitlement*: RegisterUsage allows you to verify that the
customer running your paid software is subscribed to your product on AWS
Marketplace, enabling you to guard against unauthorized use. Your container
image that integrates with RegisterUsage is only required to guard against
unauthorized use at container startup, as such a
CustomerNotSubscribedException/PlatformNotSupportedException will only be
thrown on the initial call to RegisterUsage. Subsequent calls from the same
Amazon ECS task instance (e.g. task-id) will not throw a
CustomerNotSubscribedException, even if the customer unsubscribes while the
Amazon ECS task is still running.
</li> <li> *Metering*: RegisterUsage meters software use per ECS task, per
hour, with usage prorated to the second. A minimum of 1 minute of usage
applies to tasks that are short lived. For example, if a customer has a 10
node ECS cluster and creates an ECS service configured as a Daemon Set,
then ECS will launch a task on all 10 cluster nodes and the customer will
be charged: (10 * hourly_rate). Metering for software use is automatically
handled by the AWS Marketplace Metering Control Plane -- your software is
not required to perform any metering specific actions, other than call
RegisterUsage once for metering of software use to commence. The AWS
Marketplace Metering Control Plane will also continue to bill customers for
running ECS tasks, regardless of the customers subscription state, removing
the need for your software to perform entitlement checks at runtime.
</li> </ul>
"""
def register_usage(client, input, options \\ []) do
request(client, "RegisterUsage", input, options)
end
@doc """
ResolveCustomer is called by a SaaS application during the registration
process. When a buyer visits your website during the registration process,
the buyer submits a registration token through their browser. The
registration token is resolved through this API to obtain a
CustomerIdentifier and product code.
"""
def resolve_customer(client, input, options \\ []) do
request(client, "ResolveCustomer", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "metering.marketplace"}
host = get_host("metering.marketplace", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSMPMeteringService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/marketplace_metering.ex
| 0.812198 | 0.522263 |
marketplace_metering.ex
|
starcoder
|
defmodule Category do
@typedoc """
Category dictionary
* `id`: the identity morphism, mandatory
* `..`: composition of morphisms, mandatory
* `<<<`: right-to-left composition # default implementation provided, optional
* `>>>`: left-to-right composition # default implementation provided, optional
"""
@type t :: %__MODULE__{
id: any,
}
def __struct__, do: %{
__struct__: __MODULE__,
id: fn -> raise("Category: missing definition for id") end,
..: fn _, _ -> raise("Category: missing definition for ..") end,
<<<: fn _, _ -> raise("Category: missing definition for <<<") end,
>>>: fn _, _ -> raise("Category: missing definition for >>>") end,
}
def __struct__(kv) do
{map, keys} =
Enum.reduce(kv, {__struct__(), [:id, :.., :<<<, :>>>]}, fn {key, val}, {map, keys} ->
{Map.replace!(map, key, val), List.delete(keys, key)}
end)
case keys do
[] ->
map
_ ->
raise ArgumentError,
"the following keys must also be given when building " <>
"struct #{inspect(__MODULE__)}: #{inspect(keys)}"
end
end
def define(base_dict) do
base_dict = Map.new(base_dict)
id = Map.fetch!(base_dict, :id)
compose = Map.fetch!(base_dict, :..)
compose_rtl = Map.get(base_dict, :<<<, compose)
compose_ltr = Map.get(base_dict, :>>>, fn f, g -> compose.(g, f) end)
%__MODULE__{id: id, ..: compose, <<<: compose_rtl, >>>: compose_ltr}
end
# Prelude Control.Category> t1 = (+1) <<< (*2)
# Prelude Control.Category> t1 2
# 5
# Prelude Control.Category> t2 = (+1) >>> (*2)
# Prelude Control.Category> t2 2
# 6
# Prelude Control.Category> t12 = (+1) <<< (*2) <<< negate
# Prelude Control.Category> t12 2
# -3
# Prelude Control.Category> t22 = (+1) >>> (*2) >>> negate
# Prelude Control.Category> t22 2
# -6
@doc """
t1 = fn x -> x + 1 end <<< fn x -> x * 2 end
t1.(2)
t2 = fn x -> x + 1 end >>> fn x -> x * 2 end
t2.(2)
t12 = fn x -> x + 1 end <<< fn x -> x * 2 end <<< fn x -> -x end
t12.(2)
t22 = fn x -> x + 1 end >>> fn x -> x * 2 end >>> fn x -> -x end
t22.(2)
"""
def dummy, do: nil
end
|
typeclassopedia/lib/category.ex
| 0.771026 | 0.464962 |
category.ex
|
starcoder
|
defmodule Quetzal.LiveView do
@moduledoc """
Quetzal Live View provides easy interface to handle events, components and
messages into Quetzal architechture in a fast and fashion way. It uses Phoenix Live View
to render components and their upgrades.
In order to use set the Quetzal.LiveView instead Phoenix.LiveView directly over your
live module:
## Example:
You don't need `mount/2` or `render/1` when using the Quetzal Live View is all done:
defmodule AppWeb.PieLive do
use Quetzal.LiveView
end
In order to render components to the live view use the `components/0` callback:
## Example:
This will render a pie graph into the view using custom components:
defmodule AppWeb.PieLive do
use Quetzal.LiveView
@impl Quetzal.LiveView
def components(_session) do
{"MyApp", [{Quetzal.Graph, [id: "mypiegraph"], [type: "pie", labels: ["RED", "BLUE"], values: [20, 10]]}]}
end
end
You can use the graph and other components to extend your views with custom graphs, tables, etc.
Also is possible upgrade the components from the server live view, use `update_components/2` over your live view:
## Example:
This example generates a random number and push to live view so Quetzal updates the component in the view:
defmodule AppWeb.PieLive do
use Quetzal.LiveView
@impl Quetzal.LiveView
def components(_session) do
{"MyApp", [{Quetzal.Graph, [id: "mypie"], [type: "pie", labels: ["Red", "Blue"], values: [10, 20]]}]}
end
def trigger_update() do
:timer.sleep(5000)
newvalues = for _n <- 1..3, do: :rand.uniform(100)
components = [mypie: [labels: ["Black", "White", "Gray"], values: newvalues]]
update_components("MyApp", components)
trigger_update()
end
end
"""
defmacro __using__(opts) do
quote do
use Phoenix.LiveView
import unquote(__MODULE__)
@behaviour unquote(__MODULE__)
def render(var!(assigns)) do
~L"""
<%= Phoenix.HTML.raw @raw_components %>
"""
end
def mount(session, socket) do
{app, components} = case components(session) do
{app, components} -> {app, components}
components -> {UUID.uuid1(), components}
end
case app do
app when is_binary(app) ->
Registry.register(Quetzal.Registry, app, [])
socket = socket
|> assign(:components, components)
|> assign(:raw_components, """
#{raw_components(components)}
""")
{:ok, socket}
app ->
raise "App should be a String or binary, #{inspect app} was provided."
end
end
def handle_event(event, params, socket) do
# instead delivery to callback, send a message to callback process
# and allow delivery to custom definition implemented, if the params target is missing set
# the same as event so avoid crash from callback handler
params = case Map.get(params, "_target") == nil do
true -> params |> Map.put("_target", [event])
false -> params
end
eval = :gen_server.call(Quetzal.Callback, {:dispatch, unquote(opts), event, params}, :infinity)
socket = case eval do
{:error, _error} ->
socket
outputs when is_list(outputs) ->
# change property in component so we can assign items again to
# allow live view server performs an update
components = socket.assigns[:components]
|> render_new_components(outputs)
socket
|> assign(:state, {event, params |> Map.delete("_target")})
|> assign(:components, components)
|> assign(:raw_components, """
#{raw_components(components)}
""")
_ ->
socket # there is some error thrown by callback
end
{:noreply, socket}
end
def handle_call(:state, _from, socket) do
{:reply, socket.assigns[:state], socket}
end
def handle_info({:upgrade, new_components}, socket) do
components = socket.assigns[:components]
|> render_new_components(new_components)
socket = socket
|> assign(:components, components)
|> assign(:raw_components, """
#{raw_components(components)}
""")
{:noreply, socket}
end
def handle_info(_, socket) do
{:noreply, socket}
end
defp render_new_components(components, new_components) do
components
|> Enum.map(fn {t, opts} ->
id = opts[:id] |> String.to_atom
properties = new_components |> Keyword.get(id, nil)
{t, opts} = case properties do
nil -> {t, opts}
properties -> {t, update_opts(opts, properties)}
end
with [child|_]=children <- Keyword.get(opts, :children, nil),
new_children <- render_new_components(children, new_components)
do
{t, opts |> Keyword.replace!(:children, new_children)}
else
nil -> {t, opts}
_children -> {t, opts}
end
{t, component_opts, opts} ->
id = component_opts[:id] |> String.to_atom
properties = new_components |> Keyword.get(id, nil)
case properties do
nil -> {t, component_opts, opts}
properties -> {t, component_opts, update_opts(opts, properties)}
end
end)
end
defp raw_components(components) do
# since components are passed trough a single config, mask it as html tags
components
|> Enum.map(fn {render, options} ->
options = case are_valid_component?(options) do
true -> raw_components(options)
false -> options
end
case Code.ensure_compiled?(render) do
true -> render.html_tag(options)
false -> {render, options}
end
{render, component_opts, options} -> # render graphs
render.graph(component_opts, options)
end)
end
defp update_opts(opts, properties) do
opts
|> Enum.map(fn {property, output} ->
{property, properties |> Keyword.get(property, output)}
end)
end
defp are_valid_component?([{Quetzal.Graph, _, _} | next]), do: are_valid_component?(next)
defp are_valid_component?([{_, _} | next]), do: are_valid_component?(next)
defp are_valid_component?([]), do: true
defp are_valid_component?(_) do
false
end
end
end
@callback components(session :: map) :: any()
@doc """
Updates the components sending a message to live view,
it receives components as string as in Quetzal components definition
## Example:
components = [mypiegraph: [labels: ["Black", "White", "Gray"], values: [black, white, gray]]]
update_components("MyApp", components)
"""
def update_components(app, components, pids \\ []) do
Registry.dispatch(Quetzal.Registry, app, fn entries ->
entries
|> Enum.each(fn {pid, _} ->
case pids do
[] ->
send(pid, {:upgrade, components})
pids ->
with true <- Enum.member?(pids, pid)
do
send(pid, {:upgrade, components})
else
false -> :ok
end
end
end)
end)
end
@doc """
Returns a key/value pairs for each process in registry connected to the live view socket.
It's used to update components conditionally instead of broadcasting the same for all processes.
## Example:
```
iex(1)> AppWeb.StateLiveView.state("MyApp")
[
{#PID<0.491.0>,
{"myform", %{"_target" => ["mytext"], "mytext" => "hello", "mytext2" => ""}}},
{#PID<0.513.0>,
{"myform", %{"_target" => ["mytext"], "mytext" => "hola", "mytext2" => ""}}}
]
```
In the example there are two process connected but with different states loaded so update will be
applied only for one of them.
"""
def state(app) do
Registry.lookup(Quetzal.Registry, app)
|> Enum.map(fn {pid, _value} ->
{pid, GenServer.call(pid, :state)}
end)
end
end
|
lib/quetzal_live_view.ex
| 0.880245 | 0.567098 |
quetzal_live_view.ex
|
starcoder
|
defmodule Oli.Qa do
@moduledoc """
Qa uses two tables:
1) Reviews
2) Warnings
The reviews table links a project to a high-level review "type," for example accessibility, content, or pedagogy.
Reviews are marked completed when all of the warnings for that type are finished processing and created.
Reviews are used in the UI to keep track of the progress of course reviews: some reviews may take longer to
run than others, for example if they are resolving remote URLs or doing complex parsing logic across many resources.
The warnings table links a review to specific action item a author can take to improve their project.
Each warning has a subtype which indicates what explanation and action item should be shown to the author.
Warnings are directly shown in the UI as a list of dismissable action items.
For example, the structure looks something like this:
Project ->
Many reviews of type "accessibility," "pedagogy," etc.
A review may be completed or still processing
Review ->
Many warnings of subtype "broken link," "no attached objectives," etc.
A warning usually has some content that contains the issue (like the json for the broken link).
It also maps to a description and an action item in the `ProjectView`.
"""
alias Oli.Authoring.Course
alias Oli.Qa.{Reviews}
alias Oli.Qa.Reviewers.{Accessibility, Content, Pedagogy, Equity}
def review_project(project_slug) do
Reviews.delete_reviews(Course.get_project_by_slug(project_slug).id)
# Each review must create a row in the review table for the review type
# When the review is completed (all warnings finished processing), it must be marked as done.
project_slug
|> Accessibility.review()
|> Content.review()
|> Pedagogy.review()
|> maybe_equity_review()
end
defp maybe_equity_review(project_slug) do
case Oli.Features.enabled?("equity") do
true -> Equity.review(project_slug)
false -> project_slug
end
end
end
|
lib/oli/qa.ex
| 0.760028 | 0.565839 |
qa.ex
|
starcoder
|
defmodule Jabbax.Serializer do
@moduledoc false
use Jabbax.Document
alias Jabbax.StructureError
def call(doc = %Document{}) do
doc
|> dig_and_serialize_data
|> dig_and_serialize_included
|> dig_and_serialize_meta
|> dig_and_serialize_errors
|> dig_and_serialize_links
|> struct_to_map_with_present_keys
|> put_empty_data
|> put_version
end
def call(arg) do
raise(StructureError, context: "document", expected: "Document", actual: arg)
end
defp dig_and_serialize_data(parent = %{}) do
Map.update!(parent, :data, &serialize_data/1)
end
defp serialize_data(nil), do: nil
defp serialize_data(list) when is_list(list), do: Enum.map(list, &serialize_data/1)
defp serialize_data(resource_identifier = %ResourceId{}) do
resource_identifier
|> dig_and_serialize_type
|> dig_and_serialize_id
|> dig_and_serialize_meta
|> struct_to_map_with_present_keys
end
defp serialize_data(resource_object = %Resource{}) do
resource_object
|> dig_and_serialize_type
|> dig_and_serialize_id
|> dig_and_serialize_attributes
|> dig_and_serialize_meta
|> dig_and_serialize_relationships
|> dig_and_serialize_links
|> struct_to_map_with_present_keys
end
defp serialize_data(arg) do
raise(StructureError,
context: "data",
expected: ~w{Resource ResourceId [Resource] [ResourceId]},
actual: arg
)
end
defp dig_and_serialize_included(parent = %{}) do
Map.update!(parent, :included, &serialize_included/1)
end
defp serialize_included([]), do: nil
defp serialize_included(list) when is_list(list), do: serialize_data(list)
defp serialize_included(arg) do
raise(StructureError, context: "included", expected: "[Resource]", actual: arg)
end
defp dig_and_serialize_relationships(parent = %{}) do
Map.update!(parent, :relationships, &serialize_relationships/1)
end
defp serialize_relationships(nil), do: nil
defp serialize_relationships(relationship_map) when relationship_map == %{}, do: nil
defp serialize_relationships(relationship_map = %{}) do
relationship_map
|> Enum.map(&serialize_relationship_pair/1)
|> Enum.into(%{})
end
defp serialize_relationships(arg) do
raise(StructureError, context: "relationships", expected: "%{key => Relationship}", actual: arg)
end
defp serialize_relationship_pair({name, relationship}) do
{serialize_key(name), serialize_relationship(relationship)}
end
defp serialize_relationship(relationship = %Relationship{}) do
relationship
|> dig_and_serialize_data
|> dig_and_serialize_meta
|> dig_and_serialize_links
|> struct_to_map_with_present_keys
|> put_empty_data
end
defp serialize_relationship(data) when is_list(data) or is_map(data) do
%{
data: serialize_data(data)
}
end
defp serialize_relationship(arg) do
raise(StructureError, context: "relationship", expected: "Relationship", actual: arg)
end
defp dig_and_serialize_links(parent = %{}) do
Map.update!(parent, :links, &serialize_links/1)
end
defp serialize_links(nil), do: nil
defp serialize_links(link_map) when link_map == %{}, do: nil
defp serialize_links(link_map = %{}) do
link_map
|> Enum.map(&serialize_link_pair/1)
|> Enum.into(%{})
end
defp serialize_links(arg) do
raise(StructureError, context: "links", expected: "%{key => Link}", actual: arg)
end
defp serialize_link_pair({name, link}) do
{serialize_key(name), serialize_link(link)}
end
defp serialize_link(href) when is_binary(href), do: href
defp serialize_link(%{href: href, meta: nil} = %Link{}) when is_binary(href), do: href
defp serialize_link(%{href: href, meta: meta} = %Link{}) when is_binary(href) and meta == %{} do
href
end
defp serialize_link(link = %{href: href} = %Link{}) when is_binary(href) do
link
|> dig_and_serialize_meta
|> struct_to_map_with_present_keys
end
defp serialize_link(%Link{href: arg}) do
raise(StructureError, context: "Link.href", expected: "binary", actual: arg)
end
defp serialize_link(arg) do
raise(StructureError, context: "link", expected: "Link", actual: arg)
end
defp dig_and_serialize_errors(parent = %{}), do: Map.update!(parent, :errors, &serialize_errors/1)
defp serialize_errors(nil), do: nil
defp serialize_errors([]), do: nil
defp serialize_errors(error_list) when is_list(error_list) do
Enum.map(error_list, &serialize_error/1)
end
defp serialize_errors(arg) do
raise(StructureError, context: "errors", expected: "[Error]", actual: arg)
end
defp serialize_error(error = %Error{}) do
error
|> dig_and_serialize_meta
|> dig_and_serialize_key(:code)
|> dig_and_serialize_error_source
|> dig_and_serialize_links
|> struct_to_map_with_present_keys
end
defp dig_and_serialize_error_source(parent = %{}) do
Map.update!(parent, :source, &serialize_error_source/1)
end
defp serialize_error_source(nil), do: nil
defp serialize_error_source(error_source = %ErrorSource{}) do
error_source
|> dig_and_serialize_key(:parameter)
|> dig_and_serialize_key(:pointer)
|> struct_to_map_with_present_keys
end
defp serialize_error_source(arg) do
raise(StructureError, context: "error source", expected: "ErrorSource", actual: arg)
end
defp dig_and_serialize_attributes(parent = %{}) do
Map.update!(parent, :attributes, &serialize_key_values/1)
end
defp dig_and_serialize_meta(parent = %{}) do
Map.update!(parent, :meta, &serialize_key_values/1)
end
defp dig_and_serialize_type(parent = %{}), do: dig_and_serialize_key(parent, :type)
defp dig_and_serialize_id(parent = %{}), do: Map.update!(parent, :id, &serialize_id/1)
defp serialize_id(id) when is_integer(id), do: Integer.to_string(id)
defp serialize_id(id) when is_binary(id), do: id
defp serialize_id(arg) do
raise(StructureError, context: "id", expected: ~w{binary integer}, actual: arg)
end
defp dig_and_serialize_key(parent = %{}, key), do: Map.update!(parent, key, &serialize_key/1)
defp serialize_key(nil), do: nil
defp serialize_key(key) when is_atom(key), do: key |> Atom.to_string() |> serialize_key
defp serialize_key(key) when is_binary(key), do: String.replace(key, "_", "-")
defp serialize_key(arg) do
raise(StructureError, context: "key", expected: ~w{binary atom}, actual: arg)
end
defp serialize_key_values(nil), do: nil
defp serialize_key_values(key_values_map) when key_values_map == %{}, do: nil
defp serialize_key_values(key_values_map = %{}) do
key_values_map
|> Enum.map(&serialize_key_value_pair/1)
|> Enum.into(%{})
end
defp serialize_key_values(arg) do
raise(StructureError, context: "attributes/meta", expected: "%{key => value}", actual: arg)
end
defp serialize_key_value_pair({name, value}) do
{serialize_key(name), serialize_value(value)}
end
defp serialize_value(key_values_map = %{}), do: serialize_key_values(key_values_map)
defp serialize_value(value_list) when is_list(value_list) do
Enum.map(value_list, &serialize_value/1)
end
defp serialize_value(value), do: value
defp struct_to_map_with_present_keys(struct = %{}) do
struct
|> Map.from_struct()
|> Enum.filter(fn {_, value} -> value != nil end)
|> Enum.into(%{})
end
defp put_empty_data(doc = %{data: _}), do: doc
defp put_empty_data(doc = %{meta: _}), do: doc
defp put_empty_data(doc = %{errors: _}), do: doc
defp put_empty_data(doc = %{}), do: Map.put(doc, :data, nil)
defp put_version(doc = %{jsonapi: _}), do: doc
defp put_version(doc = %{}), do: Map.put(doc, :jsonapi, %{version: "1.0"})
end
|
lib/jabbax/serializer.ex
| 0.686895 | 0.468851 |
serializer.ex
|
starcoder
|
defmodule DarkMatter.Modules do
@moduledoc """
Utils for working with modules.
"""
@moduledoc since: "1.0.5"
import DarkMatter.Guards, only: [is_module: 1]
import DarkMatter.Mfas, only: [is_mfa: 1]
alias DarkMatter.Structs
@type path() :: String.t()
@doc """
Determine if a given module contains a `defstruct` definition.
## Examples
iex> defines_struct?(IO.Stream)
true
iex> defines_struct?(IO)
false
iex> defines_struct?(nil)
** (FunctionClauseError) no function clause matching in DarkMatter.Modules.defines_struct?/1
"""
@spec defines_struct?(module()) :: boolean()
def defines_struct?(module) when is_module(module) do
exports?({module, :__struct__, 0}) and exports?({module, :__struct__, 1})
end
@doc """
Determine if a given module contains a `defstruct` definition.
## Examples
iex> exports?({Kernel, :+, 2})
true
iex> exports?({Kernel, :*, 0})
false
iex> exports?({Kernel, :/, -100})
** (FunctionClauseError) no function clause matching in DarkMatter.Modules.exports?/1
iex> exports?(nil)
** (FunctionClauseError) no function clause matching in DarkMatter.Modules.exports?/1
"""
@spec exports?(mfa()) :: boolean()
def exports?({module, fun, arity} = mfa) when is_mfa(mfa) do
if Code.ensure_loaded?(module) do
Kernel.function_exported?(module, fun, arity)
else
{fun, arity} in module.__info__(:functions)
end
end
@doc """
Determine keys for a given `module` or raises `ArgumentError`.
## Examples
iex> struct_keys!(IO.Stream)
[:device, :line_or_bytes, :raw]
iex> struct_keys!(IO)
** (ArgumentError) Expected `defstruct` definition for: `IO`
iex> struct_keys!(nil)
** (FunctionClauseError) no function clause matching in DarkMatter.Modules.struct_keys!/1
"""
@spec struct_keys!(module()) :: [atom()]
def struct_keys!(module) when is_module(module) do
unless defines_struct?(module) do
raise ArgumentError,
message: "Expected `defstruct` definition for: `#{inspect(module)}`"
end
Structs.keys(module.__struct__())
end
@doc """
Loads all accessible modules and returns the list of loaded modules.
"""
@spec load_all() :: [module()]
def load_all do
Enum.flat_map(:code.get_path(), &load_dir/1)
end
@doc """
Loads all accessible modules from a given `dir` and returns the list of loaded modules.
"""
@spec load_dir(path()) :: [module()]
def load_dir(dir) when is_binary(dir) do
dir
|> File.ls!()
|> Stream.filter(&beam_file?/1)
|> Stream.map(&beam_to_module/1)
|> Enum.map(fn module ->
:ok = load_path(module, "#{dir}/#{module}")
module
end)
end
@doc """
Determine if a given `path` is a beam file.
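## Examples
Two quick checks against typical entries in a build directory:
iex> beam_file?("Elixir.Enum.beam")
true
iex> beam_file?("mix.exs")
false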
"""
@spec beam_file?(path()) :: boolean()
def beam_file?(path) when is_binary(path) do
String.ends_with?(path, ".beam")
end
@doc """
Turn a given `path` into its `module`.
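## Examples
An Elixir-compiled beam file maps back to its module alias:
iex> beam_to_module("Elixir.Enum.beam")
Enum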
"""
@spec beam_to_module(path()) :: module()
def beam_to_module(path) when is_binary(path) do
~r/(\.beam)$/
|> Regex.replace(path, fn _binary, _capture -> "" end)
|> String.to_atom()
end
@doc """
Loads all accessible modules, and returns list of loaded modules
"""
@spec load_path(module(), path()) ::
:ok
| {:error, :badfile}
| {:error, :nofile}
| {:error, :not_purged}
| {:error, :on_load_failure}
| {:error, :sticky_directory}
| {:error, {:module_mismatch, module()}}
def load_path(module, path) when is_module(module) and is_binary(path) do
case do_load_path(module, path) do
{:module, ^module} -> :ok
{:module, module} -> {:error, {:module_mismatch, module}}
{:error, error} -> {:error, error}
end
end
defp do_load_path(module, path) when is_module(module) and is_binary(path) do
if Code.ensure_loaded?(module) do
{:module, module}
else
path
|> String.to_charlist()
|> :code.load_abs()
end
end
end
|
lib/dark_matter/modules.ex
| 0.888414 | 0.455744 |
modules.ex
|
starcoder
|
defmodule Adventofcode.Day07RecursiveCircus do
defstruct name: nil, weight: nil, children: [], parent: nil, total_weight: 0
def bottom_program(input) do
input
|> parse()
|> start_programs()
|> connect_programs()
|> find_bottom_and_stop_programs()
end
def unbalanced_weight(input) do
input
|> parse()
|> start_programs()
|> connect_programs()
|> find_unbalanced_weight_and_stop_programs()
end
defp start_programs(programs) do
Enum.reduce(programs, %{}, fn {program_name, program_state}, acc ->
{:ok, pid} = Agent.start_link(fn -> program_state end)
Map.put(acc, program_name, pid)
end)
end
defp connect_programs(programs) do
programs
|> Map.values()
|> Enum.map(&set_children_pids(&1, programs))
|> Enum.map(&set_parent_pids(&1, programs))
end
defp set_children_pids(pid, programs) do
Agent.update(pid, fn program ->
children =
Enum.reduce(program.children, %{}, fn child, acc ->
Map.put(acc, child, Map.get(programs, child))
end)
%{program | children: children}
end)
pid
end
defp set_parent_pids(pid, programs) do
Agent.update(pid, fn program ->
parent =
programs
|> Map.values()
|> Enum.reject(&(&1 == pid))
|> Enum.find(&parent_of(&1, pid))
%{program | parent: parent}
end)
pid
end
defp parent_of(program_pid, target_pid) do
Agent.get(program_pid, &(target_pid in Map.values(&1.children)))
end
defp find_bottom_and_stop_programs([pid | _] = programs) do
program = find_bottom(pid)
Enum.each(programs, &Agent.stop/1)
program.name
end
defp find_unbalanced_weight_and_stop_programs([pid | _] = programs) do
bottom_program = find_bottom(pid)
weight = find_unbalanced_program(bottom_program)
Enum.each(programs, &Agent.stop/1)
weight
end
defp find_unbalanced_program(program, expected_weight \\ nil) do
case grouped_weights(program.children) do
[{_unbalanced_weight, [child]}, {expected_weight, _}] ->
find_unbalanced_program(child, expected_weight)
[{_balanced_weight, _children}] ->
expected_weight - program.total_weight + program.weight
end
end
def grouped_weights([]), do: nil
def grouped_weights(children) do
children
|> Map.values()
|> Enum.map(&Agent.get(&1, fn p -> p end))
|> Enum.map(&%{&1 | total_weight: get_total_weight(&1)})
|> Enum.group_by(& &1.total_weight)
|> Enum.sort_by(fn {_, programs} -> length(programs) end)
end
def get_total_weight(program) do
children_weight =
program.children
|> Map.values()
|> Enum.map(&Agent.get(&1, fn p -> p end))
|> Enum.map(&get_total_weight/1)
|> Enum.sum()
program.weight + children_weight
end
defp find_bottom(pid) do
program = Agent.get(pid, & &1)
case program do
%{parent: nil} -> program
%{parent: pid} -> find_bottom(pid)
end
end
defp parse(input) do
input
|> String.trim_trailing()
|> String.split("\n")
|> Enum.map(&parse_line/1)
|> Enum.map(&{&1.name, &1})
|> Enum.into(%{})
end
defp parse_line(line) do
~r/(?<name>\w+)\s+\((?<weight>\w+)\)(?:\s+\-\>\s+(?<children>[\w,\s]+))?/
|> Regex.named_captures(line)
|> build_struct()
end
defp parse_children(""), do: []
defp parse_children(data), do: String.split(data, ", ")
defp build_struct(%{"name" => name, "weight" => weight, "children" => children}) do
weight = String.to_integer(weight)
%__MODULE__{name: name, weight: weight, children: parse_children(children)}
end
end
|
lib/day_07_recursive_circus.ex
| 0.630116 | 0.462048 |
day_07_recursive_circus.ex
|
starcoder
|
defmodule Roadtrip.Garage.Measurement do
@moduledoc """
Describes an odometer measurement for a vehicle.
Measurements are always odometer/timestamp pairs. They may be extended with
additional information in the future.
"""
use Roadtrip.Schema
import Ecto.Changeset
alias Roadtrip.Garage.{Vehicle, Refuel}
schema "measurements" do
field :odometer, :decimal
field :moment, :utc_datetime
# Refueling is embedded here, but will mostly be nil.
field :price, :decimal
field :volume, :decimal
belongs_to :vehicle, Vehicle
# These are used to manage the form presented by the web interface.
# TODO(myrrlyn): Migrate them strictly into RoadtripWeb.
field :tz, :string, virtual: true
field :use_refuel, :boolean, virtual: true
timestamps()
end
@doc false
def changeset(measurement, attrs) do
measurement
|> cast(attrs, [:odometer, :moment, :vehicle_id])
|> cast(attrs, [:price, :volume])
|> cast(attrs, [:tz, :use_refuel])
|> validate_required([:odometer, :moment, :vehicle_id])
|> validate_inclusion(:tz, Tzdata.zone_list(),
message: "This is not a known timezone identifier"
)
|> update_change(:moment, fn moment ->
try do
moment
|> DateTime.to_naive()
|> DateTime.from_naive!(attrs["tz"])
|> DateTime.shift_zone!("Etc/UTC")
rescue
_ -> moment
end
end)
end
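  # Illustration of the timezone normalization above (a sketch, assuming a tz
  # database such as Tzdata is configured):
  #
  #     ~U[2021-06-01 12:30:00Z]
  #     |> DateTime.to_naive()
  #     |> DateTime.from_naive!("America/New_York")
  #     |> DateTime.shift_zone!("Etc/UTC")
  #     # => ~U[2021-06-01 16:30:00Z]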
@doc """
Gets the `Refuel` structure associated with a `Measurement`.
`Refuel` is a logical structure that covers the two database columns `:price`
and `:volume`. You may access these columns directly for numeric queries, but
should generally prefer to work with `Refuel` structures for most rendering
work.
"""
@spec refuel(%__MODULE__{}) :: %Refuel{}
def refuel(%__MODULE__{price: p, volume: v}) do
changes = %Refuel{} |> Refuel.changeset(%{price: p, volume: v})
if changes.valid? do
changes |> apply_changes()
else
nil
end
end
@doc """
Renders the timestamp value in a relatively human-friendly manner.
"""
def show_moment(%__MODULE__{moment: moment}),
do: moment |> Timex.format!("{YYYY} {Mshort} {0D} at {h24}:{m}")
@doc """
Renders the odometer value with a delimiter every three digits
"""
def show_odometer(%__MODULE__{odometer: odo}),
do: odo |> Number.Delimit.number_to_delimited(precision: 1, delimiter: ",")
@doc """
Parses a row from a CSV batch-upload file.
The CSV file **must** have the following columns:
- `Date`: A datetime formatted as `YYYY-MM-DD hh:mm`. It is assumed to be UTC.
- `Odometer`: An integer. Tenths are currently not supported.
The CSV file *may* have the following columns:
- `Price`: a decimal number. It may have a currency symbol; all non-number
characters are removed before parsing.
- `Volume`: a decimal number.
"""
  @spec parse_csv_row(%{String.t() => String.t()}) :: map() | nil
def parse_csv_row(%{
"Date" => date,
"Odometer" => odometer,
"Price" => price,
"Volume" => volume
}) do
    moment =
      case Timex.parse(date, "{YYYY}-{0M}-{0D} {h24}:{m}") do
        {:ok, moment} -> DateTime.from_naive!(moment, "Etc/UTC")
        {:error, _} -> raise ArgumentError, "Failed to parse #{date}"
      end
price = Regex.replace(~r/[^0-9\.]*/, price, "")
cs =
%__MODULE__{}
|> cast(%{odometer: odometer, moment: moment, price: price, volume: volume}, [
:odometer,
:moment,
:price,
:volume
])
if cs.valid? do
cs.changes
else
nil
end
end
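  # Usage sketch (hypothetical row): a valid CSV row yields the changes map, e.g.
  #
  #     parse_csv_row(%{
  #       "Date" => "2021-06-01 12:30",
  #       "Odometer" => "12345",
  #       "Price" => "$30.00",
  #       "Volume" => "10.5"
  #     })
  #     # => %{odometer: ..., moment: ~U[2021-06-01 12:30:00Z], price: ..., volume: ...}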
end
|
apps/roadtrip/lib/roadtrip/garage/measurement.ex
| 0.626581 | 0.558929 |
measurement.ex
|
starcoder
|
defmodule Mix.Tasks.Run do
use Mix.Task
@shortdoc "Starts and runs the current application"
@moduledoc """
Starts the current application and runs code.
`mix run` can be used to start the current application dependencies,
the application itself, and optionally run some code in its context.
For long running systems, this is typically done with the `--no-halt`
option:
mix run --no-halt
Once the current application and its dependencies have been started,
you can run a script in its context by passing a filename:
mix run my_app_script.exs arg1 arg2 arg3
Code to be executed can also be passed inline with the `-e` option:
mix run -e "DbUtils.delete_old_records()" -- arg1 arg2 arg3
In both cases, the command-line arguments for the script or expression
are available in `System.argv/0`.
Before doing anything, Mix will compile the current application if
needed, unless you pass `--no-compile`.
If for some reason the application needs to be configured before it is
started, the `--no-start` option can be used and you are then responsible
for starting all applications by using functions such as
`Application.ensure_all_started/1`. For more information about the
application life-cycle and dynamically configuring applications, see
the `Application` module.
If you need to pass options to the Elixir executable at the same time
you use `mix run`, it can be done as follows:
elixir --sname hello -S mix run --no-halt
This task is automatically reenabled, so it can be called multiple times
with different arguments.
## Command-line options
* `--config`, `-c` - loads the given configuration file
* `--eval`, `-e` - evaluates the given code
* `--require`, `-r` - executes the given pattern/file
* `--parallel`, `-p` - makes all requires parallel
* `--preload-modules` - preloads all modules defined in applications
* `--no-compile` - does not compile even if files require compilation
* `--no-deps-check` - does not check dependencies
* `--no-archives-check` - does not check archives
* `--no-halt` - does not halt the system after running the command
* `--no-mix-exs` - allows the command to run even if there is no mix.exs
* `--no-start` - does not start applications after compilation
* `--no-elixir-version-check` - does not check the Elixir version from mix.exs
"""
@impl true
def run(args) do
{opts, head} =
OptionParser.parse_head!(
args,
aliases: [r: :require, p: :parallel, e: :eval, c: :config],
strict: [
parallel: :boolean,
require: :keep,
eval: :keep,
config: :keep,
mix_exs: :boolean,
halt: :boolean,
compile: :boolean,
deps_check: :boolean,
start: :boolean,
archives_check: :boolean,
elixir_version_check: :boolean,
parallel_require: :keep,
preload_modules: :boolean
]
)
run(args, opts, head, &Code.eval_string/1, &Code.require_file/1)
unless Keyword.get(opts, :halt, true), do: Process.sleep(:infinity)
Mix.Task.reenable("run")
:ok
end
@doc false
@spec run(
OptionParser.argv(),
keyword,
OptionParser.argv(),
(String.t() -> term()),
(String.t() -> term())
) :: :ok
def run(args, opts, head, expr_evaluator, file_evaluator) do
# TODO: Remove on v2.0
opts =
Enum.flat_map(opts, fn
{:parallel_require, value} ->
IO.warn(
"the --parallel-require option is deprecated in favour of using " <>
"--parallel to make all requires parallel and --require VAL for requiring"
)
[require: value, parallel: true]
opt ->
[opt]
end)
{file, argv} =
case {Keyword.has_key?(opts, :eval), head} do
{true, _} -> {nil, head}
{_, [head | tail]} -> {head, tail}
{_, []} -> {nil, []}
end
System.argv(argv)
process_config(opts)
# Start app after rewriting System.argv,
# but before requiring and evaling.
cond do
Mix.Project.get() ->
Mix.Task.run("app.start", args)
"--no-mix-exs" in args ->
:ok
true ->
Mix.raise(
"Cannot execute \"mix run\" without a Mix.Project, " <>
"please ensure you are running Mix in a directory with a mix.exs file " <>
"or pass the --no-mix-exs option"
)
end
process_load(opts, expr_evaluator)
if file do
if File.regular?(file) do
file_evaluator.(file)
else
Mix.raise("No such file: #{file}")
end
end
:ok
end
defp process_config(opts) do
Enum.each(opts, fn
{:config, value} ->
Mix.Task.run("loadconfig", [value])
_ ->
:ok
end)
end
defp process_load(opts, expr_evaluator) do
require_runner =
if opts[:parallel] do
fn files ->
case Kernel.ParallelCompiler.require(files) do
{:ok, _, _} -> :ok
{:error, _, _} -> exit({:shutdown, 1})
end
end
else
fn files -> Enum.each(files, &Code.require_file/1) end
end
Enum.each(opts, fn
{:require, value} ->
case filter_patterns(value) do
[] ->
Mix.raise("No files matched pattern #{inspect(value)} given to --require")
filtered ->
require_runner.(filtered)
end
{:eval, value} ->
expr_evaluator.(value)
_ ->
:ok
end)
end
defp filter_patterns(pattern) do
Enum.filter(Enum.uniq(Path.wildcard(pattern)), &File.regular?(&1))
end
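  # Example (sketch): `--require` values are expanded as globs, e.g.
  #
  #     filter_patterns("test/**/*_test.exs")
  #     # => unique regular files matching the pattern, in `Path.wildcard/1` order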
end
|
lib/mix/lib/mix/tasks/run.ex
| 0.75505 | 0.406656 |
run.ex
|
starcoder
|
defmodule Polyglot.Interpreter do
alias Polyglot.Parser
import Polyglot.Plural
def interpret(lang, str, args \\ %{}) do
{:ok, ast} = Parser.parse(str)
interpret_ast(ast, %{lang: lang, printer: nil}, args)
end
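  # Usage sketch (hedged — assumes the parser accepts ICU-style plural messages
  # and that English plural data is available to `Polyglot.Plural`):
  #
  #     interpret("en", "{count, plural, one {# item} other {# items}}", %{"count" => 2})
  #     # => iodata rendering as "2 items"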
def interpret_ast({:select, arg, m}, env, args) do
v = Map.get(args, arg)
case Map.get(m, v) do
nil ->
case Map.get(m, "other") do
nil ->
["{Unknown SELECT option `", to_string(v), "` with arg `", arg, "`}"]
node ->
interpret_ast(node, %{env | printer: v}, args)
end
node ->
interpret_ast(node, %{env | printer: v}, args)
end
end
def interpret_ast({:plural, arg, m}, env, args) do
v = Map.get(args, arg)
p = pluralise(env.lang, :cardinal, v)
case Map.get(m, "=#{v}") do
nil ->
case Map.get(m, p) do
nil ->
["{Uncovered PLURAL result `", p, "` from `", v, "`}"]
node ->
interpret_ast(node, %{env | printer: to_string(v)}, args)
end
node ->
interpret_ast(node, %{env | printer: to_string(v)}, args)
end
end
def interpret_ast({:ordinal, arg, m}, env, args) do
v = Map.get(args, arg)
p = pluralise(env.lang, :ordinal, v)
case Map.get(m, "=#{v}") do
nil ->
case Map.get(m, p) do
nil ->
["{Uncovered ORDINAL result `", p, "` from `", v, "`}"]
node ->
interpret_ast(node, %{env | printer: to_string(v)}, args)
end
node ->
interpret_ast(node, %{env | printer: to_string(v)}, args)
end
end
def interpret_ast({:range, arg, m}, env, args) do
v = Map.get(args, arg)
formatted_range = Polyglot.Compiler.format_range(v)
p = pluralise(env.lang, :range, v)
case Map.get(m, p) do
nil ->
["{Uncovered RANGE result `", p, "` from `", formatted_range, "`}"]
node ->
interpret_ast(node, %{env | printer: formatted_range}, args)
end
end
def interpret_ast(tokens, env, args) when is_list(tokens) do
for token <- tokens, do: interpret_ast(token, env, args)
end
def interpret_ast({:variable, var_name}, _env, args) do
to_string Map.get(args, var_name)
end
def interpret_ast(:hash, env, _args) do
    if env.printer, do: env.printer, else: "#"
end
def interpret_ast(:comma, _env, _args), do: ","
def interpret_ast(s, _env, _args) when is_bitstring(s) do
s
end
end
|
lib/polyglot/interpreter.ex
| 0.513425 | 0.465813 |
interpreter.ex
|
starcoder
|
defmodule AWS.EFS do
@moduledoc """
Amazon Elastic File System
Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage
for use with Amazon EC2 Linux and Mac instances in the Amazon Web Services
Cloud.
With Amazon EFS, storage capacity is elastic, growing and shrinking
automatically as you add and remove files, so that your applications have the
storage they need, when they need it. For more information, see the [Amazon Elastic File System API
Reference](https://docs.aws.amazon.com/efs/latest/ug/api-reference.html) and the
[Amazon Elastic File System User Guide](https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2015-02-01",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "elasticfilesystem",
global?: false,
protocol: "rest-json",
service_id: "EFS",
signature_version: "v4",
signing_name: "elasticfilesystem",
target_prefix: nil
}
end
@doc """
Creates an EFS access point.
An access point is an application-specific view into an EFS file system that
applies an operating system user and group, and a file system path, to any file
system request made through the access point. The operating system user and
group override any identity information provided by the NFS client. The file
system path is exposed as the access point's root directory. Applications using
the access point can only access data in the application's own directory and any
subdirectories. To learn more, see [Mounting a file system using EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html).
This operation requires permissions for the
`elasticfilesystem:CreateAccessPoint` action.
"""
def create_access_point(%Client{} = client, input, options \\ []) do
url_path = "/2015-02-01/access-points"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a new, empty file system.
The operation requires a creation token in the request that Amazon EFS uses to
ensure idempotent creation (calling the operation with same creation token has
no effect). If a file system does not currently exist that is owned by the
caller's Amazon Web Services account with the specified creation token, this
operation does the following:
* Creates a new, empty file system. The file system will have an
Amazon EFS assigned ID, and an initial lifecycle state `creating`.
* Returns with the description of the created file system.
Otherwise, this operation returns a `FileSystemAlreadyExists` error with the ID
of the existing file system.
For basic use cases, you can use a randomly generated UUID for the creation
token.
The idempotent operation allows you to retry a `CreateFileSystem` call without
risk of creating an extra file system. This can happen when an initial call
fails in a way that leaves it uncertain whether or not a file system was
actually created. An example might be that a transport level timeout occurred or
your connection was reset. As long as you use the same creation token, if the
initial call had succeeded in creating a file system, the client can learn of
its existence from the `FileSystemAlreadyExists` error.
For more information, see [Creating a file system](https://docs.aws.amazon.com/efs/latest/ug/creating-using-create-fs.html#creating-using-create-fs-part1)
in the *Amazon EFS User Guide*.
The `CreateFileSystem` call returns while the file system's lifecycle state is
still `creating`. You can check the file system creation status by calling the
`DescribeFileSystems` operation, which among other things returns the file
system state.
This operation accepts an optional `PerformanceMode` parameter that you choose
for your file system. We recommend `generalPurpose` performance mode for most
file systems. File systems using the `maxIO` performance mode can scale to
higher levels of aggregate throughput and operations per second with a tradeoff
of slightly higher latencies for most file operations. The performance mode
can't be changed after the file system has been created. For more information,
see [Amazon EFS performance modes](https://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html).
You can set the throughput mode for the file system using the `ThroughputMode`
parameter.
After the file system is fully created, Amazon EFS sets its lifecycle state to
`available`, at which point you can create one or more mount targets for the
file system in your VPC. For more information, see `CreateMountTarget`. You
mount your Amazon EFS file system on an EC2 instances in your VPC by using the
mount target. For more information, see [Amazon EFS: How it Works](https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
This operation requires permissions for the `elasticfilesystem:CreateFileSystem`
action.
"""
def create_file_system(%Client{} = client, input, options \\ []) do
url_path = "/2015-02-01/file-systems"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
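  # Usage sketch (hedged): given a configured `%AWS.Client{}` struct `client`,
  # an idempotent create might look like the call below; the exact return shape
  # depends on the aws-elixir version in use.
  #
  #     {:ok, body, _response} =
  #       AWS.EFS.create_file_system(client, %{"CreationToken" => "my-unique-token"})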
@doc """
Creates a mount target for a file system.
You can then mount the file system on EC2 instances by using the mount target.
You can create one mount target in each Availability Zone in your VPC. All EC2
instances in a VPC within a given Availability Zone share a single mount target
for a given file system. If you have multiple subnets in an Availability Zone,
you create a mount target in one of the subnets. EC2 instances do not need to be
in the same subnet as the mount target in order to access their file system.
You can create only one mount target for an EFS file system using One Zone
storage classes. You must create that mount target in the same Availability Zone
in which the file system is located. Use the `AvailabilityZoneName` and
`AvailabiltyZoneId` properties in the `DescribeFileSystems` response object to
get this information. Use the `subnetId` associated with the file system's
Availability Zone when creating the mount target.
For more information, see [Amazon EFS: How it Works](https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
To create a mount target for a file system, the file system's lifecycle state
must be `available`. For more information, see `DescribeFileSystems`.
In the request, provide the following:
* The file system ID for which you are creating the mount target.
* A subnet ID, which determines the following:
* The VPC in which Amazon EFS creates the mount target
* The Availability Zone in which Amazon EFS creates the
mount target
* The IP address range from which Amazon EFS selects the
IP address of the mount target (if you don't specify an IP address in the
request)
After creating the mount target, Amazon EFS returns a response that includes, a
`MountTargetId` and an `IpAddress`. You use this IP address when mounting the
file system in an EC2 instance. You can also use the mount target's DNS name
when mounting the file system. The EC2 instance on which you mount the file
system by using the mount target can resolve the mount target's DNS name to its
IP address. For more information, see [How it Works: Implementation Overview](https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation).
Note that you can create mount targets for a file system in only one VPC, and
there can be only one mount target per Availability Zone. That is, if the file
system already has one or more mount targets created for it, the subnet
specified in the request to add another mount target must meet the following
requirements:
* Must belong to the same VPC as the subnets of the existing mount
targets
* Must not be in the same Availability Zone as any of the subnets of
the existing mount targets
If the request satisfies the requirements, Amazon EFS does the following:
* Creates a new mount target in the specified subnet.
* Also creates a new network interface in the subnet as follows:
* If the request provides an `IpAddress`, Amazon EFS
assigns that IP address to the network interface. Otherwise, Amazon EFS assigns
a free address in the subnet (in the same way that the Amazon EC2
`CreateNetworkInterface` call does when a request does not specify a primary
private IP address).
* If the request provides `SecurityGroups`, this network
interface is associated with those security groups. Otherwise, it belongs to the
default security group for the subnet's VPC.
* Assigns the description `Mount target *fsmt-id* for
file system *fs-id* ` where ` *fsmt-id* ` is the mount target ID, and ` *fs-id*
` is the `FileSystemId`.
* Sets the `requesterManaged` property of the network
interface to `true`, and the `requesterId` value to `EFS`.
Each Amazon EFS mount target has one corresponding requester-managed EC2 network
interface. After the network interface is created, Amazon EFS sets the
`NetworkInterfaceId` field in the mount target's description to the network
interface ID, and the `IpAddress` field to its address. If network interface
creation fails, the entire `CreateMountTarget` operation fails.
The `CreateMountTarget` call returns only after creating the network interface,
but while the mount target state is still `creating`, you can check the mount
target creation status by calling the `DescribeMountTargets` operation, which
among other things returns the mount target state.
We recommend that you create a mount target in each of the Availability Zones.
There are cost considerations for using a file system in an Availability Zone
through a mount target created in another Availability Zone. For more
information, see [Amazon EFS](http://aws.amazon.com/efs/). In addition, by
always using a mount target local to the instance's Availability Zone, you
eliminate a partial failure scenario. If the Availability Zone in which your
mount target is created goes down, then you can't access your file system
through that mount target.
This operation requires permissions for the following action on the file system:
* `elasticfilesystem:CreateMountTarget`
This operation also requires permissions for the following Amazon EC2 actions:
* `ec2:DescribeSubnets`
* `ec2:DescribeNetworkInterfaces`
* `ec2:CreateNetworkInterface`
"""
def create_mount_target(%Client{} = client, input, options \\ []) do
url_path = "/2015-02-01/mount-targets"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a replication configuration that replicates an existing EFS file system
to a new, read-only file system.
For more information, see [Amazon EFS replication](https://docs.aws.amazon.com/efs/latest/ug/efs-replication.html) in
the *Amazon EFS User Guide*. The replication configuration specifies the
following:
* **Source file system** - An existing EFS file system that you want
replicated. The source file system cannot be a destination file system in an
existing replication configuration.
* **Destination file system configuration** - The configuration of
the destination file system to which the source file system will be replicated.
There can only be one destination file system in a replication configuration.
The destination file system configuration consists of the following properties:
* **Amazon Web Services Region** - The Amazon Web
Services Region in which the destination file system is created. Amazon EFS
replication is available in all Amazon Web Services Regions that Amazon EFS is
available in, except Africa (Cape Town), Asia Pacific (Hong Kong), Asia Pacific
(Jakarta), Europe (Milan), and Middle East (Bahrain).
* **Availability Zone** - If you want the destination
file system to use EFS One Zone availability and durability, you must specify
the Availability Zone to create the file system in. For more information about
EFS storage classes, see [ Amazon EFS storage classes](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the
*Amazon EFS User Guide*.
* **Encryption** - All destination file systems are
created with encryption at rest enabled. You can specify the Key Management
Service (KMS) key that is used to encrypt the destination file system. If you
don't specify a KMS key, your service-managed KMS key for Amazon EFS is used.
After the file system is created, you cannot change the KMS key.
The following properties are set by default:
* **Performance mode** - The destination file system's performance
mode matches that of the source file system, unless the destination file system
uses EFS One Zone storage. In that case, the General Purpose performance mode is
used. The performance mode cannot be changed.
* **Throughput mode** - The destination file system uses the
Bursting Throughput mode by default. After the file system is created, you can
modify the throughput mode.
The following properties are turned off by default:
* **Lifecycle management** - EFS lifecycle management and EFS
Intelligent-Tiering are not enabled on the destination file system. After the
destination file system is created, you can enable EFS lifecycle management and
EFS Intelligent-Tiering.
* **Automatic backups** - Automatic daily backups are not enabled on the
destination file system. After the file system is created, you can change this
setting.
For more information, see [Amazon EFS replication](https://docs.aws.amazon.com/efs/latest/ug/efs-replication.html) in
the *Amazon EFS User Guide*.
"""
def create_replication_configuration(
%Client{} = client,
source_file_system_id,
input,
options \\ []
) do
url_path =
"/2015-02-01/file-systems/#{AWS.Util.encode_uri(source_file_system_id)}/replication-configuration"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
DEPRECATED - `CreateTags` is deprecated and not maintained.
To create tags for EFS resources, use the API action.
Creates or overwrites tags associated with a file system. Each tag is a
key-value pair. If a tag key specified in the request already exists on the file
system, this operation overwrites its value with the value provided in the
request. If you add the `Name` tag to your file system, Amazon EFS returns it in
the response to the `DescribeFileSystems` operation.
This operation requires permission for the `elasticfilesystem:CreateTags`
action.
"""
def create_tags(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/create-tags/#{AWS.Util.encode_uri(file_system_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes the specified access point.
After deletion is complete, new clients can no longer connect to the access
points. Clients connected to the access point at the time of deletion will
continue to function until they terminate their connection.
This operation requires permissions for the
`elasticfilesystem:DeleteAccessPoint` action.
"""
def delete_access_point(%Client{} = client, access_point_id, input, options \\ []) do
url_path = "/2015-02-01/access-points/#{AWS.Util.encode_uri(access_point_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes a file system, permanently severing access to its contents.
Upon return, the file system no longer exists and you can't access any contents
of the deleted file system.
You need to manually delete mount targets attached to a file system before you
can delete an EFS file system. This step is performed for you when you use the
Amazon Web Services console to delete a file system.
You cannot delete a file system that is part of an EFS Replication
configuration. You need to delete the replication configuration first.
You can't delete a file system that is in use. That is, if the file system has
any mount targets, you must first delete them. For more information, see
`DescribeMountTargets` and `DeleteMountTarget`.
The `DeleteFileSystem` call returns while the file system state is still
`deleting`. You can check the file system deletion status by calling the
`DescribeFileSystems` operation, which returns a list of file systems in your
account. If you pass a file system ID or creation token for the deleted file
system, the `DescribeFileSystems` call returns a `404 FileSystemNotFound` error.
This operation requires permissions for the `elasticfilesystem:DeleteFileSystem`
action.
"""
def delete_file_system(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes the `FileSystemPolicy` for the specified file system.
The default `FileSystemPolicy` goes into effect once the existing policy is
deleted. For more information about the default file system policy, see [Using Resource-based Policies with
EFS](https://docs.aws.amazon.com/efs/latest/ug/res-based-policies-efs.html).
This operation requires permissions for the
`elasticfilesystem:DeleteFileSystemPolicy` action.
"""
def delete_file_system_policy(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/policy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes the specified mount target.
This operation forcibly breaks any mounts of the file system by using the mount
target that is being deleted, which might disrupt instances or applications
using those mounts. To avoid applications getting cut off abruptly, you might
consider unmounting any mounts of the mount target, if feasible. The operation
also deletes the associated network interface. Uncommitted writes might be lost,
but breaking a mount target using this operation does not corrupt the file
system itself. The file system you created remains. You can mount an EC2
instance in your VPC by using another mount target.
This operation requires permissions for the following action on the file system:
* `elasticfilesystem:DeleteMountTarget`
The `DeleteMountTarget` call returns while the mount target state is still
`deleting`. You can check the mount target deletion by calling the
`DescribeMountTargets` operation, which returns a list of mount target
descriptions for the given file system.
The operation also requires permissions for the following Amazon EC2 action on
the mount target's network interface:
* `ec2:DeleteNetworkInterface`
"""
def delete_mount_target(%Client{} = client, mount_target_id, input, options \\ []) do
url_path = "/2015-02-01/mount-targets/#{AWS.Util.encode_uri(mount_target_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes an existing replication configuration.
To delete a replication configuration, you must make the request from the Amazon
Web Services Region in which the destination file system is located. Deleting a
replication configuration ends the replication process. After a replication
configuration is deleted, the destination file system is no longer read-only.
You can write to the destination file system after its status becomes
`Writeable`.
"""
def delete_replication_configuration(
%Client{} = client,
source_file_system_id,
input,
options \\ []
) do
url_path =
"/2015-02-01/file-systems/#{AWS.Util.encode_uri(source_file_system_id)}/replication-configuration"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
DEPRECATED - `DeleteTags` is deprecated and not maintained.
To remove tags from EFS resources, use the API action.
Deletes the specified tags from a file system. If the `DeleteTags` request
includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause
an error. For more information about tags and related restrictions, see [Tag restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
in the *Billing and Cost Management User Guide*.
This operation requires permissions for the `elasticfilesystem:DeleteTags`
action.
"""
def delete_tags(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/delete-tags/#{AWS.Util.encode_uri(file_system_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Returns the description of a specific Amazon EFS access point if the
`AccessPointId` is provided.
If you provide an EFS `FileSystemId`, it returns descriptions of all access
points for that file system. You can provide either an `AccessPointId` or a
`FileSystemId` in the request, but not both.
This operation requires permissions for the
`elasticfilesystem:DescribeAccessPoints` action.
"""
def describe_access_points(
%Client{} = client,
access_point_id \\ nil,
file_system_id \\ nil,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/2015-02-01/access-points"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(file_system_id) do
[{"FileSystemId", file_system_id} | query_params]
else
query_params
end
query_params =
if !is_nil(access_point_id) do
[{"AccessPointId", access_point_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the account preferences settings for the Amazon Web Services account
associated with the user making the request, in the current Amazon Web Services
Region.
For more information, see [Managing Amazon EFS resource IDs](https://docs.aws.amazon.com/efs/latest/ug/manage-efs-resource-ids.html).
"""
def describe_account_preferences(%Client{} = client, options \\ []) do
url_path = "/2015-02-01/account-preferences"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the backup policy for the specified EFS file system.
"""
def describe_backup_policy(%Client{} = client, file_system_id, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/backup-policy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the `FileSystemPolicy` for the specified EFS file system.
This operation requires permissions for the
`elasticfilesystem:DescribeFileSystemPolicy` action.
"""
def describe_file_system_policy(%Client{} = client, file_system_id, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/policy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the description of a specific Amazon EFS file system if either the file
system `CreationToken` or the `FileSystemId` is provided.
Otherwise, it returns descriptions of all file systems owned by the caller's
Amazon Web Services account in the Amazon Web Services Region of the endpoint
that you're calling.
When retrieving all file system descriptions, you can optionally specify the
`MaxItems` parameter to limit the number of descriptions in a response.
Currently, this number is automatically set to 10. If more file system
descriptions remain, Amazon EFS returns a `NextMarker`, an opaque token, in the
response. In this case, you should send a subsequent request with the `Marker`
request parameter set to the value of `NextMarker`.
To retrieve a list of your file system descriptions, this operation is used in
an iterative process, where `DescribeFileSystems` is called first without the
`Marker` and then the operation continues to call it with the `Marker` parameter
set to the value of the `NextMarker` from the previous response until the
response has no `NextMarker`.
The order of file systems returned in the response of one `DescribeFileSystems`
call and the order of file systems returned across the responses of a multi-call
iteration is unspecified.
This operation requires permissions for the
`elasticfilesystem:DescribeFileSystems` action.
"""
def describe_file_systems(
%Client{} = client,
creation_token \\ nil,
file_system_id \\ nil,
marker \\ nil,
max_items \\ nil,
options \\ []
) do
url_path = "/2015-02-01/file-systems"
headers = []
query_params = []
query_params =
if !is_nil(max_items) do
[{"MaxItems", max_items} | query_params]
else
query_params
end
query_params =
if !is_nil(marker) do
[{"Marker", marker} | query_params]
else
query_params
end
query_params =
if !is_nil(file_system_id) do
[{"FileSystemId", file_system_id} | query_params]
else
query_params
end
query_params =
if !is_nil(creation_token) do
[{"CreationToken", creation_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
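  # Pagination sketch (hedged): callers typically loop until `NextMarker` is
  # absent, passing it back as the `marker` argument:
  #
  #     {:ok, %{"FileSystems" => _fs, "NextMarker" => marker}, _} =
  #       AWS.EFS.describe_file_systems(client)
  #
  #     {:ok, _next_page, _} =
  #       AWS.EFS.describe_file_systems(client, nil, nil, marker)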
@doc """
Returns the current `LifecycleConfiguration` object for the specified Amazon EFS
file system.
EFS lifecycle management uses the `LifecycleConfiguration` object to identify
which files to move to the EFS Infrequent Access (IA) storage class. For a file
system without a `LifecycleConfiguration` object, the call returns an empty
array in the response.
When EFS Intelligent-Tiering is enabled, `TransitionToPrimaryStorageClass` has a
value of `AFTER_1_ACCESS`.
This operation requires permissions for the
`elasticfilesystem:DescribeLifecycleConfiguration` operation.
"""
def describe_lifecycle_configuration(%Client{} = client, file_system_id, options \\ []) do
url_path =
"/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/lifecycle-configuration"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the security groups currently in effect for a mount target.
This operation requires that the network interface of the mount target has been
created and the lifecycle state of the mount target is not `deleted`.
This operation requires permissions for the following actions:
* `elasticfilesystem:DescribeMountTargetSecurityGroups` action on
the mount target's file system.
* `ec2:DescribeNetworkInterfaceAttribute` action on the mount
target's network interface.
"""
def describe_mount_target_security_groups(%Client{} = client, mount_target_id, options \\ []) do
url_path = "/2015-02-01/mount-targets/#{AWS.Util.encode_uri(mount_target_id)}/security-groups"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the descriptions of all the current mount targets, or a specific mount
target, for a file system.
When requesting all of the current mount targets, the order of mount targets
returned in the response is unspecified.
This operation requires permissions for the
`elasticfilesystem:DescribeMountTargets` action, on either the file system ID
that you specify in `FileSystemId`, or on the file system of the mount target
that you specify in `MountTargetId`.
"""
def describe_mount_targets(
%Client{} = client,
access_point_id \\ nil,
file_system_id \\ nil,
marker \\ nil,
max_items \\ nil,
mount_target_id \\ nil,
options \\ []
) do
url_path = "/2015-02-01/mount-targets"
headers = []
query_params = []
query_params =
if !is_nil(mount_target_id) do
[{"MountTargetId", mount_target_id} | query_params]
else
query_params
end
query_params =
if !is_nil(max_items) do
[{"MaxItems", max_items} | query_params]
else
query_params
end
query_params =
if !is_nil(marker) do
[{"Marker", marker} | query_params]
else
query_params
end
query_params =
if !is_nil(file_system_id) do
[{"FileSystemId", file_system_id} | query_params]
else
query_params
end
query_params =
if !is_nil(access_point_id) do
[{"AccessPointId", access_point_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves the replication configuration for a specific file system.
If a file system is not specified, all of the replication configurations for the
Amazon Web Services account in an Amazon Web Services Region are retrieved.
"""
def describe_replication_configurations(
%Client{} = client,
file_system_id \\ nil,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/2015-02-01/file-systems/replication-configurations"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(file_system_id) do
[{"FileSystemId", file_system_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
DEPRECATED - The `DescribeTags` action is deprecated and not maintained.
To view tags associated with EFS resources, use the `ListTagsForResource` API
action.
Returns the tags associated with a file system. The order of tags returned in
the response of one `DescribeTags` call and the order of tags returned across
the responses of a multiple-call iteration (when using pagination) is
unspecified.
This operation requires permissions for the `elasticfilesystem:DescribeTags`
action.
"""
def describe_tags(
%Client{} = client,
file_system_id,
marker \\ nil,
max_items \\ nil,
options \\ []
) do
url_path = "/2015-02-01/tags/#{AWS.Util.encode_uri(file_system_id)}/"
headers = []
query_params = []
query_params =
if !is_nil(max_items) do
[{"MaxItems", max_items} | query_params]
else
query_params
end
query_params =
if !is_nil(marker) do
[{"Marker", marker} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Lists all tags for a top-level EFS resource.
You must provide the ID of the resource that you want to retrieve the tags for.
This operation requires permissions for the
`elasticfilesystem:DescribeAccessPoints` action.
"""
def list_tags_for_resource(
%Client{} = client,
resource_id,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/2015-02-01/resource-tags/#{AWS.Util.encode_uri(resource_id)}"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Modifies the set of security groups in effect for a mount target.
When you create a mount target, Amazon EFS also creates a new network interface.
For more information, see `CreateMountTarget`. This operation replaces the
security groups in effect for the network interface associated with a mount
target, with the `SecurityGroups` provided in the request. This operation
requires that the network interface of the mount target has been created and the
lifecycle state of the mount target is not `deleted`.
The operation requires permissions for the following actions:
* `elasticfilesystem:ModifyMountTargetSecurityGroups` action on the
mount target's file system.
* `ec2:ModifyNetworkInterfaceAttribute` action on the mount target's
network interface.
"""
def modify_mount_target_security_groups(
%Client{} = client,
mount_target_id,
input,
options \\ []
) do
url_path = "/2015-02-01/mount-targets/#{AWS.Util.encode_uri(mount_target_id)}/security-groups"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Use this operation to set the account preference in the current Amazon Web
Services Region to use long 17 character (63 bit) or short 8 character (32 bit)
resource IDs for new EFS file system and mount target resources.
All existing resource IDs are not affected by any changes you make. You can set
the ID preference during the opt-in period as EFS transitions to long resource
IDs. For more information, see [Managing Amazon EFS resource IDs](https://docs.aws.amazon.com/efs/latest/ug/manage-efs-resource-ids.html).
Starting in October, 2021, you will receive an error if you try to set the
account preference to use the short 8 character format resource ID. Contact
Amazon Web Services support if you receive an error and must use short IDs for
file system and mount target resources.
"""
def put_account_preferences(%Client{} = client, input, options \\ []) do
url_path = "/2015-02-01/account-preferences"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates the file system's backup policy.
Use this action to start or stop automatic backups of the file system.
"""
def put_backup_policy(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/backup-policy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Applies an Amazon EFS `FileSystemPolicy` to an Amazon EFS file system.
A file system policy is an IAM resource-based policy and can contain multiple
policy statements. A file system always has exactly one file system policy,
which can be the default policy or an explicit policy set or updated using this
API operation. EFS file system policies have a 20,000 character limit. When an
explicit policy is set, it overrides the default policy. For more information
about the default file system policy, see [Default EFS File System Policy](https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html#default-filesystempolicy).
This operation requires permissions for the
`elasticfilesystem:PutFileSystemPolicy` action.
"""
def put_file_system_policy(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/policy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Use this action to manage EFS lifecycle management and intelligent tiering.
A `LifecycleConfiguration` consists of one or more `LifecyclePolicy` objects
that define the following:
* **EFS Lifecycle management** - When Amazon EFS automatically
transitions files in a file system into the lower-cost Infrequent Access (IA)
storage class.
To enable EFS Lifecycle management, set the value of `TransitionToIA` to one of
the available options.
* **EFS Intelligent tiering** - When Amazon EFS automatically
transitions files from IA back into the file system's primary storage class
(Standard or One Zone Standard).
To enable EFS Intelligent Tiering, set the value of
`TransitionToPrimaryStorageClass` to `AFTER_1_ACCESS`.
For more information, see [EFS Lifecycle Management](https://docs.aws.amazon.com/efs/latest/ug/lifecycle-management-efs.html).
Each Amazon EFS file system supports one lifecycle configuration, which applies
to all files in the file system. If a `LifecycleConfiguration` object already
exists for the specified file system, a `PutLifecycleConfiguration` call
modifies the existing configuration. A `PutLifecycleConfiguration` call with an
empty `LifecyclePolicies` array in the request body deletes any existing
`LifecycleConfiguration` and turns off lifecycle management and intelligent
tiering for the file system.
In the request, specify the following:
* The ID for the file system for which you are enabling, disabling,
or modifying lifecycle management and intelligent tiering.
* A `LifecyclePolicies` array of `LifecyclePolicy` objects that
define when files are moved into IA storage, and when they are moved back to
Standard storage.
Amazon EFS requires that each `LifecyclePolicy` object have only a single
transition, so the `LifecyclePolicies` array needs to be structured with
separate `LifecyclePolicy` objects. See the example requests in the following
section for more information.
This operation requires permissions for the
`elasticfilesystem:PutLifecycleConfiguration` operation.
To apply a `LifecycleConfiguration` object to an encrypted file system, you need
the same Key Management Service permissions as when you created the encrypted
file system.
"""
def put_lifecycle_configuration(%Client{} = client, file_system_id, input, options \\ []) do
url_path =
"/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}/lifecycle-configuration"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a tag for an EFS resource.
You can create tags for EFS file systems and access points using this API
operation.
This operation requires permissions for the `elasticfilesystem:TagResource`
action.
"""
def tag_resource(%Client{} = client, resource_id, input, options \\ []) do
url_path = "/2015-02-01/resource-tags/#{AWS.Util.encode_uri(resource_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Removes tags from an EFS resource.
You can remove tags from EFS file systems and access points using this API
operation.
This operation requires permissions for the `elasticfilesystem:UntagResource`
action.
"""
def untag_resource(%Client{} = client, resource_id, input, options \\ []) do
url_path = "/2015-02-01/resource-tags/#{AWS.Util.encode_uri(resource_id)}"
headers = []
{query_params, input} =
[
{"TagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates the throughput mode or the amount of provisioned throughput of an
existing file system.
"""
def update_file_system(%Client{} = client, file_system_id, input, options \\ []) do
url_path = "/2015-02-01/file-systems/#{AWS.Util.encode_uri(file_system_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
202
)
end
end
|
lib/aws/generated/efs.ex
| 0.907066 | 0.503906 |
efs.ex
|
starcoder
|
defmodule Bamboo.SentEmail do
@moduledoc """
Used for storing and retrieving sent emails when used with Bamboo.LocalAdapter
When emails are sent with the Bamboo.LocalAdapter, they are stored in
Bamboo.SentEmail. Use the functions in this module to store and retrieve the emails.
Remember to start the Bamboo app by adding it to the app list in `mix.exs` or
starting it with `Application.ensure_all_started(:bamboo)`
"""
@id_length 16
defmodule DeliveriesError do
defexception [:message]
def exception(emails) do
message = """
SentEmail.one/1 expected to find one email, got #{Enum.count(emails)}:
#{email_list(emails)}
This function is used when you expect only one email to have been sent. If
you meant to send more than one email, you can call
SentEmail.all/0 to get all sent emails.
For example: SentEmail.all |> List.first
"""
%DeliveriesError{message: message}
end
defp email_list(emails) do
emails
|> Enum.map(&inspect/1)
|> Enum.join("\n")
end
end
defmodule NoDeliveriesError do
defexception [:message]
def exception(_) do
message = "expected to find one email, but got none."
%NoDeliveriesError{message: message}
end
end
@doc "Starts the SentEmail Agent"
def start_link do
Agent.start_link(fn -> [] end, name: __MODULE__)
end
@doc """
Gets the email's id.
The email must be an email that was sent with Bamboo.LocalAdapter or added
via SentEmail.push/1, otherwise the id will not have been set.
"""
def get_id(%Bamboo.Email{private: %{local_adapter_id: id}}) do
id
end
def get_id(%Bamboo.Email{}) do
raise """
SentEmail.get_id/1 expected the email to have an id, but no id was present.
This is usually because the email was not sent with Bamboo.LocalAdapter
or wasn't pushed with SentEmail.push/1
"""
end
def get_id(email) do
raise "SentEmail.get_id/1 expected a %Bamboo.Email{}, instead got: #{inspect email}"
end
@doc """
Gets an email by id. Returns nil if it can't find a matching email.
"""
def get(id) do
do_get(id)
end
@doc """
Gets an email by id. Raises if it can't find one.
"""
def get!(id) do
do_get(id) || raise NoDeliveriesError, nil
end
defp do_get(id) do
    Enum.find(all(), fn email ->
      email |> get_id() |> String.downcase() == String.downcase(id)
    end)
end
@doc "Returns a list of all sent emails"
def all do
Agent.get(__MODULE__, fn(emails) -> emails end)
end
@doc """
Adds an email to the list of sent emails
Adds an email to the beginning of the sent emails list. Also gives the email
an id that can be fetched with SentEmail.get_id/1.
"""
def push(email) do
email = put_rand_id(email)
Agent.update(__MODULE__, fn(emails) ->
[email | emails]
end)
email
end
  defp put_rand_id(email) do
    email |> Bamboo.Email.put_private(:local_adapter_id, rand_id())
  end
  defp rand_id do
    :crypto.strong_rand_bytes(@id_length)
    |> Base.url_encode64()
    |> binary_part(0, @id_length)
  end
@doc """
Returns exactly one sent email. Raises if none, or more than one are found
Raises `NoDeliveriesError` if there are no emails. Raises `DeliveriesError` if
there are 2 or more emails.
"""
def one do
    case all() do
[email] -> email
[] -> raise NoDeliveriesError
emails -> raise DeliveriesError, emails
end
end
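  # Typical test usage (sketch — the mailer module and email fields below are
  # hypothetical):
  #
  #     SentEmail.reset()
  #     MyApp.Mailer.deliver_now(email)   # delivered via Bamboo.LocalAdapter
  #     assert SentEmail.one().subject == "Welcome"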
@doc "Clears all sent emails"
def reset do
Agent.update(__MODULE__, fn(_) ->
[]
end)
end
end
|
lib/bamboo/sent_email.ex
| 0.675765 | 0.596698 |
sent_email.ex
|
starcoder
|
defmodule OMG.Watcher.ExitProcessor.Finalizations do
@moduledoc """
Encapsulates managing and executing the behaviors related to treating exits by the child chain and watchers
Keeps a state of exits that are in progress, updates it with news from the root chain, compares to the
state of the ledger (`OMG.State`), issues notifications as it finds suitable.
Should manage all kinds of exits allowed in the protocol and handle the interactions between them.
This is the functional, zero-side-effect part of the exit processor. Logic should go here:
- orchestrating the persistence of the state
- finding invalid exits, disseminating them as events according to rules
- enabling to challenge invalid exits
- figuring out critical failure of invalid exit challenging (aka `:unchallenged_exit` event)
- MoreVP protocol managing in general
For the imperative shell, see `OMG.Watcher.ExitProcessor`
"""
alias OMG.State.Transaction
alias OMG.Utxo
alias OMG.Watcher.ExitProcessor.Core
alias OMG.Watcher.ExitProcessor.ExitInfo
alias OMG.Watcher.ExitProcessor.InFlightExitInfo
use OMG.Utils.LoggerExt
require Utxo
@doc """
Finalize exits based on Ethereum events, removing from tracked state if valid.
Invalid finalizing exits should continue being tracked as `is_active`, to continue emitting events.
This includes non-`is_active` exits that finalize invalid, which are turned to be `is_active` now.
"""
@spec finalize_exits(Core.t(), validities :: {list(Utxo.Position.t()), list(Utxo.Position.t())}) ::
          {Core.t(), list()}
def finalize_exits(%Core{exits: exits} = state, {valid_finalizations, invalid}) do
# handling valid finalizations
new_exits_kv_pairs =
exits
|> Map.take(valid_finalizations)
|> Enum.into(%{}, fn {utxo_pos, exit_info} -> {utxo_pos, %ExitInfo{exit_info | is_active: false}} end)
new_state1 = %{state | exits: Map.merge(exits, new_exits_kv_pairs)}
db_updates = new_exits_kv_pairs |> Enum.map(&ExitInfo.make_db_update/1)
# invalid ones - activating, in case they were inactive, to keep being invalid forever
{new_state2, activating_db_updates} = activate_on_invalid_finalization(new_state1, invalid)
{new_state2, db_updates ++ activating_db_updates}
end
defp activate_on_invalid_finalization(%Core{exits: exits} = state, invalid_finalizations) do
exits_to_activate =
exits
|> Map.take(invalid_finalizations)
|> Enum.map(fn {k, v} -> {k, Map.update!(v, :is_active, fn _ -> true end)} end)
|> Map.new()
activating_db_updates =
exits_to_activate
|> Enum.map(&ExitInfo.make_db_update/1)
state = %{state | exits: Map.merge(exits, exits_to_activate)}
{state, activating_db_updates}
end
@doc """
Returns a tuple of `{:ok, %{ife_exit_id => {finalized_input_exits | finalized_output_exits}}, list(events_exits)}`.
Finalized input exits and finalized output exits structures both fit into `OMG.State.exit_utxos/1`.
  The events exits list contains Ethereum finalization events paired with the utxos they exit. This data is needed to
  broadcast information to the consumers about utxos that need to be marked as spent as the result of finalization.
When there are invalid finalizations, returns one of the following:
- {:inactive_piggybacks_finalizing, list of piggybacks that exit processor state is not aware of}
- {:unknown_in_flight_exit, set of in-flight exit ids that exit processor is not aware of}
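
  An illustrative success shape (ids and positions are made up for the sketch):

      {:ok, %{<<1::192>> => [exiting_utxo_pos]}, [{finalization_event, [exiting_utxo_pos]}]}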
"""
@spec prepare_utxo_exits_for_in_flight_exit_finalizations(Core.t(), [map()]) ::
{:ok, map(), list()}
| {:inactive_piggybacks_finalizing, list()}
| {:unknown_in_flight_exit, MapSet.t(non_neg_integer())}
def prepare_utxo_exits_for_in_flight_exit_finalizations(%Core{in_flight_exits: ifes}, finalizations) do
finalizations = finalizations |> Enum.map(&ife_id_to_binary/1)
with {:ok, ifes_by_id} <- get_all_finalized_ifes_by_ife_contract_id(finalizations, ifes),
{:ok, []} <- known_piggybacks?(finalizations, ifes_by_id) do
{exiting_positions_by_ife_id, events_with_positions} =
finalizations
|> Enum.reverse()
|> Enum.reduce({%{}, []}, &combine_utxo_exits_with_finalization(&1, &2, ifes_by_id))
{
:ok,
exiting_positions_by_ife_id,
Enum.reject(events_with_positions, &Kernel.match?({_, []}, &1))
}
end
end
# converts from int, which is how the contract serves it
defp ife_id_to_binary(finalization),
do: Map.update!(finalization, :in_flight_exit_id, fn id -> <<id::192>> end)
defp get_all_finalized_ifes_by_ife_contract_id(finalizations, ifes) do
finalizations_ids =
finalizations
|> Enum.map(fn %{in_flight_exit_id: id} -> id end)
|> MapSet.new()
by_contract_id =
ifes
|> Enum.map(fn {_tx_hash, %InFlightExitInfo{contract_id: id} = ife} -> {id, ife} end)
|> Map.new()
known_ifes =
by_contract_id
|> Map.keys()
|> MapSet.new()
unknown_ifes = MapSet.difference(finalizations_ids, known_ifes)
if Enum.empty?(unknown_ifes) do
{:ok, by_contract_id}
else
{:unknown_in_flight_exit, unknown_ifes}
end
end
defp known_piggybacks?(finalizations, ifes_by_id) do
finalizations
|> Enum.filter(&finalization_not_piggybacked?(&1, ifes_by_id))
|> case do
[] -> {:ok, []}
not_piggybacked -> {:inactive_piggybacks_finalizing, not_piggybacked}
end
end
defp finalization_not_piggybacked?(
%{in_flight_exit_id: ife_id, output_index: output_index, omg_data: %{piggyback_type: piggyback_type}},
ifes_by_id
),
do: not InFlightExitInfo.is_active?(ifes_by_id[ife_id], {piggyback_type, output_index})
defp combine_utxo_exits_with_finalization(
%{in_flight_exit_id: ife_id, output_index: output_index, omg_data: %{piggyback_type: piggyback_type}} = event,
{exiting_positions, events_with_positions},
ifes_by_id
) do
ife = ifes_by_id[ife_id]
# a runtime sanity check - if this were false it would mean all piggybacks finalized so contract wouldn't allow that
true = InFlightExitInfo.is_active?(ife, {piggyback_type, output_index})
# figure out if there's any UTXOs really exiting from the `OMG.State` from this IFE's piggybacked input/output
exiting_positions_for_piggyback = get_exiting_positions(ife, output_index, piggyback_type)
{
Map.update(exiting_positions, ife_id, exiting_positions_for_piggyback, &(exiting_positions_for_piggyback ++ &1)),
[{event, exiting_positions_for_piggyback} | events_with_positions]
}
end
defp get_exiting_positions(ife, output_index, :input) do
%InFlightExitInfo{tx: %Transaction.Signed{raw_tx: tx}} = ife
input_position = tx |> Transaction.get_inputs() |> Enum.at(output_index)
[input_position]
end
defp get_exiting_positions(ife, output_index, :output) do
case ife.tx_seen_in_blocks_at do
nil -> []
{Utxo.position(blknum, txindex, _), _proof} -> [Utxo.position(blknum, txindex, output_index)]
end
end
@doc """
Finalizes in-flight exits.
Returns a tuple of {:ok, updated state, database updates}.
When there are invalid finalizations, returns one of the following:
- {:inactive_piggybacks_finalizing, list of piggybacks that exit processor state is not aware of}
- {:unknown_in_flight_exit, set of in-flight exit ids that exit processor is not aware of}
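
  An illustrative call (`invalidities_by_ife_id` maps in-flight exit ids to the
  invalid utxo positions found by the caller):

      {:ok, new_state, db_updates} =
        Finalizations.finalize_in_flight_exits(state, finalizations, invalidities_by_ife_id)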
"""
@spec finalize_in_flight_exits(Core.t(), [map()], map()) ::
{:ok, Core.t(), list()}
| {:inactive_piggybacks_finalizing, list()}
| {:unknown_in_flight_exit, MapSet.t(non_neg_integer())}
def finalize_in_flight_exits(%Core{in_flight_exits: ifes} = state, finalizations, invalidities_by_ife_id) do
# convert ife_id from int (given by contract) to a binary
finalizations = Enum.map(finalizations, &ife_id_to_binary/1)
with {:ok, ifes_by_id} <- get_all_finalized_ifes_by_ife_contract_id(finalizations, ifes),
{:ok, []} <- known_piggybacks?(finalizations, ifes_by_id) do
{ifes_by_id, updated_ifes} =
finalizations
|> Enum.reduce({ifes_by_id, MapSet.new()}, &finalize_single_exit/2)
|> activate_on_invalid_utxo_exits(invalidities_by_ife_id)
db_updates =
ifes_by_id
|> Map.take(Enum.to_list(updated_ifes))
|> Map.values()
# re-key those IFEs by tx_hash as how they are originally stored
|> Enum.map(&{Transaction.raw_txhash(&1.tx), &1})
|> Enum.map(&InFlightExitInfo.make_db_update/1)
ifes =
ifes_by_id
# re-key those IFEs by tx_hash as how they are originally stored
|> Map.values()
|> Enum.into(%{}, &{Transaction.raw_txhash(&1.tx), &1})
{:ok, %{state | in_flight_exits: ifes}, db_updates}
end
end
defp finalize_single_exit(
%{in_flight_exit_id: ife_id, output_index: output_index, omg_data: %{piggyback_type: piggyback_type}},
{ifes_by_id, updated_ifes}
) do
combined_index = {piggyback_type, output_index}
ife = ifes_by_id[ife_id]
if InFlightExitInfo.is_active?(ife, combined_index) do
{:ok, finalized_ife} = InFlightExitInfo.finalize(ife, combined_index)
ifes_by_id = Map.put(ifes_by_id, ife_id, finalized_ife)
updated_ifes = MapSet.put(updated_ifes, ife_id)
{ifes_by_id, updated_ifes}
else
{ifes_by_id, updated_ifes}
end
end
defp activate_on_invalid_utxo_exits({ifes_by_id, updated_ifes}, invalidities_by_ife_id) do
ids_to_activate =
invalidities_by_ife_id
|> Enum.filter(fn {_ife_id, invalidities} -> not Enum.empty?(invalidities) end)
|> Enum.map(fn {ife_id, _invalidities} -> ife_id end)
|> MapSet.new()
# iterates over the ifes that are spotted with invalid finalizing (their `ife_ids`) and activates the ifes
new_ifes_by_id =
Enum.reduce(ids_to_activate, ifes_by_id, fn id, ifes -> Map.update!(ifes, id, &InFlightExitInfo.activate/1) end)
{new_ifes_by_id, MapSet.union(ids_to_activate, updated_ifes)}
end
end
|
apps/omg_watcher/lib/omg_watcher/exit_processor/finalizations.ex
| 0.742888 | 0.581541 |
finalizations.ex
|
starcoder
|
defmodule Pummpcomm.Session.Exchange.ReadInsulinSensitivities do
@moduledoc """
Reads insulin sensitivities throughout the day.
"""
use Bitwise
alias Pummpcomm.Insulin
alias Pummpcomm.Session.{Command, Response}
# Constants
@max_count 8
@mgdl 1
@mmol 2
@opcode 0x8B
# Functions
@doc """
Makes `Pummpcomm.Session.Command.t` to read insulin sensitivities throughout the day from pump with `pump_serial`
"""
@spec make(Command.pump_serial()) :: Command.t()
def make(pump_serial) do
%Command{opcode: @opcode, pump_serial: pump_serial}
end
@doc """
  Decodes `Pummpcomm.Session.Response.t` to insulin sensitivities
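
  An illustrative decoded shape (values are made up; compare the `@spec` below):

      {:ok, %{units: "mg/dL", sensitivities: [%{start: ~T[00:00:00], sensitivity: 50}]}}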
"""
@spec decode(Response.t()) :: {
:ok,
%{
units: String.t(),
sensitivities: [
%{sensitivity: Insulin.blood_glucose_per_unit(), start: NaiveDateTime.t()}
]
}
}
def decode(%Response{opcode: @opcode, data: <<units::8, rest::binary>>}) do
{:ok,
%{units: decode_units(units), sensitivities: decode_sensitivity(rest, [], @max_count, units)}}
end
## Private Functions
defp basal_time(raw_time) do
Timex.now()
|> Timex.beginning_of_day()
|> Timex.shift(minutes: 30 * raw_time)
|> DateTime.to_time()
end
defp convert_sensitivity_value(@mgdl, sensitivity), do: sensitivity
defp convert_sensitivity_value(@mmol, sensitivity), do: sensitivity / 10
defp decode_sensitivity(_, decoded_sensitivities, 0, _), do: Enum.reverse(decoded_sensitivities)
  # a start time of 0 after the first entry marks the end of the configured sensitivities
  defp decode_sensitivity(<<_::2, 0::6, _::binary>>, decoded_sensitivities, _, _)
       when length(decoded_sensitivities) > 0,
       do: Enum.reverse(decoded_sensitivities)
defp decode_sensitivity(
<<0::1, sensitivity_high::1, start_time::6, sensitivity_low::8, rest::binary>>,
decoded_sensitivities,
count,
units
) do
sensitivity = (sensitivity_high <<< 8) + sensitivity_low
decoded = %{
start: basal_time(start_time),
sensitivity: convert_sensitivity_value(units, sensitivity)
}
decode_sensitivity(rest, [decoded | decoded_sensitivities], count - 1, units)
end
defp decode_units(@mgdl), do: "mg/dL"
defp decode_units(@mmol), do: "mmol/L"
end
|
lib/pummpcomm/session/exchange/read_insulin_sensitivities.ex
| 0.830937 | 0.417212 |
read_insulin_sensitivities.ex
|
starcoder
|
defmodule Upload do
@moduledoc """
An opinionated file uploader.
"""
@enforce_keys [:key, :path, :filename]
defstruct [:key, :path, :filename, status: :pending]
@type t :: %Upload{
key: String.t(),
filename: String.t(),
path: String.t()
}
@type transferred :: %Upload{
key: String.t(),
filename: String.t(),
path: String.t(),
status: :transferred
}
@type uploadable :: Plug.Upload.t() | Upload.t()
@type uploadable_path :: String.t() | Upload.t()
@doc """
Get the adapter from config.
"""
def adapter do
Upload.Config.get(__MODULE__, :adapter, Upload.Adapters.Local)
end
@doc """
Get the URL for a given key. It will behave differently based
on the adapter you're using.
### Local
iex> Upload.get_url("123456.png")
"/uploads/123456.png"
### S3
iex> Upload.get_url("123456.png")
"https://my_bucket_name.s3.amazonaws.com/123456.png"
### Fake / Test
iex> Upload.get_url("123456.png")
"123456.png"
"""
@spec get_url(Upload.t() | String.t()) :: String.t()
def get_url(%__MODULE__{key: key}), do: get_url(key)
def get_url(key) when is_binary(key), do: adapter().get_url(key)
@doc """
Get the URL for a given key. It will behave differently based
on the adapter you're using.
### Examples
iex> Upload.get_signed_url("123456.png")
{:ok, "http://yoururl.com/123456.png?X-Amz-Expires=3600..."}
iex> Upload.get_signed_url("123456.png", expires_in: 4200)
{:ok, "http://yoururl.com/123456.png?X-Amz-Expires=4200..."}
"""
@spec get_signed_url(Upload.t() | String.t(), Keyword.t()) ::
{:ok, String.t()} | {:error, String.t()}
def get_signed_url(upload, opts \\ [])
def get_signed_url(%__MODULE__{key: key}, opts), do: get_signed_url(key, opts)
def get_signed_url(key, opts) when is_binary(key), do: adapter().get_signed_url(key, opts)
@doc """
  Transfers the file to where it will be stored.
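
  ## Examples

      # assuming a cast upload and a configured adapter
      {:ok, upload} = Upload.cast_path("/path/to/foo.png")
      {:ok, %Upload{status: :transferred}} = Upload.transfer(upload)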
"""
@spec transfer(Upload.t()) :: {:ok, Upload.transferred()} | {:error, String.t()}
def transfer(%__MODULE__{} = upload), do: adapter().transfer(upload)
@doc """
Deletes the file where it is stored.
"""
@spec delete(String.t()) :: :ok | {:error, String.t()}
def delete(key), do: adapter().delete(key)
@doc """
Converts a `Plug.Upload` to an `Upload`.
## Examples
iex> Upload.cast(%Plug.Upload{path: "/path/to/foo.png", filename: "foo.png"})
{:ok, %Upload{path: "/path/to/foo.png", filename: "foo.png", key: "<KEY>"}}
iex> Upload.cast(100)
:error
"""
@spec cast(uploadable, list) :: {:ok, Upload.t()} | :error
def cast(uploadable, opts \\ [])
def cast(%Upload{} = upload, _opts), do: {:ok, upload}
def cast(%Plug.Upload{filename: filename, path: path}, opts) do
do_cast(filename, path, opts)
end
def cast(_not_uploadable, _opts) do
:error
end
@doc """
Cast a file path to an `Upload`.
*Warning:* Do not use `cast_path` with unsanitized user input.
## Examples
iex> Upload.cast_path("/path/to/foo.png")
{:ok, %Upload{path: "/path/to/foo.png", filename: "foo.png", key: "<KEY>"}}
iex> Upload.cast_path(100)
:error
"""
@spec cast_path(uploadable_path, list) :: {:ok, Upload.t()} | :error
def cast_path(path, opts \\ [])
def cast_path(%Upload{} = upload, _opts), do: {:ok, upload}
def cast_path(path, opts) when is_binary(path) do
path
|> Path.basename()
|> do_cast(path, opts)
end
def cast_path(_, _opts) do
:error
end
defp do_cast(filename, path, opts) do
{:ok,
%__MODULE__{
key: generate_key(filename, opts),
path: path,
filename: filename,
status: :pending
}}
end
@doc """
Converts a filename to a unique key.
## Examples
iex> Upload.generate_key("phoenix.png")
"b9452178-9a54-5e99-8e64-a059b01b88cf.png"
iex> Upload.generate_key("phoenix.png", generate_key: false)
"phoenix.png"
iex> Upload.generate_key("phoenix.png", prefix: ["logos"])
"logos/b9452178-9a54-5e99-8e64-a059b01b88cf.png"
"""
@spec generate_key(String.t(), [{:prefix, list}]) :: String.t()
def generate_key(filename, opts \\ []) when is_binary(filename) do
if Keyword.get(opts, :generate_key, true) do
uuid = UUID.uuid4(:hex)
ext = get_extension(filename)
opts
|> Keyword.get(:prefix, [])
|> Enum.join("/")
|> Path.join(uuid <> ext)
else
opts
|> Keyword.get(:prefix, [])
|> Enum.join("/")
|> Path.join(filename)
end
end
@doc """
Gets the extension from a filename.
## Examples
iex> Upload.get_extension("foo.png")
".png"
iex> Upload.get_extension("foo.PNG")
".png"
iex> Upload.get_extension("foo")
""
iex> {:ok, upload} = Upload.cast_path("/path/to/foo.png")
...> Upload.get_extension(upload)
".png"
"""
@spec get_extension(String.t() | Upload.t()) :: String.t()
def get_extension(%Upload{filename: filename}) do
get_extension(filename)
end
def get_extension(filename) when is_binary(filename) do
filename |> Path.extname() |> String.downcase()
end
end
|
lib/upload.ex
| 0.834171 | 0.406155 |
upload.ex
|
starcoder
|
defmodule Credo.Check.Warning.MapGetUnsafePass do
@moduledoc false
@checkdoc """
  `Map.get/2` can lead to runtime errors if the result is passed into a pipe
without a proper default value. This happens when the next function in the
pipe cannot handle `nil` values correctly.
Example:
      %{foo: [1, 2, 3], bar: [4, 5, 6]}
|> Map.get(:missing_key)
|> Enum.each(&IO.puts/1)
This will cause a `Protocol.UndefinedError`, since `nil` isn't `Enumerable`.
  Oftentimes, while iterating over enumerables, zero iterations is preferable
  to being forced to deal with an exception. Had there been a `[]` default
  parameter, this could have been averted.
If you are sure the value exists and can't be nil, please use `Map.fetch!/2`.
If you are not sure, `Map.get/3` can help you provide a safe default value.
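
  A safe variant of the example above, sketched with an explicit default:

      %{foo: [1, 2, 3], bar: [4, 5, 6]}
      |> Map.get(:missing_key, [])
      |> Enum.each(&IO.puts/1)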
"""
@explanation [check: @checkdoc]
@call_string "Map.get"
@unsafe_modules [:Enum]
use Credo.Check, base_priority: :normal
@doc false
def run(source_file, params \\ []) do
issue_meta = IssueMeta.for(source_file, params)
Credo.Code.prewalk(source_file, &traverse(&1, &2, issue_meta))
end
defp traverse({:|>, _meta, _args} = ast, issues, issue_meta) do
pipe_issues =
ast
|> Macro.unpipe()
|> Enum.with_index()
|> find_pipe_issues(issue_meta)
{ast, issues ++ pipe_issues}
end
defp traverse(ast, issues, _issue_meta) do
{ast, issues}
end
defp find_pipe_issues(pipe, issue_meta) do
pipe
|> Enum.reduce([], fn {expr, idx}, acc ->
required_length = required_argument_length(idx)
{next_expr, _} = Enum.at(pipe, idx + 1, {nil, nil})
case {expr, nil_safe?(next_expr)} do
{{{{:., meta, [{_, _, [:Map]}, :get]}, _, args}, _}, false}
when length(args) != required_length ->
acc ++ [issue_for(issue_meta, meta[:line], @call_string)]
_ ->
acc
end
end)
end
defp required_argument_length(idx) when idx == 0, do: 3
defp required_argument_length(_), do: 2
defp nil_safe?(expr) do
case expr do
{{{:., _, [{_, _, [module]}, _]}, _, _}, _} ->
!(module in @unsafe_modules)
_ ->
true
end
end
defp issue_for(issue_meta, line_no, trigger) do
format_issue(
issue_meta,
message: "Map.get with no default return value is potentially unsafe
in pipes, use Map.get/3 instead",
trigger: trigger,
line_no: line_no
)
end
end
|
lib/credo/check/warning/map_get_unsafe_pass.ex
| 0.827759 | 0.422386 |
map_get_unsafe_pass.ex
|
starcoder
|
defmodule AWS.DeviceFarm do
@moduledoc """
Welcome to the AWS Device Farm API documentation, which contains APIs for:
    * Testing on desktop browsers

      Device Farm makes it possible for you to test your web applications on
      desktop browsers using Selenium. The APIs for desktop browser testing
      contain `TestGrid` in their names. For more information, see [Testing Web
      Applications on Selenium with Device
      Farm](https://docs.aws.amazon.com/devicefarm/latest/testgrid/).

    * Testing on real mobile devices

      Device Farm makes it possible for you to test apps on physical phones,
      tablets, and other devices in the cloud. For more information, see the
      [Device Farm Developer
      Guide](https://docs.aws.amazon.com/devicefarm/latest/developerguide/).
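
  A minimal, illustrative call sketch (the client fields are assumptions based
  on how `request/4` below reads them; credentials are placeholders):

      client = %AWS.Client{
        access_key_id: "AKIA...",
        secret_access_key: "...",
        region: "us-west-2",
        endpoint: "amazonaws.com",
        proto: "https",
        port: 443
      }

      {:ok, result, _response} = AWS.DeviceFarm.create_project(client, %{"name" => "my-project"})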
"""
@doc """
Creates a device pool.
"""
def create_device_pool(client, input, options \\ []) do
request(client, "CreateDevicePool", input, options)
end
@doc """
Creates a profile that can be applied to one or more private fleet device
instances.
"""
def create_instance_profile(client, input, options \\ []) do
request(client, "CreateInstanceProfile", input, options)
end
@doc """
Creates a network profile.
"""
def create_network_profile(client, input, options \\ []) do
request(client, "CreateNetworkProfile", input, options)
end
@doc """
Creates a project.
"""
def create_project(client, input, options \\ []) do
request(client, "CreateProject", input, options)
end
@doc """
Specifies and starts a remote access session.
"""
def create_remote_access_session(client, input, options \\ []) do
request(client, "CreateRemoteAccessSession", input, options)
end
@doc """
Creates a Selenium testing project. Projects are used to track
`TestGridSession` instances.
"""
def create_test_grid_project(client, input, options \\ []) do
request(client, "CreateTestGridProject", input, options)
end
@doc """
Creates a signed, short-term URL that can be passed to a Selenium
`RemoteWebDriver` constructor.
"""
def create_test_grid_url(client, input, options \\ []) do
request(client, "CreateTestGridUrl", input, options)
end
@doc """
Uploads an app or test scripts.
"""
def create_upload(client, input, options \\ []) do
request(client, "CreateUpload", input, options)
end
@doc """
Creates a configuration record in Device Farm for your Amazon Virtual
Private Cloud (VPC) endpoint.
"""
def create_v_p_c_e_configuration(client, input, options \\ []) do
request(client, "CreateVPCEConfiguration", input, options)
end
@doc """
Deletes a device pool given the pool ARN. Does not allow deletion of
curated pools owned by the system.
"""
def delete_device_pool(client, input, options \\ []) do
request(client, "DeleteDevicePool", input, options)
end
@doc """
Deletes a profile that can be applied to one or more private device
instances.
"""
def delete_instance_profile(client, input, options \\ []) do
request(client, "DeleteInstanceProfile", input, options)
end
@doc """
Deletes a network profile.
"""
def delete_network_profile(client, input, options \\ []) do
request(client, "DeleteNetworkProfile", input, options)
end
@doc """
Deletes an AWS Device Farm project, given the project ARN.
Deleting this resource does not stop an in-progress run.
"""
def delete_project(client, input, options \\ []) do
request(client, "DeleteProject", input, options)
end
@doc """
Deletes a completed remote access session and its results.
"""
def delete_remote_access_session(client, input, options \\ []) do
request(client, "DeleteRemoteAccessSession", input, options)
end
@doc """
Deletes the run, given the run ARN.
Deleting this resource does not stop an in-progress run.
"""
def delete_run(client, input, options \\ []) do
request(client, "DeleteRun", input, options)
end
@doc """
Deletes a Selenium testing project and all content generated under it.
  You cannot undo this operation.

  You cannot delete a project if it has active sessions.
"""
def delete_test_grid_project(client, input, options \\ []) do
request(client, "DeleteTestGridProject", input, options)
end
@doc """
Deletes an upload given the upload ARN.
"""
def delete_upload(client, input, options \\ []) do
request(client, "DeleteUpload", input, options)
end
@doc """
Deletes a configuration for your Amazon Virtual Private Cloud (VPC)
endpoint.
"""
def delete_v_p_c_e_configuration(client, input, options \\ []) do
request(client, "DeleteVPCEConfiguration", input, options)
end
@doc """
Returns the number of unmetered iOS or unmetered Android devices that have
been purchased by the account.
"""
def get_account_settings(client, input, options \\ []) do
request(client, "GetAccountSettings", input, options)
end
@doc """
Gets information about a unique device type.
"""
def get_device(client, input, options \\ []) do
request(client, "GetDevice", input, options)
end
@doc """
Returns information about a device instance that belongs to a private
device fleet.
"""
def get_device_instance(client, input, options \\ []) do
request(client, "GetDeviceInstance", input, options)
end
@doc """
Gets information about a device pool.
"""
def get_device_pool(client, input, options \\ []) do
request(client, "GetDevicePool", input, options)
end
@doc """
Gets information about compatibility with a device pool.
"""
def get_device_pool_compatibility(client, input, options \\ []) do
request(client, "GetDevicePoolCompatibility", input, options)
end
@doc """
Returns information about the specified instance profile.
"""
def get_instance_profile(client, input, options \\ []) do
request(client, "GetInstanceProfile", input, options)
end
@doc """
Gets information about a job.
"""
def get_job(client, input, options \\ []) do
request(client, "GetJob", input, options)
end
@doc """
Returns information about a network profile.
"""
def get_network_profile(client, input, options \\ []) do
request(client, "GetNetworkProfile", input, options)
end
@doc """
Gets the current status and future status of all offerings purchased by an
AWS account. The response indicates how many offerings are currently
available and the offerings that will be available in the next period. The
API returns a `NotEligible` error if the user is not permitted to invoke
the operation. If you must be able to invoke this operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def get_offering_status(client, input, options \\ []) do
request(client, "GetOfferingStatus", input, options)
end
@doc """
Gets information about a project.
"""
def get_project(client, input, options \\ []) do
request(client, "GetProject", input, options)
end
@doc """
Returns a link to a currently running remote access session.
"""
def get_remote_access_session(client, input, options \\ []) do
request(client, "GetRemoteAccessSession", input, options)
end
@doc """
Gets information about a run.
"""
def get_run(client, input, options \\ []) do
request(client, "GetRun", input, options)
end
@doc """
Gets information about a suite.
"""
def get_suite(client, input, options \\ []) do
request(client, "GetSuite", input, options)
end
@doc """
Gets information about a test.
"""
def get_test(client, input, options \\ []) do
request(client, "GetTest", input, options)
end
@doc """
Retrieves information about a Selenium testing project.
"""
def get_test_grid_project(client, input, options \\ []) do
request(client, "GetTestGridProject", input, options)
end
@doc """
A session is an instance of a browser created through a `RemoteWebDriver`
with the URL from `CreateTestGridUrlResult$url`. You can use the following
to look up sessions:
    * The session ARN (`GetTestGridSessionRequest$sessionArn`).

    * The project ARN and a session ID (`GetTestGridSessionRequest$projectArn`
      and `GetTestGridSessionRequest$sessionId`).
"""
def get_test_grid_session(client, input, options \\ []) do
request(client, "GetTestGridSession", input, options)
end
@doc """
Gets information about an upload.
"""
def get_upload(client, input, options \\ []) do
request(client, "GetUpload", input, options)
end
@doc """
Returns information about the configuration settings for your Amazon
Virtual Private Cloud (VPC) endpoint.
"""
def get_v_p_c_e_configuration(client, input, options \\ []) do
request(client, "GetVPCEConfiguration", input, options)
end
@doc """
Installs an application to the device in a remote access session. For
Android applications, the file must be in .apk format. For iOS
applications, the file must be in .ipa format.
"""
def install_to_remote_access_session(client, input, options \\ []) do
request(client, "InstallToRemoteAccessSession", input, options)
end
@doc """
Gets information about artifacts.
"""
def list_artifacts(client, input, options \\ []) do
request(client, "ListArtifacts", input, options)
end
@doc """
Returns information about the private device instances associated with one
or more AWS accounts.
"""
def list_device_instances(client, input, options \\ []) do
request(client, "ListDeviceInstances", input, options)
end
@doc """
Gets information about device pools.
"""
def list_device_pools(client, input, options \\ []) do
request(client, "ListDevicePools", input, options)
end
@doc """
Gets information about unique device types.
"""
def list_devices(client, input, options \\ []) do
request(client, "ListDevices", input, options)
end
@doc """
Returns information about all the instance profiles in an AWS account.
"""
def list_instance_profiles(client, input, options \\ []) do
request(client, "ListInstanceProfiles", input, options)
end
@doc """
Gets information about jobs for a given test run.
"""
def list_jobs(client, input, options \\ []) do
request(client, "ListJobs", input, options)
end
@doc """
Returns the list of available network profiles.
"""
def list_network_profiles(client, input, options \\ []) do
request(client, "ListNetworkProfiles", input, options)
end
@doc """
Returns a list of offering promotions. Each offering promotion record
contains the ID and description of the promotion. The API returns a
`NotEligible` error if the caller is not permitted to invoke the operation.
Contact
[<EMAIL>](mailto:<EMAIL>)
if you must be able to invoke this operation.
"""
def list_offering_promotions(client, input, options \\ []) do
request(client, "ListOfferingPromotions", input, options)
end
@doc """
Returns a list of all historical purchases, renewals, and system renewal
transactions for an AWS account. The list is paginated and ordered by a
descending timestamp (most recent transactions are first). The API returns
a `NotEligible` error if the user is not permitted to invoke the operation.
If you must be able to invoke this operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def list_offering_transactions(client, input, options \\ []) do
request(client, "ListOfferingTransactions", input, options)
end
@doc """
Returns a list of products or offerings that the user can manage through
the API. Each offering record indicates the recurring price per unit and
the frequency for that offering. The API returns a `NotEligible` error if
the user is not permitted to invoke the operation. If you must be able to
invoke this operation, contact
[<EMAIL>](mailto:<EMAIL>@amazon.com).
"""
def list_offerings(client, input, options \\ []) do
request(client, "ListOfferings", input, options)
end
@doc """
Gets information about projects.
"""
def list_projects(client, input, options \\ []) do
request(client, "ListProjects", input, options)
end
@doc """
Returns a list of all currently running remote access sessions.
"""
def list_remote_access_sessions(client, input, options \\ []) do
request(client, "ListRemoteAccessSessions", input, options)
end
@doc """
Gets information about runs, given an AWS Device Farm project ARN.
"""
def list_runs(client, input, options \\ []) do
request(client, "ListRuns", input, options)
end
@doc """
Gets information about samples, given an AWS Device Farm job ARN.
"""
def list_samples(client, input, options \\ []) do
request(client, "ListSamples", input, options)
end
@doc """
Gets information about test suites for a given job.
"""
def list_suites(client, input, options \\ []) do
request(client, "ListSuites", input, options)
end
@doc """
List the tags for an AWS Device Farm resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Gets a list of all Selenium testing projects in your account.
"""
def list_test_grid_projects(client, input, options \\ []) do
request(client, "ListTestGridProjects", input, options)
end
@doc """
Returns a list of the actions taken in a `TestGridSession`.
"""
def list_test_grid_session_actions(client, input, options \\ []) do
request(client, "ListTestGridSessionActions", input, options)
end
@doc """
Retrieves a list of artifacts created during the session.
"""
def list_test_grid_session_artifacts(client, input, options \\ []) do
request(client, "ListTestGridSessionArtifacts", input, options)
end
@doc """
Retrieves a list of sessions for a `TestGridProject`.
"""
def list_test_grid_sessions(client, input, options \\ []) do
request(client, "ListTestGridSessions", input, options)
end
@doc """
Gets information about tests in a given test suite.
"""
def list_tests(client, input, options \\ []) do
request(client, "ListTests", input, options)
end
@doc """
Gets information about unique problems, such as exceptions or crashes.
Unique problems are defined as a single instance of an error across a run,
job, or suite. For example, if a call in your application consistently
raises an exception (`OutOfBoundsException in MyActivity.java:386`),
`ListUniqueProblems` returns a single entry instead of many individual
entries for that exception.
"""
def list_unique_problems(client, input, options \\ []) do
request(client, "ListUniqueProblems", input, options)
end
@doc """
Gets information about uploads, given an AWS Device Farm project ARN.
"""
def list_uploads(client, input, options \\ []) do
request(client, "ListUploads", input, options)
end
@doc """
Returns information about all Amazon Virtual Private Cloud (VPC) endpoint
configurations in the AWS account.
"""
def list_v_p_c_e_configurations(client, input, options \\ []) do
request(client, "ListVPCEConfigurations", input, options)
end
@doc """
Immediately purchases offerings for an AWS account. Offerings renew with
the latest total purchased quantity for an offering, unless the renewal was
overridden. The API returns a `NotEligible` error if the user is not
permitted to invoke the operation. If you must be able to invoke this
operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def purchase_offering(client, input, options \\ []) do
request(client, "PurchaseOffering", input, options)
end
@doc """
Explicitly sets the quantity of devices to renew for an offering, starting
from the `effectiveDate` of the next period. The API returns a
`NotEligible` error if the user is not permitted to invoke the operation.
If you must be able to invoke this operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def renew_offering(client, input, options \\ []) do
request(client, "RenewOffering", input, options)
end
@doc """
Schedules a run.
"""
def schedule_run(client, input, options \\ []) do
request(client, "ScheduleRun", input, options)
end
@doc """
Initiates a stop request for the current job. AWS Device Farm immediately
stops the job on the device where tests have not started. You are not
billed for this device. On the device where tests have started, setup suite
and teardown suite tests run to completion on the device. You are billed
for setup, teardown, and any tests that were in progress or already
completed.
"""
def stop_job(client, input, options \\ []) do
request(client, "StopJob", input, options)
end
@doc """
Ends a specified remote access session.
"""
def stop_remote_access_session(client, input, options \\ []) do
request(client, "StopRemoteAccessSession", input, options)
end
@doc """
Initiates a stop request for the current test run. AWS Device Farm
immediately stops the run on devices where tests have not started. You are
not billed for these devices. On devices where tests have started
executing, setup suite and teardown suite tests run to completion on those
devices. You are billed for setup, teardown, and any tests that were in
progress or already completed.
"""
def stop_run(client, input, options \\ []) do
request(client, "StopRun", input, options)
end
@doc """
Associates the specified tags to a resource with the specified
`resourceArn`. If existing tags on a resource are not specified in the
request parameters, they are not changed. When a resource is deleted, the
tags associated with that resource are also deleted.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Deletes the specified tags from a resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates information about a private device instance.
"""
def update_device_instance(client, input, options \\ []) do
request(client, "UpdateDeviceInstance", input, options)
end
@doc """
Modifies the name, description, and rules in a device pool given the
attributes and the pool ARN. Rule updates are all-or-nothing, meaning they
can only be updated as a whole (or not at all).
"""
def update_device_pool(client, input, options \\ []) do
request(client, "UpdateDevicePool", input, options)
end
@doc """
Updates information about an existing private device instance profile.
"""
def update_instance_profile(client, input, options \\ []) do
request(client, "UpdateInstanceProfile", input, options)
end
@doc """
Updates the network profile.
"""
def update_network_profile(client, input, options \\ []) do
request(client, "UpdateNetworkProfile", input, options)
end
@doc """
Modifies the specified project name, given the project ARN and a new name.
"""
def update_project(client, input, options \\ []) do
request(client, "UpdateProject", input, options)
end
@doc """
Change details of a project.
"""
def update_test_grid_project(client, input, options \\ []) do
request(client, "UpdateTestGridProject", input, options)
end
@doc """
Updates an uploaded test spec.
"""
def update_upload(client, input, options \\ []) do
request(client, "UpdateUpload", input, options)
end
@doc """
Updates information about an Amazon Virtual Private Cloud (VPC) endpoint
configuration.
"""
def update_v_p_c_e_configuration(client, input, options \\ []) do
request(client, "UpdateVPCEConfiguration", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "devicefarm"}
host = build_host("devicefarm", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "DeviceFarm_20150623.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/device_farm.ex
| 0.770162 | 0.545346 |
device_farm.ex
|
starcoder
|
defmodule Hangman.Reduction.Engine.Worker do
@moduledoc """
Module implements workers that handle `Hangman` words reduction.
Used primarily by `Reduction.Engine` through `reduce_and_store/4` to perform
a series of steps:
* Retrieves `pass` data from `Pass.Cache`.
* Reduces word set based on `reduce_key`.
* Stores reduced set back into `Pass.Cache`.
* Returns new `Pass`.
"""
use GenServer
alias Hangman.{Pass, Words}
require Logger
@doc """
GenServer start_link wrapper function
"""
@spec start_link(pos_integer) :: GenServer.on_start()
def start_link(worker_id) do
_ = Logger.debug("Starting Engine Reduce Worker #{worker_id}")
args = {}
options = [name: via_tuple(worker_id)]
GenServer.start_link(__MODULE__, args, options)
end
@doc """
Primary `worker` function which retrieves current `pass words` data,
filters words with `regex`, tallies reduced word stream, creates new
`Words` abstraction and stores it back into words pass table.
If pass size happens to be small enough, will also return
remaining `Hangman` possible words left to aid in `guess` selection.
  Returns the `pass`. Calls are serialized through the GenServer.
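
  An illustrative call (the pass key and regex are made up for the sketch):

      pass = Worker.reduce_and_store(1, pass_key, ~r/^[a-z]*e[a-z]*$/, MapSet.new())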
"""
  @spec reduce_and_store(pos_integer, Pass.key(), Regex.t(), MapSet.t()) :: Pass.t()
def reduce_and_store(worker_id, pass_key, regex_key, %MapSet{} = exc) do
l = [worker_id, pass_key, regex_key, exc]
_ =
Logger.debug(
"reduction engine worker #{worker_id}, " <> "reduce and store, args #{inspect(l)}"
)
GenServer.call(via_tuple(worker_id), {:reduce_and_store, pass_key, regex_key, exc})
end
# Used to register / lookup process in process registry via gproc
@spec via_tuple(pos_integer) :: tuple
defp via_tuple(worker_id) do
{:via, :gproc, {:n, :l, {:reduction_engine_worker, worker_id}}}
end
@doc """
Terminate callback
No special cleanup
"""
  @spec terminate(term, term) :: :ok
def terminate(_reason, _state) do
_ = Logger.debug("Terminating Reduction Engine Worker Server")
:ok
end
# GenServer callback function to handle reduce and store request
# @callback handle_call(atom, Pass.key, Regex.t, MapSet.t, term, tuple) :: tuple
def handle_call({:reduce_and_store, pass_key, regex_key, exclusion}, _from, {}) do
pass_info = do_reduce_and_store(pass_key, regex_key, exclusion)
{:reply, pass_info, {}}
end
# Primary worker function which retrieves current pass words data,
# filters words with regex.
# Takes reduced word set and tallies it, creates new
# Chunk abstraction and stores it back into words pass table.
# If pass size happens to be small enough, will also return
# remaining hangman possible words left to aid in guess selection.
# Returns pass metadata.
@spec do_reduce_and_store(Pass.key(), Regex.t(), Enumerable.t()) :: Pass.t()
defp do_reduce_and_store(pass_key, regex_key, exclusion) do
# Request word list data from Pass
data = %Words{} = Pass.Reduction.words(pass_key)
# REDUCE
# Create new Words abstraction after filtering out failed word matches
new_data = %Words{} = data |> Words.filter(regex_key)
# STORE
# Write to cache
receipt = %Pass{} = Pass.Reduction.store(pass_key, new_data, exclusion)
# Return pass receipt metadata
receipt
end
end
|
lib/hangman/reduction_engine_worker.ex
| 0.827793 | 0.487673 |
reduction_engine_worker.ex
|
starcoder
|