defmodule Cldr.DateTime do
@moduledoc """
Provides an API for the localization and formatting of a `DateTime`
struct or any map with the keys `:year`, `:month`,
`:day`, `:calendar`, `:hour`, `:minute`, `:second` and optionally `:microsecond`.
`Cldr.DateTime` provides support for the built-in calendar
`Calendar.ISO`. Use of other calendars may not produce
the expected results.
CLDR provides standard format strings for `DateTime` which
are represented by the names `:short`, `:medium`, `:long`
and `:full`. This allows for locale-independent
formatting since each locale will define the underlying
format string as appropriate.
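For example, using the default `:medium` format (see `to_string/2`
for the full set of options):
{:ok, datetime} = DateTime.from_naive(~N[2000-01-01 23:59:59.0], "Etc/UTC")
Cldr.DateTime.to_string(datetime, locale: "en")
#=> {:ok, "Jan 1, 2000, 11:59:59 PM"}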
"""
require Cldr
alias Cldr.DateTime.{Format, Formatter}
alias Cldr.LanguageTag
@format_types [:short, :medium, :long, :full]
defmodule Formats do
defstruct Module.get_attribute(Cldr.DateTime, :format_types)
end
@doc """
Formats a DateTime according to a format string
as defined in CLDR and described in [TR35](http://unicode.org/reports/tr35/tr35-dates.html)
## Arguments
* `datetime` is a `%DateTime{}` or `%NaiveDateTime{}` struct or any map that contains the keys
`:year`, `:month`, `:day`, `:calendar`, `:hour`, `:minute` and `:second` with optional
`:microsecond`.
* `options` is a keyword list of options for formatting.
## Options
* `format:` `:short` | `:medium` | `:long` | `:full` or a format string or
any of the keys returned by `Cldr.DateTime.available_format_names`.
The default is `:medium`.
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `Cldr.get_current_locale/0`
* `number_system:` a number system into which the formatted date digits should
be transliterated
* `era: :variant` will use a variant for the era if one is available in the locale.
In the "en" locale, for example, `era: :variant` will return "BCE" instead of "BC".
* `period: :variant` will use a variant for the time period and flexible time period if
one is available in the locale. For example, in the "en" locale `period: :variant` will
return "pm" instead of "PM"
## Returns
* `{:ok, formatted_datetime}` or
* `{:error, reason}`
## Examples
iex> {:ok, datetime} = DateTime.from_naive(~N[2000-01-01 23:59:59.0], "Etc/UTC")
iex> Cldr.DateTime.to_string datetime, locale: "en"
{:ok, "Jan 1, 2000, 11:59:59 PM"}
iex> Cldr.DateTime.to_string datetime, format: :long, locale: "en"
{:ok, "January 1, 2000 at 11:59:59 PM UTC"}
iex> Cldr.DateTime.to_string datetime, format: :hms, locale: "en"
{:ok, "23:59:59"}
iex> Cldr.DateTime.to_string datetime, format: :full, locale: "en"
{:ok, "Saturday, January 1, 2000 at 11:59:59 PM GMT"}
iex> Cldr.DateTime.to_string datetime, format: :full, locale: "fr"
{:ok, "samedi 1 janvier 2000 à 23:59:59 UTC"}
"""
def to_string(date, options \\ [])
def to_string(
%{
year: _year,
month: _month,
day: _day,
hour: _hour,
minute: _minute,
second: _second,
calendar: calendar
} = datetime,
options
) do
options = Keyword.merge(default_options(), options)
with {:ok, locale} <- Cldr.validate_locale(options[:locale]),
{:ok, cldr_calendar} <- Formatter.type_from_calendar(calendar),
{:ok, format_string} <-
format_string_from_format(options[:format], locale, cldr_calendar),
{:ok, formatted} <- Formatter.format(datetime, format_string, locale, options) do
{:ok, formatted}
else
{:error, reason} -> {:error, reason}
end
end
def to_string(datetime, _options) do
error_return(datetime, [:year, :month, :day, :hour, :minute, :second, :calendar])
end
defp default_options do
[format: :medium, locale: Cldr.get_current_locale()]
end
@doc """
Formats a DateTime according to a format string
as defined in CLDR and described in [TR35](http://unicode.org/reports/tr35/tr35-dates.html)
## Arguments
* `datetime` is a `%DateTime{}` or `%NaiveDateTime{}` struct or any map that contains the keys
`:year`, `:month`, `:day`, `:calendar`, `:hour`, `:minute` and `:second` with optional
`:microsecond`.
* `options` is a keyword list of options for formatting.
## Options
* `format:` `:short` | `:medium` | `:long` | `:full` or a format string or
any of the keys returned by `Cldr.DateTime.available_format_names`.
The default is `:medium`.
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `Cldr.get_current_locale/0`
* `number_system:` a number system into which the formatted date digits should
be transliterated
* `era: :variant` will use a variant for the era if one is available in the locale.
In the "en" locale, for example, `era: :variant` will return "BCE" instead of "BC".
* `period: :variant` will use a variant for the time period and flexible time period if
one is available in the locale. For example, in the "en" locale `period: :variant` will
return "pm" instead of "PM"
## Returns
* `formatted_datetime` or
* raises an exception
## Examples
iex> {:ok, datetime} = DateTime.from_naive(~N[2000-01-01 23:59:59.0], "Etc/UTC")
iex> Cldr.DateTime.to_string! datetime, locale: "en"
"Jan 1, 2000, 11:59:59 PM"
iex> Cldr.DateTime.to_string! datetime, format: :long, locale: "en"
"January 1, 2000 at 11:59:59 PM UTC"
iex> Cldr.DateTime.to_string! datetime, format: :full, locale: "en"
"Saturday, January 1, 2000 at 11:59:59 PM GMT"
iex> Cldr.DateTime.to_string! datetime, format: :full, locale: "fr"
"samedi 1 janvier 2000 à 23:59:59 UTC"
"""
def to_string!(date_time, options \\ [])
def to_string!(date_time, options) do
case to_string(date_time, options) do
{:ok, string} -> string
{:error, {exception, message}} -> raise exception, message
end
end
# Standard format
defp format_string_from_format(format, %LanguageTag{cldr_locale_name: locale_name}, calendar)
when format in @format_types do
with {:ok, formats} <- Format.date_time_formats(locale_name, calendar) do
{:ok, Map.get(formats, format)}
end
end
# Look up for the format in :available_formats
defp format_string_from_format(format, %LanguageTag{cldr_locale_name: locale_name}, calendar)
when is_atom(format) do
with {:ok, formats} <- Format.date_time_available_formats(locale_name, calendar),
format_string <- Map.get(formats, format) do
if format_string do
{:ok, format_string}
else
{:error,
{Cldr.InvalidDateTimeFormatType,
"Invalid datetime format type #{inspect(format)}. " <>
"The valid types are #{inspect(formats)}."}}
end
end
end
# Format with a number system
defp format_string_from_format(
%{number_system: number_system, format: format},
locale,
calendar
) do
{:ok, format_string} = format_string_from_format(format, locale, calendar)
{:ok, %{number_system: number_system, format: format_string}}
end
# Straight up format string
defp format_string_from_format(format_string, _locale, _calendar)
when is_binary(format_string) do
{:ok, format_string}
end
defp error_return(map, requirements) do
{:error,
{ArgumentError,
"Invalid date_time. Date_time is a map that requires at least #{inspect(requirements)} fields. " <>
"Found: #{inspect(map)}"}}
end
end
# source: lib/cldr/datetime/datetime.ex
if Code.ensure_loaded?(Phoenix) do
defmodule Versioning.View do
@moduledoc """
A set of functions used with `Phoenix` views.
Typically, this module should be imported into your view modules. In a normal
phoenix application, this can usually be done with the following:
defmodule YourAppWeb do
# ...
def view do
quote do
use Phoenix.View, root: "lib/your_app_web/templates", namespace: "web"
# ...
import Versioning.View
# ...
end
end
end
Please see the documentation at `Phoenix.View` for details on how to set up
a typical view.
In places that you would use `Phoenix.View.render_one/4`, this module provides
`render_version/6`. In places that you would use `Phoenix.View.render_many/4`,
this module provides `render_versions/6`.
In order to use these functions, you must already have applied the schema and
requested version to the conn. This is typically done with `Versioning.Plug`
or through the helpers available in `Versioning.Controller`.
## Example
Below is an example of how to use versioning in a typical view:
defmodule YourApp.UserView do
use YourApp.View
def render("index.json", %{conn: conn, users: users}) do
%{
"users" => render_versions(conn, users, "User", UserView, "user.json"),
}
end
def render("show.json", %{conn: conn, users: users}) do
%{
"user" => render_version(conn, users, "User", UserView, "user.json"),
}
end
def render("user.json", %{user: user}) do
%{"name" => user.name, "address" => user.address}
end
end
A typical call, such as:
render_many(users, UserView, "user.json")
Is replaced by the following:
render_versions(conn, users, "User", UserView, "user.json")
In order to render versions of our data, we must pass the conn struct, our
data to be versioned, the type the data represents in our schema, the view
module to use, the template to use, as well as an additional assigns.
The contents of the "user.json" template represent the latest version of your
data. They will be run through your versioning schema to the version requested
by the user. The output returned by your schema is what will be finally
rendered.
"""
@doc """
Renders a versioned collection.
A collection is any enumerable of structs. This function returns the
rendered versioned collection in a list:
render_versions(conn, users, "User", UserView, "show.json")
Under the hood, this will render each item using `Phoenix.View.render/3` - so
the latest version of the data should be represented in your view using typical
view standards.
After the data has been rendered, it will be passed to your schema and
versioned to the version that has been requested.
"""
@spec render_versions(Plug.Conn.t(), list(), binary(), module(), binary(), map()) :: [any()]
def render_versions(conn, collection, type, view, template, assigns \\ %{}) do
Enum.map(collection, fn resource ->
data = Phoenix.View.render_one(resource, view, template, assigns)
do_versioning(conn, data, type)
end)
end
@doc """
Renders a single versioned item if not nil.
render_version(conn, user, "User", UserView, "show.json")
This requires the schema and requested version to already be applied to the
conn (see the module documentation above).
Under the hood, this will render the item using `Phoenix.View.render/3` - so
the latest version of the data should be represented in your view using typical
view standards.
After the data has been rendered, it will be passed to your schema and
versioned to requested target version.
"""
@spec render_version(Plug.Conn.t(), any(), binary(), module(), binary(), map()) :: any()
def render_version(conn, resource, type, view, template, assigns \\ %{})
def render_version(_conn, nil, _type, _view, _template, _assigns), do: nil
def render_version(conn, resource, type, view, template, assigns) do
data = Phoenix.View.render_one(resource, view, template, assigns)
do_versioning(conn, data, type)
end
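# Runs the rendered data through the versioning schema, transforming it from
# the schema's latest version to the version requested on the conn.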
defp do_versioning(conn, data, type) do
{schema, current, target} = get_versioning(conn)
versioning = Versioning.new(data, current, target, type)
case schema.run(versioning) do
{:ok, versioning} -> versioning.data
{:error, error} -> raise error
end
end
defp get_versioning(conn) do
schema = Versioning.Controller.fetch_schema!(conn)
current = schema.__schema__(:latest, :string)
target = Versioning.Controller.fetch_version!(conn)
{schema, current, target}
end
end
end
# source: lib/versioning/view.ex
defmodule Game.Grid do
@moduledoc false
alias __MODULE__
alias Game.{Cell, Coordinate, Tile}
@enforce_keys [:cells, :columns, :rows, :tiles]
@columns 1..4
@rows 1..4
@vectors %{
up: %{x: 0, y: -1},
down: %{x: 0, y: 1},
left: %{x: -1, y: 0},
right: %{x: 1, y: 0}
}
defstruct [:cells, :columns, :rows, :tiles]
# ======================================================================================
# Public
# ======================================================================================
def contains_combinable_cells?(%Grid{cells: cells} = grid) do
Enum.any?(cells, fn
%Cell{coordinate: %Coordinate{x: x, y: y}, tile: %Tile{value: value}} ->
Enum.any?(@vectors, fn {_direction, vector} ->
adjacent_coordinate = Coordinate.new(x + vector.x, y + vector.y)
adjacent_tile = Tile.at_coordinate(grid, adjacent_coordinate)
adjacent_tile && adjacent_tile.value === value
end)
_ ->
false
end)
end
def contains_empty_cells?(%Grid{} = grid) do
grid
|> list_empty_cells()
|> Enum.any?()
end
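# Walks from `cell` one coordinate at a time in `direction` until it reaches
# the grid edge or an occupied cell, returning the farthest empty cell found
# and the obstructing (or off-grid) next cell.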
def farthest_empty_cell(%Grid{} = grid, %Cell{coordinate: coordinate} = cell, direction) do
next_coordinate = Coordinate.new(coordinate.x + @vectors[direction].x, coordinate.y + @vectors[direction].y)
next_cell = Cell.at_coordinate(grid, next_coordinate)
obstruction? = is_nil(next_cell) or not is_nil(next_cell.tile)
case obstruction? do
false -> farthest_empty_cell(grid, next_cell, direction)
true -> %{farthest: cell, next: next_cell}
end
end
def list_empty_cells(%Grid{cells: cells}) do
cells |> Enum.filter(&is_nil(&1.tile))
end
def list_occupied_cells(%Grid{cells: cells}) do
cells |> Enum.reject(&is_nil(&1.tile))
end
def moves_available?(grid) do
contains_empty_cells?(grid) or contains_combinable_cells?(grid)
end
def new() do
cells =
for row <- @rows, column <- @columns do
Coordinate.new(column, row) |> Cell.new()
end
%Grid{cells: cells, columns: @columns, rows: @rows, tiles: %{}}
end
end
# source: lib/game/grid.ex
defmodule ExWire.Packet.Capability.Par.SnapshotManifest do
@moduledoc """
Respond to a GetSnapshotManifest message with either an empty RLP list or a
1-item RLP list containing a snapshot manifest
```
`SnapshotManifest` [`0x12`, `manifest` or nothing]
```
"""
import Exth, only: [maybe_decode_unsigned: 1]
@behaviour ExWire.Packet
defmodule Manifest do
@moduledoc """
A Manifest from a warp-sync peer.
version: snapshot format version. Must be set to 2.
state_hashes: a list of all the state chunks in this snapshot
block_hashes: a list of all the block chunks in this snapshot
state_root: the root which the rebuilt state trie should have. Used to ensure validity
block_number: the number of the best block in the snapshot; the one which the state coordinates to.
block_hash: the best block in the snapshot's hash.
"""
defstruct [
:version,
:state_hashes,
:block_hashes,
:state_root,
:block_number,
:block_hash
]
end
@type manifest :: %Manifest{
version: integer(),
state_hashes: list(EVM.hash()),
block_hashes: list(EVM.hash()),
state_root: binary(),
block_number: integer(),
block_hash: EVM.hash()
}
@type t :: %__MODULE__{
manifest: manifest() | nil
}
defstruct manifest: nil
@doc """
Returns the relative message id offset for this message.
This will help determine what its message ID is relative to other Packets in the same Capability.
"""
@impl true
@spec message_id_offset() :: 0x12
def message_id_offset do
0x12
end
@doc """
Given a SnapshotManifest packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.Capability.Par.SnapshotManifest{manifest: nil}
...> |> ExWire.Packet.Capability.Par.SnapshotManifest.serialize()
[]
iex> %ExWire.Packet.Capability.Par.SnapshotManifest{
...> manifest: %ExWire.Packet.Capability.Par.SnapshotManifest.Manifest{
...> version: 2,
...> state_hashes: [<<1::256>>, <<2::256>>],
...> block_hashes: [<<3::256>>, <<4::256>>],
...> state_root: <<5::256>>,
...> block_number: 6,
...> block_hash: <<7::256>>
...> }
...> }
...> |> ExWire.Packet.Capability.Par.SnapshotManifest.serialize()
[2, [<<1::256>>, <<2::256>>], [<<3::256>>, <<4::256>>], <<5::256>>, 6, <<7::256>>]
"""
@impl true
def serialize(_packet = %__MODULE__{manifest: nil}), do: []
def serialize(_packet = %__MODULE__{manifest: manifest}) do
[
manifest.version,
manifest.state_hashes,
manifest.block_hashes,
manifest.state_root,
manifest.block_number,
manifest.block_hash
]
end
@doc """
Given an RLP-encoded SnapshotManifest packet from Eth Wire Protocol,
decodes into a SnapshotManifest struct.
## Examples
iex> ExWire.Packet.Capability.Par.SnapshotManifest.deserialize([])
%ExWire.Packet.Capability.Par.SnapshotManifest{manifest: nil}
iex> ExWire.Packet.Capability.Par.SnapshotManifest.deserialize([[2, [<<1::256>>, <<2::256>>], [<<3::256>>, <<4::256>>], <<5::256>>, 6, <<7::256>>]])
%ExWire.Packet.Capability.Par.SnapshotManifest{
manifest: %ExWire.Packet.Capability.Par.SnapshotManifest.Manifest{
version: 2,
state_hashes: [<<1::256>>, <<2::256>>],
block_hashes: [<<3::256>>, <<4::256>>],
state_root: <<5::256>>,
block_number: 6,
block_hash: <<7::256>>
}
}
iex> ExWire.Packet.Capability.Par.SnapshotManifest.deserialize([[3, [<<1::256>>, <<2::256>>], [<<3::256>>, <<4::256>>], <<5::256>>, 6, <<7::256>>]])
** (MatchError) no match of right hand side value: 3
"""
@impl true
def deserialize([]), do: %__MODULE__{manifest: nil}
def deserialize([rlp]) do
[
version,
state_hashes,
block_hashes,
state_root,
block_number,
block_hash
] = rlp
%__MODULE__{
manifest: %Manifest{
version: 2 = maybe_decode_unsigned(version),
state_hashes: state_hashes,
block_hashes: block_hashes,
state_root: state_root,
block_number: maybe_decode_unsigned(block_number),
block_hash: block_hash
}
}
end
@doc """
Handles a SnapshotManifest message. We should send our manifest
to the peer. For now, we'll do nothing.
## Examples
iex> %ExWire.Packet.Capability.Par.SnapshotManifest{}
...> |> ExWire.Packet.Capability.Par.SnapshotManifest.handle()
:ok
"""
@impl true
def handle(_packet = %__MODULE__{}) do
# TODO: Respond with empty manifest
:ok
end
end
# source: apps/ex_wire/lib/ex_wire/packet/capability/par/snapshot_manifest.ex
defmodule BitcrowdEcto.Assertions do
@moduledoc """
Useful little test assertions related to `t:Ecto.Changeset.t/0`.
## Example
Import this in your `ExUnit.CaseTemplate`:
defmodule MyApp.TestCase do
use ExUnit.CaseTemplate
using do
quote do
import Ecto
import Ecto.Changeset
import Ecto.Query
import BitcrowdEcto.Assertions
end
end
end
"""
@moduledoc since: "0.1.0"
import ExUnit.Assertions
alias Ecto.Changeset
@doc """
A better error helper that transforms the errors on a given field into a list of
`[<message>, <value of the :validation metadata field>]`.
If multiple validations failed, the list will contain more elements! That simple.
## Metadata
By default, `flat_errors_on/2` extracts metadata from the `:validation` and `:constraint` keys,
as those are where Ecto stores its metadata. Custom metadata at different keys can be extracted
using the `:metadata` option.
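## Example
A changeset with a failed `validate_length/3` (minimum 3) on `:name` would
yield both the interpolated message and the validation metadata (illustrative):
flat_errors_on(changeset, :name)
#=> ["should be at least 3 character(s)", :length]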
"""
@doc since: "0.1.0"
@spec flat_errors_on(Changeset.t(), atom) :: [String.t() | atom]
@spec flat_errors_on(Changeset.t(), atom, [{:metadata, atom}]) :: [String.t() | atom]
def flat_errors_on(changeset, field, opts \\ []) do
metadata =
opts
|> Keyword.get(:metadata, [:constraint, :validation])
|> List.wrap()
changeset.errors
|> Keyword.get_values(field)
|> Enum.flat_map(fn {msg, opts} ->
interpolated =
Regex.replace(~r"%{(\w+)}", msg, fn _, key ->
opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string()
end)
metadata =
metadata
|> Enum.map(&Keyword.get(opts, &1))
|> Enum.reject(&is_nil/1)
[interpolated | metadata]
end)
end
@doc """
Asserts that a changeset contains a given error on a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec assert_error_on(Changeset.t(), atom, atom | [atom]) :: Changeset.t() | no_return
@spec assert_error_on(Changeset.t(), atom, atom | [atom], [{:metadata, atom}]) ::
Changeset.t() | no_return
def assert_error_on(changeset, field, error, opts \\ [])
def assert_error_on(changeset, _field, [], _opts), do: changeset
def assert_error_on(changeset, field, [error | rest], opts) do
changeset
|> assert_error_on(field, error, opts)
|> assert_error_on(field, rest, opts)
end
def assert_error_on(changeset, field, error, opts) do
assert error in flat_errors_on(changeset, field, opts)
changeset
end
for validation <- [:required, :format, :number, :inclusion, :acceptance] do
@doc """
Asserts that a changeset contains a failed "#{validation}" validation on a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec unquote(:"assert_#{validation}_error_on")(Changeset.t(), atom) ::
Changeset.t() | no_return
def unquote(:"assert_#{validation}_error_on")(changeset, field) do
assert_error_on(changeset, field, unquote(validation))
end
end
for constraint <- [:unique, :no_assoc] do
@doc """
Asserts that a changeset contains a failed "#{constraint}" constraint validation on a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec unquote(:"assert_#{constraint}_constraint_error_on")(Changeset.t(), atom) ::
Changeset.t() | no_return
def unquote(:"assert_#{constraint}_constraint_error_on")(changeset, field) do
assert_error_on(changeset, field, unquote(constraint))
end
end
@doc """
Asserts that a changeset contains a failed "foreign_key" constraint validation on a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec assert_foreign_constraint_error_on(Changeset.t(), atom) :: Changeset.t() | no_return
@deprecated "Use assert_foreign_key_constraint_error_on/2 instead"
def assert_foreign_constraint_error_on(changeset, field) do
assert_foreign_key_constraint_error_on(changeset, field)
end
@doc """
Asserts that a changeset contains a failed "foreign_key" constraint validation on a given field.
Returns the changeset for chainability.
"""
@doc since: "0.10.0"
def assert_foreign_key_constraint_error_on(changeset, field) do
assert_error_on(changeset, field, :foreign)
end
for {constraint, {type, error_type}} <- [
unique: {:unique, :unique},
foreign_key: {:foreign_key, :foreign},
no_assoc: {:foreign_key, :no_assoc}
] do
@doc """
Asserts that a changeset contains a constraint on a given field.
This function looks into the changeset's (internal) `constraints` field to see if a
`*_constraint` function has been called on it. Tests using this do not need to actually
perform the database operation. However, given that the constraints only work in
combination with a corresponding database constraint, it is advisable to perform the
operation and use `assert_#{constraint}_constraint_error_on/2` instead.
Returns the changeset for chainability.
## Options
The given options are used as match values against the constraint map. They loosely
correspond to the options of `Ecto.Changeset.#{constraint}_constraint/2`, only `:name`
becomes `:constraint`.
"""
@doc since: "0.10.0"
@spec unquote(:"assert_#{constraint}_constraint_on")(Changeset.t(), atom) ::
Changeset.t() | no_return
@spec unquote(:"assert_#{constraint}_constraint_on")(Changeset.t(), atom, keyword) ::
Changeset.t() | no_return
def unquote(:"assert_#{constraint}_constraint_on")(changeset, field, opts \\ []) do
opts =
Keyword.merge(opts, error_type: unquote(error_type), type: unquote(type), field: field)
assert(
has_matching_constraint?(changeset, opts),
"""
Expected changeset to have a #{unquote(constraint)} constraint on field #{inspect(field)},
but didn't find one.
Constraints:
#{inspect(changeset.constraints, pretty: true)}
"""
)
changeset
end
end
# Checks that a changeset has a constraint with matching attributes.
defp has_matching_constraint?(changeset, attributes) do
Enum.any?(changeset.constraints, fn constraint ->
Enum.all?(attributes, fn {key, value} ->
constraint[key] == value
end)
end)
end
@doc """
Asserts that a changeset does not contain an error on a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec refute_errors_on(Changeset.t(), atom) :: Changeset.t() | no_return
def refute_errors_on(changeset, field) do
assert flat_errors_on(changeset, field) == []
changeset
end
@doc """
Asserts that a changeset contains a change of a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec assert_changes(Changeset.t(), atom) :: Changeset.t() | no_return
def assert_changes(changeset, field) do
assert Map.has_key?(changeset.changes, field)
changeset
end
@doc """
Asserts that a changeset contains a change of a given field to a given value.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec assert_changes(Changeset.t(), atom, any) :: Changeset.t() | no_return
def assert_changes(changeset, field, value) do
assert Map.get(changeset.changes, field) == value
changeset
end
@doc """
Refutes that a changeset accepts changes to a given field.
Returns the changeset for chainability.
"""
@doc since: "0.1.0"
@spec refute_changes(Changeset.t(), atom) :: Changeset.t() | no_return
def refute_changes(changeset, field) do
refute Map.has_key?(changeset.changes, field)
changeset
end
@doc """
Asserts that a given function changes the integer fetched by another function by a delta.
## Example
assert_difference fn -> Repo.count(Foo) end, 1, fn ->
%Foo{} |> Repo.insert()
end
"""
@doc since: "0.1.0"
@spec assert_difference((() -> float | integer), float | integer, (() -> any)) ::
Changeset.t() | no_return
@spec assert_difference((() -> float | integer), float | integer, (() -> any), [
{:message, String.t()}
]) :: Changeset.t() | no_return
def assert_difference(what, by, how, opts \\ []) do
msg = Keyword.get(opts, :message, "#{inspect(what)} hasn't changed by #{by}")
value_before = what.()
rv = how.()
value_after = what.()
assert value_before == value_after - by,
"""
#{msg}
value before: #{inspect(value_before)}
value after: #{inspect(value_after)}
"""
rv
end
@doc """
Assert that a given function doesn't change the value fetched by another function.
## Example
refute_difference fn -> Repo.count(Foo) end, fn ->
Repo.insert(%Foo{})
end
"""
@doc since: "0.1.0"
@spec refute_difference((() -> any), (() -> any)) :: Changeset.t() | no_return
@spec refute_difference((() -> any), (() -> any), [{:message, String.t()}]) ::
Changeset.t() | no_return
def refute_difference(what, how, opts \\ []) do
msg = Keyword.get(opts, :message, "#{inspect(what)} has changed")
value_before = what.()
rv = how.()
value_after = what.()
assert value_before == value_after,
"""
#{msg}
value before: #{inspect(value_before)}
value after: #{inspect(value_after)}
"""
rv
end
@doc """
Assert that a given function changes the count of a given database table.
## Example
assert_count_difference Repo, Foo, 1, fn ->
Repo.insert(%Foo{})
end
"""
@doc since: "0.1.0"
@spec assert_count_difference(Ecto.Repo.t(), module, integer, (() -> any)) ::
Changeset.t() | no_return
def assert_count_difference(repo, schema, by, how) do
assert_difference(fn -> repo.aggregate(schema, :count) end, by, how,
message: "#{inspect(schema)} hasn't changed by #{by}"
)
end
@doc """
Assert multiple database table count changes.
See `assert_count_difference/4` for details.
## Example
assert_count_differences(MyApp.Repo, [{MyApp.Foo, 1}, {MyApp.Bar, -1}], fn ->
%MyApp.Foo{} |> MyApp.Repo.insert()
%MyApp.Bar{id: 1} |> MyApp.Repo.delete()
end)
"""
@doc since: "0.1.0"
@spec assert_count_differences(Ecto.Repo.t(), [{module, integer}], (() -> any)) ::
Changeset.t() | no_return
def assert_count_differences(_repo, [], how), do: how.()
def assert_count_differences(repo, [{schema, by} | rest], how) do
assert_count_difference(repo, schema, by, fn ->
assert_count_differences(repo, rest, how)
end)
end
@doc """
Asserts that an Ecto struct has a preloaded nested struct at a given path.
"""
@doc since: "0.1.0"
@spec assert_preloaded(schema :: Ecto.Schema.t(), fields :: atom | [atom]) ::
boolean | no_return
def assert_preloaded(record, [x]), do: assert_preloaded(record, x)
def assert_preloaded(record, [x | xs]), do: assert_preloaded(Map.get(record, x), xs)
def assert_preloaded(record, x) when is_atom(x) do
refute not_loaded_ecto_association?(Map.get(record, x)),
"""
record of type #{inspect(Map.get(record, :__struct__))} has not loaded association at :#{x}
record: #{inspect(record)}
"""
end
@doc """
Refutes that an Ecto struct has a preloaded nested struct at a given path.
"""
@doc since: "0.1.0"
@spec refute_preloaded(schema :: Ecto.Schema.t(), fields :: atom | [atom]) ::
boolean | no_return
def refute_preloaded(record, [x]), do: refute_preloaded(record, x)
def refute_preloaded(record, [x | xs]), do: refute_preloaded(Map.get(record, x), xs)
def refute_preloaded(record, x) when is_atom(x) do
assert not_loaded_ecto_association?(Map.get(record, x)),
"""
record of type #{inspect(Map.get(record, :__struct__))} has preloaded association at :#{x}
record: #{inspect(record)}
"""
end
defp not_loaded_ecto_association?(%Ecto.Association.NotLoaded{}), do: true
defp not_loaded_ecto_association?(_), do: false
@doc """
Compares two DateTime values, asserting that they roughly coincide (within a delta).
The delta defaults to 5 seconds and can be passed in optionally.
"""
@doc since: "0.3.0"
def assert_almost_coincide(%DateTime{} = a, %DateTime{} = b, delta \\ 5) do
assert_in_delta DateTime.to_unix(a), DateTime.to_unix(b), delta
end
@doc """
Asserts that a DateTime value roughly coincides with the present time.
"""
@doc since: "0.3.0"
def assert_almost_now(%DateTime{} = timestamp) do
assert_almost_coincide(timestamp, DateTime.utc_now())
end
def assert_almost_now(value) do
raise(ExUnit.AssertionError, "The given value #{value} is not a timestamp.")
end
@doc """
Asserts that the value of a datetime field changed to the present time.
## Example
%TestSchema{datetime: nil}
|> Ecto.Changeset.change(%{datetime: DateTime.utc_now()})
|> assert_change_to_almost_now(:datetime)
"""
@doc since: "0.9.0"
@spec assert_change_to_almost_now(Changeset.t(), atom()) :: Changeset.t() | no_return
def assert_change_to_almost_now(%Changeset{} = changeset, field) do
case Changeset.fetch_change(changeset, field) do
{:ok, timestamp} ->
assert_almost_now(timestamp)
changeset
_ ->
raise ExUnit.AssertionError, "The field #{field} didn't change."
end
end
@doc """
Assert that two lists are equal when sorted (Enum.sort).
## Example
assert_sorted_equal [:"1", :"2"], [:"2", :"1"]
assert_sorted_equal(
[%{id: 2}, %{id: 1}],
[%{id: 1, preload_nested_resource: %{id: 5}}, %{id: 2}],
& &1.id
)
"""
@doc since: "0.3.0"
def assert_sorted_equal(a, b) when is_list(a) and is_list(b) do
assert(Enum.sort(a) == Enum.sort(b))
end
def assert_sorted_equal(a, b, accessor) do
assert_sorted_equal(Enum.map(a, accessor), Enum.map(b, accessor))
end
@doc since: "0.11.0"
@spec assert_changeset_valid(Changeset.t()) :: Changeset.t() | no_return
def assert_changeset_valid(%Changeset{} = cs) do
assert cs.valid?
cs
end
@doc since: "0.11.0"
@spec refute_changeset_valid(Changeset.t()) :: Changeset.t() | no_return
def refute_changeset_valid(%Changeset{} = cs) do
refute cs.valid?
cs
end
end
# source: lib/bitcrowd_ecto/assertions.ex
defmodule Adventofcode.Day16TicketTranslation do
use Adventofcode
alias __MODULE__.{Parser, Part1, Part2}
def part_1(input) do
input
|> Parser.parse()
|> Part1.solve()
end
def part_2(input) do
input
|> Parser.parse()
|> Part2.solve()
end
defmodule Part1 do
def solve([rules, _your_ticket, nearby_tickets]) do
all_ranges = rules |> Enum.flat_map(&tl/1)
nearby_tickets
|> List.flatten()
|> Enum.reject(fn num -> Enum.any?(all_ranges, &(num in &1)) end)
|> Enum.sum()
end
end
defmodule Part2 do
def solve([rules, your_ticket, nearby_tickets], prefix \\ "departure") do
all_ranges = rules |> Enum.flat_map(&tl/1)
tickets = nearby_tickets |> Enum.filter(&valid_ticket?(&1, all_ranges))
fields = possible_fields(tickets, rules)
fields
|> Enum.filter(&(&1 |> elem(1) |> String.starts_with?(prefix)))
|> Enum.map(&elem(&1, 0))
|> Enum.map(&Enum.at(your_ticket, &1))
|> Enum.reduce(&Kernel.*/2)
end
defp possible_fields(tickets, rules) do
tickets
|> Enum.zip()
|> Enum.map(&Tuple.to_list/1)
|> Enum.map(&matching_rules(&1, rules))
|> Enum.with_index()
|> (&rule_out_exact_order({&1, %{}})).()
end
defp rule_out_exact_order({[], result}) do
Enum.sort_by(result, &elem(&1, 0))
end
defp rule_out_exact_order({possibilities, matched}) do
possibilities
|> Enum.map(&rule_out_already_matched_fields(&1, matched))
|> Enum.filter(&(length(elem(&1, 0)) > 0))
|> Enum.sort_by(&length(elem(&1, 0)))
|> Enum.map_reduce(matched, &match_fields/2)
|> rule_out_exact_order
end
defp match_fields({[name], index}, acc) do
{{[name], index}, Map.put(acc, index, name)}
end
defp match_fields({choices, index}, acc), do: {{choices, index}, acc}
defp rule_out_already_matched_fields({choices, index}, matched) do
{Enum.reject(choices, &(&1 in Map.values(matched))), index}
end
defp valid_ticket?(ticket, all_ranges) do
Enum.all?(ticket, fn num -> Enum.any?(all_ranges, &(num in &1)) end)
end
defp matching_rules(field_values, rules) do
rules
|> Enum.filter(&matching_rule?(&1, field_values))
|> Enum.map(&hd/1)
end
defp matching_rule?([_name, range1, range2], field_values) do
Enum.all?(field_values, &(&1 in range1 or &1 in range2))
end
end
defmodule Parser do
def parse(input) do
input
|> String.trim()
|> String.split("\n\n")
|> Enum.map(&parse_group/1)
end
defp parse_group("your ticket:" <> _ = group) do
group
|> String.split("\n")
|> Enum.at(1)
|> parse_ticket
end
defp parse_group("nearby tickets:" <> _ = group) do
group
|> String.split("\n")
|> Enum.drop(1)
|> Enum.map(&parse_ticket/1)
end
defp parse_group(group) do
group
|> String.split("\n")
|> Enum.map(&Regex.split(~r/(: | or )/, &1))
|> Enum.map(&[hd(&1) | parse_ranges(tl(&1))])
end
defp parse_ranges(ranges) do
ranges
|> Enum.map(&String.split(&1, "-"))
|> Enum.map(&parse_range/1)
end
defp parse_range([a, b]), do: String.to_integer(a)..String.to_integer(b)
defp parse_ticket(ticket) do
ticket
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
end
end
# source: lib/day_16_ticket_translation.ex
defmodule FeatureToggler do
import Kernel
@moduledoc """
Provides functions to set, unset, and check whether a feature is enabled for a given user id.
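## Example
A minimal sketch (assumes the `exredis` dependency and a reachable Redis server):
{:ok, client} = Exredis.start_link()
FeatureToggler.activate_feature(client, "awesome_feature", 1)
#=> true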
"""
@doc """
## Parameters
- client : client to connect to redis
- feature : String that represents the name of the feature
- user_id : Integer that represents the identity of a user
## Examples
iex> FeatureToggler.activate_feature(client, "awesome_feature", 1)
true
"""
def activate_feature(client, feature, user_id) when is_integer(user_id) do
client |> Exredis.query(["SADD", feature, user_id]) |> return_as_boolean
end
@doc """
## Parameters
- client : client to connect to redis
- feature : String that represents the name of the feature
- user_id : Integer that represents the identity of a user
## Examples
iex> FeatureToggler.deactivate_feature(client, "awesome_feature", 1)
true
"""
def deactivate_feature(client, feature, user_id) when is_integer(user_id) do
client |> Exredis.query(["SREM", feature, user_id]) |> return_as_boolean
end
@doc """
## Parameters
- client : client to connect to redis
- feature : String that represents the name of the feature
- user_id : Integer that represents the identity of a user
## Examples
iex> FeatureToggler.activated_for?(client, "awesome_feature", 1)
true
"""
def activated_for?(client, feature, user_id) when is_integer(user_id) do
client |> Exredis.query(["SISMEMBER", feature, user_id]) |> return_as_boolean
end
@doc """
## Parameters
- client : client to connect to redis
- feature : String that represents the name of the feature
- user_id : Integer that represents the identity of a user
## Examples
iex> FeatureToggler.deactivated_for?(client, "awesome_feature", 1)
false
"""
def deactivated_for?(client, feature, user_id) when is_integer(user_id) do
client |> Exredis.query(["SISMEMBER", feature, user_id]) |> return_as_boolean |> Kernel.!
end
@doc false
def return_as_boolean(result), do: result == "1"
end
# source: lib/feature_toggler.ex
defmodule ExIntegrate.Core.Run do
@moduledoc """
A `Run` represents an entire CI orchestrated workflow, from start to finish.
A Run consists of many `Pipeline`s, which it runs in parallel except when they
depend on each other. Internally, the pipelines are stored in a directed
acyclic graph (DAG), and this graph is traversed from start to finish as
pipelines are launched and completed.
The `%Run{}` struct stores
* the complete specification for the run's execution,
* the results of the run, including the output of all `Step`s, and
* metadata.
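A run is typically constructed from decoded JSON params. A minimal sketch of
the expected shape (inferred from `new/1`; the names are illustrative):
Run.new(%{
"pipelines" => [
%{"name" => "build", "steps" => [%{"name" => "compile", "command" => "mix", "args" => ["compile"]}]},
%{"name" => "test", "depends_on" => "build", "steps" => [%{"name" => "unit", "command" => "mix", "args" => ["test"]}]}
]
})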
"""
alias ExIntegrate.Core.Pipeline
alias ExIntegrate.Core.Step
@behaviour Access
@enforce_keys [:pipelines, :end_nodes, :count]
defstruct @enforce_keys ++ [failed?: false, active_pipelines: []]
@type t :: %__MODULE__{
active_pipelines: [Pipeline.key()],
count: non_neg_integer,
end_nodes: non_neg_integer,
failed?: boolean,
pipelines: Graph.t()
}
@type pipeline_root :: :root
@type pipeline_key :: String.t()
@pipeline_root :root
@spec new(params :: map) :: t()
def new(params) do
pipelines = set_up_pipeline_graph(params)
end_nodes = pipelines |> do_final_pipelines() |> length()
count = end_nodes
struct!(__MODULE__, pipelines: pipelines, end_nodes: end_nodes, count: count)
end
defp set_up_pipeline_graph(params) do
pipelines = params["pipelines"] || []
initial_graph = Graph.new(type: :directed) |> Graph.add_vertex(@pipeline_root)
Enum.reduce(pipelines, initial_graph, &add_pipeline_to_graph/2)
end
defp add_pipeline_to_graph(pipeline_attrs, graph) do
steps =
Enum.map(pipeline_attrs["steps"], fn step_attrs ->
%Step{
args: step_attrs["args"],
command: step_attrs["command"],
name: step_attrs["name"]
}
end)
pipeline = Pipeline.new(name: pipeline_attrs["name"], steps: steps)
parent_pipeline =
case pipeline_attrs["depends_on"] do
nil -> @pipeline_root
parent_pipeline_name -> look_up_pipeline(graph, parent_pipeline_name)
end
Graph.add_edge(graph, parent_pipeline, pipeline)
end
defp look_up_pipeline(pipeline_graph, pipeline_name) do
pipeline_graph
|> Graph.vertices()
|> Enum.find(fn
%{name: name} when name == pipeline_name -> true
_ -> false
end)
end
defp do_final_pipelines(pipeline_graph) do
Graph.Reducers.Dfs.reduce(pipeline_graph, [], fn pipeline, acc ->
case Graph.out_degree(pipeline_graph, pipeline) do
0 -> {:next, [pipeline | acc]}
_ -> {:skip, acc}
end
end)
end
@doc """
Updates the given pipeline in the run's collection.
Returns the run with updated pipeline.
"""
@spec put_pipeline(t(), Pipeline.t() | pipeline_key, Pipeline.t()) :: t()
def put_pipeline(%__MODULE__{} = run, %Pipeline{} = old_pipeline, %Pipeline{} = new_pipeline) do
updated_pipelines = Graph.replace_vertex(run.pipelines, old_pipeline, new_pipeline)
run
|> Map.put(:pipelines, updated_pipelines)
|> Map.put(:failed?, run.failed? || Pipeline.failed?(new_pipeline))
end
def put_pipeline(%__MODULE__{} = run, old_pipeline_name, %Pipeline{} = new_pipeline) do
old_pipeline = run[old_pipeline_name]
put_pipeline(run, old_pipeline, new_pipeline)
end
@doc """
Returns true if the pipeline is included in the run; otherwise, returns false.
"""
@spec has_pipeline?(t(), Pipeline.t()) :: boolean
def has_pipeline?(%__MODULE__{} = run, %Pipeline{} = pipeline) do
Graph.has_vertex?(run.pipelines, pipeline)
end
@spec failed?(t()) :: boolean
def failed?(%__MODULE__{} = run), do: run.failed?
@spec pipeline_root(t()) :: pipeline_root
def pipeline_root(%__MODULE__{} = run) do
if Graph.has_vertex?(run.pipelines, @pipeline_root) do
@pipeline_root
else
raise "graph is missing root node #{inspect(run)}"
end
end
@spec pipelines(t()) :: [Pipeline.t()]
def pipelines(%__MODULE__{} = run),
do: Graph.vertices(run.pipelines) |> Enum.filter(&match?(%Pipeline{}, &1))
@spec next_pipelines(t(), Pipeline.t() | pipeline_root) :: [Pipeline.t()]
def next_pipelines(%__MODULE__{} = run, pipeline) do
Graph.out_neighbors(run.pipelines, pipeline)
end
@spec final_pipelines(t()) :: [Pipeline.t()]
def final_pipelines(%__MODULE__{} = run) do
do_final_pipelines(run.pipelines)
end
@doc """
Checks whether the given pipeline is one of the final pipelines in the
pipeline graph.
If so, decrements the count by 1. If not, does nothing.
"""
@spec check_final_pipeline(t, Pipeline.t()) :: t
def check_final_pipeline(%__MODULE__{} = run, %Pipeline{} = pipeline) do
Map.update(run, :count, run.count, fn count ->
if pipeline in final_pipelines(run) do
count - 1
else
count
end
end)
end
@impl Access
@spec fetch(t(), pipeline_key) :: {:ok, Pipeline.t()}
def fetch(%__MODULE__{} = run, pipeline_name) do
{:ok, look_up_pipeline(run.pipelines, pipeline_name)}
end
@impl Access
@spec pop(t(), term) :: no_return
def pop(%__MODULE__{} = _run, _pipeline_name) do
raise "do not pop a run's pipelines"
end
@impl Access
@spec get_and_update(t(), pipeline_key, fun) :: {Pipeline.t(), t()}
def get_and_update(%__MODULE__{} = run, pipeline_name, fun) when is_function(fun, 1) do
current = run[pipeline_name]
case fun.(current) do
{get, update} ->
{get, put_pipeline(run, pipeline_name, update)}
:pop ->
raise "popping a pipeline is not allowed"
other ->
raise "the given function must return a two-element tuple; got #{inspect(other)}"
end
end
end
# source: lib/ex_integrate/core/run.ex
defmodule PowPersistentSession.Plug.Cookie do
@moduledoc """
This plug will handle persistent user sessions with cookies.
By default, the cookie will expire after 30 days. The cookie expiration will
be renewed on every request where a user is assigned to the conn. The token
in the cookie can only be used once to create a session.
If an assigned private `:pow_session_metadata` key exists in the conn with a
keyword list containing a `:fingerprint` key, that fingerprint value will be
set along with the user clause as the persistent session value as
`{[id: user_id], session_metadata: [fingerprint: fingerprint]}`.
## Example
defmodule MyAppWeb.Endpoint do
# ...
plug Pow.Plug.Session, otp_app: :my_app
plug PowPersistentSession.Plug.Cookie
#...
end
## Configuration options
* `:persistent_session_store` - see `PowPersistentSession.Plug.Base`
* `:cache_store_backend` - see `PowPersistentSession.Plug.Base`
* `:persistent_session_cookie_key` - session key name. This defaults to
"persistent_session_cookie". If `:otp_app` is used it'll automatically
prepend the key with the `:otp_app` value.
* `:persistent_session_ttl` - used for both backend store and max age for
cookie. See `PowPersistentSession.Plug.Base` for more.
* `:persistent_session_cookie_opts` - keyword list of cookie options, see
`Plug.Conn.put_resp_cookie/4` for options. The default options are
`[max_age: max_age, path: "/"]` where `:max_age` is the value defined in
`:persistent_session_ttl`.
* `:persistent_session_cookie_expiration_timeout` - integer value in
seconds for how much time should go by before cookie should expire after
the token is fetched in `authenticate/2`. Defaults to 10.
## Custom metadata
You can assign a private `:pow_persistent_session_metadata` key in the conn
with custom metadata as a keyword list. The only current use this has is to
set `:session_metadata` that'll be passed on as `:pow_session_metadata` for
new session generation.
session_metadata =
conn.private
|> Map.get(:pow_session_metadata, [])
|> Keyword.take([:first_seen_at])
Plug.Conn.put_private(conn, :pow_persistent_session_metadata, session_metadata: session_metadata)
This ensure that you are able to keep session metadata consistent between
browser sessions.
When a persistent session token is used, the
`:pow_persistent_session_metadata` assigns key in the conn will be populated
with a `:session_metadata` keyword list so that the session metadata that was
pulled from the persistent session can be carried over to the new persistent
session. `:fingerprint` will always be ignored as to not record the old
fingerprint.
"""
use PowPersistentSession.Plug.Base
alias Plug.Conn
alias Pow.{Config, Operations, Plug, UUID}
@cookie_key "persistent_session_cookie"
@cookie_expiration_timeout 10
@doc """
Sets a persistent session cookie with an auto generated token.
The token is set as a key in the persistent session cache with the id fetched
from the struct. Any existing persistent session will be deleted first with
`delete/2`.
If an assigned private `:pow_session_metadata` key exists in the conn with a
keyword list containing a `:fingerprint` value, then that value will be set
in a `:session_metadata` keyword list in the persistent session metadata. The
value will look like:
`{[id: user_id], session_metadata: [fingerprint: fingerprint]}`
The unique cookie id will be prepended by the `:otp_app` configuration
value, if present.
"""
@spec create(Conn.t(), map(), Config.t()) :: Conn.t()
def create(conn, user, config) do
{store, store_config} = store(config)
cookie_key = cookie_key(config)
key = cookie_id(config)
value = persistent_session_value(conn, user)
opts = cookie_opts(config)
store.put(store_config, key, value)
conn
|> delete(config)
|> Conn.put_resp_cookie(cookie_key, key, opts)
end
defp persistent_session_value(conn, user) do
clauses = user_to_get_by_clauses(user)
metadata =
conn.private
|> Map.get(:pow_persistent_session_metadata, [])
|> maybe_put_fingerprint_in_session_metadata(conn)
{clauses, metadata}
end
defp user_to_get_by_clauses(%{id: id}), do: [id: id]
defp maybe_put_fingerprint_in_session_metadata(metadata, conn) do
conn.private
|> Map.get(:pow_session_metadata, [])
|> Keyword.get(:fingerprint)
|> case do
nil ->
metadata
fingerprint ->
session_metadata =
metadata
|> Keyword.get(:session_metadata, [])
|> Keyword.put_new(:fingerprint, fingerprint)
Keyword.put(metadata, :session_metadata, session_metadata)
end
end
@doc """
Expires the persistent session cookie.
If a persistent session cookie exists it'll be updated to expire immediately,
and the token in the persistent session cache will be deleted.
"""
@spec delete(Conn.t(), Config.t()) :: Conn.t()
def delete(conn, config) do
cookie_key = cookie_key(config)
case conn.req_cookies[cookie_key] do
nil ->
conn
key_id ->
expire_token_in_store(key_id, config)
delete_cookie(conn, cookie_key, config)
end
end
defp expire_token_in_store(key_id, config) do
{store, store_config} = store(config)
store.delete(store_config, key_id)
end
defp delete_cookie(conn, cookie_key, config) do
opts =
config
|> cookie_opts()
|> Keyword.put(:max_age, -1)
Conn.put_resp_cookie(conn, cookie_key, "", opts)
end
@doc """
Authenticates a user with the persistent session cookie.
If a persistent session cookie exists, it'll fetch the credentials from the
persistent session cache.
After the value is fetched from the cookie, it'll be updated to expire after
the value of `:persistent_session_cookie_expiration_timeout` so invalid
cookies will be deleted eventually. This timeout prevents immediate deletion
of the cookie so in case of multiple simultaneous requests, the cache has
time to update the value.
If credentials was fetched successfully, the token in the cache is deleted, a
new session is created, and `create/2` is called to create a new persistent
session cookie. This will override any expiring cookie.
If a `:session_metadata` keyword list is fetched from the persistent session
metadata, all the values will be merged into the private
`:pow_session_metadata` key in the conn.
The expiration date for the cookie will be reset on each request where a user
is assigned to the conn.
"""
@spec authenticate(Conn.t(), Config.t()) :: Conn.t()
def authenticate(conn, config) do
user = Plug.current_user(conn, config)
conn
|> Conn.fetch_cookies()
|> maybe_authenticate(user, config)
|> maybe_renew(config)
end
defp maybe_authenticate(conn, nil, config) do
cookie_key = cookie_key(config)
case conn.req_cookies[cookie_key] do
nil -> conn
key_id -> do_authenticate(conn, cookie_key, key_id, config)
end
end
defp maybe_authenticate(conn, _user, _config), do: conn
defp do_authenticate(conn, cookie_key, key_id, config) do
{store, store_config} = store(config)
res = store.get(store_config, key_id)
plug = Plug.get_plug(config)
conn = expire_cookie(conn, cookie_key, key_id, config)
case res do
:not_found ->
conn
res ->
expire_token_in_store(key_id, config)
fetch_and_auth_user(conn, res, plug, config)
end
end
defp expire_cookie(conn, cookie_key, key_id, config) do
max_age = Config.get(config, :persistent_session_cookie_expiration_timeout, @cookie_expiration_timeout)
opts =
config
|> cookie_opts()
|> Keyword.put(:max_age, max_age)
Conn.put_resp_cookie(conn, cookie_key, key_id, opts)
end
defp fetch_and_auth_user(conn, {clauses, metadata}, plug, config) do
clauses
|> filter_invalid!()
|> Operations.get_by(config)
|> case do
nil ->
conn
user ->
conn
|> update_persistent_session_metadata(metadata)
|> update_session_metadata(metadata)
|> create(user, config)
|> plug.do_create(user, config)
end
end
# TODO: Remove by 1.1.0
defp fetch_and_auth_user(conn, user_id, plug, config),
do: fetch_and_auth_user(conn, {user_id, []}, plug, config)
defp filter_invalid!([id: _value] = clauses), do: clauses
defp filter_invalid!(clauses), do: raise "Invalid get_by clauses stored: #{inspect clauses}"
defp update_persistent_session_metadata(conn, metadata) do
case Keyword.get(metadata, :session_metadata) do
nil ->
conn
session_metadata ->
current_metadata =
conn.private
|> Map.get(:pow_persistent_session_metadata, [])
|> Keyword.get(:session_metadata, [])
metadata =
session_metadata
|> Keyword.merge(current_metadata)
|> Keyword.delete(:fingerprint)
Conn.put_private(conn, :pow_persistent_session_metadata, session_metadata: metadata)
end
end
defp update_session_metadata(conn, metadata) do
case Keyword.get(metadata, :session_metadata) do
nil ->
fallback_session_fingerprint(conn, metadata)
session_metadata ->
metadata = Map.get(conn.private, :pow_session_metadata, [])
Conn.put_private(conn, :pow_session_metadata, Keyword.merge(session_metadata, metadata))
end
end
# TODO: Remove by 1.1.0
defp fallback_session_fingerprint(conn, metadata) do
case Keyword.get(metadata, :session_fingerprint) do
nil ->
conn
fingerprint ->
metadata =
conn.private
|> Map.get(:pow_session_metadata, [])
|> Keyword.put(:fingerprint, fingerprint)
Conn.put_private(conn, :pow_session_metadata, metadata)
end
end
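# Resets the cookie expiration on each request where a user is assigned,
# unless a persistent session cookie was already set in this response.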
defp maybe_renew(conn, config) do
cookie_key = cookie_key(config)
with user when not is_nil(user) <- Plug.current_user(conn, config),
nil <- conn.resp_cookies[cookie_key] do
renew(conn, cookie_key, config)
else
_ -> conn
end
end
defp renew(conn, cookie_key, config) do
opts = cookie_opts(config)
case conn.req_cookies[cookie_key] do
nil -> conn
value -> Conn.put_resp_cookie(conn, cookie_key, value, opts)
end
end
defp cookie_id(config) do
uuid = UUID.generate()
Plug.prepend_with_namespace(config, uuid)
end
defp cookie_key(config) do
Config.get(config, :persistent_session_cookie_key, default_cookie_key(config))
end
defp default_cookie_key(config) do
Plug.prepend_with_namespace(config, @cookie_key)
end
defp cookie_opts(config) do
config
|> Config.get(:persistent_session_cookie_opts, [])
|> Keyword.put_new(:max_age, max_age(config))
|> Keyword.put_new(:path, "/")
end
defp max_age(config) do
# TODO: Remove by 1.1.0
case Config.get(config, :persistent_session_cookie_max_age) do
nil ->
config
|> PowPersistentSession.Plug.Base.ttl()
|> Integer.floor_div(1000)
max_age ->
IO.warn("use of `:persistent_session_cookie_max_age` config value in #{inspect unquote(__MODULE__)} is deprecated, please use `:persistent_session_ttl`")
max_age
end
end
end
# source: lib/extensions/persistent_session/plug/cookie.ex
defmodule Dutu.GeneralFixtures do
@moduledoc """
This module defines test helpers for creating
entities via the `Dutu.General` context.
"""
import Timex,
only: [shift: 2, end_of_week: 1, end_of_month: 1, end_of_quarter: 1, end_of_year: 1]
import Dutu.DateHelpers
use Dutu.DateHelpers
@doc """
Generate a todo.
"""
def todo_fixture(attrs \\ %{}) do
{:ok, todo} =
attrs
|> Enum.into(%{title: "some title"})
|> Dutu.General.create_todo()
todo
end
def dynamic_todos_fixture() do
end_of_this_year =
start_of_this_year()
|> end_of_year
[
%{title: "today", due_date: today()},
%{
title: "until today",
approx_due_date: [nil, today()],
date_attrs: %{
"type" => @due_date_types.before
}
},
%{title: "tomorrow", due_date: tomorrow()},
%{
title: "until tomorrow",
approx_due_date: [nil, tomorrow()],
date_attrs: %{
"type" => @due_date_types.before
}
},
%{title: "yesterday", due_date: shift(today(), days: -1)},
%{title: "some day"},
%{
title: "last week",
approx_due_date: [shift(today(), days: -12), shift(today(), days: -8)],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this year",
approx_due_date: [shift(end_of_this_year, days: -5), end_of_this_year],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "early this year",
approx_due_date: [
shift(start_of_this_year(), weeks: 1),
shift(start_of_this_year(), weeks: 2)
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "next year",
approx_due_date: [start_of_next_year(), shift(start_of_next_year(), days: 5)],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this week",
approx_due_date: [
shift(start_of_this_week(), days: 2),
start_of_this_week()
|> end_of_week
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "next week",
approx_due_date: [
shift(start_of_next_week(), days: 2),
start_of_next_week()
|> end_of_week
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this week to next week",
approx_due_date: [
shift(start_of_this_week(), days: 3),
shift(start_of_next_week(), days: 3)
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this month",
approx_due_date: [
shift(start_of_this_month(), days: 3),
start_of_this_month()
|> end_of_month
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "next month",
approx_due_date: [
shift(start_of_next_month(), days: 3),
start_of_next_month()
|> end_of_month
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this month to next month",
approx_due_date: [
shift(start_of_this_month(), weeks: 1),
shift(start_of_next_month(), weeks: 2)
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this quarter",
approx_due_date: [
shift(start_of_this_quarter(), days: 3),
start_of_this_quarter()
|> end_of_quarter
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "next quarter",
approx_due_date: [
shift(start_of_next_quarter(), days: 3),
start_of_next_quarter()
|> end_of_quarter
],
date_attrs: %{
"type" => @due_date_types.between
}
},
%{
title: "this quarter to next quarter",
approx_due_date: [
shift(start_of_this_quarter(), weeks: 1),
shift(start_of_next_quarter(), weeks: 2)
],
date_attrs: %{
"type" => @due_date_types.between
}
}
]
|> Enum.map(fn todo -> todo_fixture(todo) end)
end
@doc """
Generate a chore.
"""
def chore_fixture(attrs \\ %{}) do
{:ok, chore} =
attrs
|> Enum.into(%{
last_done_at: ~N[2022-01-07 09:58:00],
rrule: %{},
title: "some title"
})
|> Dutu.General.create_chore()
chore
end
end
# source: test/support/fixtures/general_fixtures.ex
require Utils
defmodule D6 do
@moduledoc """
--- Day 6: Universal Orbit Map ---
You've landed at the Universal Orbit Map facility on Mercury. Because navigation in space often involves transferring between orbits, the orbit maps here are useful for finding efficient routes between, for example, you and Santa. You download a map of the local orbits (your puzzle input).
Except for the universal Center of Mass (COM), every object in space is in orbit around exactly one other object.
In the map data, an orbital relationship is written AAA)BBB, which means "BBB is in orbit around AAA".
Before you use your map data to plot a course, you need to make sure it wasn't corrupted during the download. To verify maps, the Universal Orbit Map facility uses orbit count checksums - the total number of direct orbits (like the one shown above) and indirect orbits.
Whenever A orbits B and B orbits C, then A indirectly orbits C. This chain can be any number of objects long: if A orbits B, B orbits C, and C orbits D, then A indirectly orbits D.
What is the total number of direct and indirect orbits in your map data?
--- Part Two ---
Now, you just need to figure out how many orbital transfers you (YOU) need to take to get to Santa (SAN).
You start at the object YOU are orbiting; your destination is the object SAN is orbiting. An orbital transfer lets you move from any object to an object orbiting or orbited by that object.
What is the minimum number of orbital transfers required to move from the object YOU are orbiting to the object SAN is orbiting? (Between the objects they are orbiting - not between YOU and SAN.)
"""
@behaviour Day
def solve(input) do
[top_down, bottom_up] =
input
|> Enum.map(&String.split(&1, ")"))
|> Enum.reduce([%{}, %{}], fn [parent, child], [top_down, bottom_up] ->
children = Map.get(top_down, parent, [])
top_down = Map.put(top_down, parent, [child | children])
bottom_up = Map.put(bottom_up, child, parent)
[top_down, bottom_up]
end)
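# Breadth-first traversal from COM, recording each object's orbit depth; the
# part 1 checksum is then simply the sum of all depths.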
levels =
Stream.unfold([top_down, ["COM"]], fn
[map, _] when map == %{} ->
nil
[map, nodes] ->
children = Enum.flat_map(nodes, &Map.get(map, &1, []))
map = Map.drop(map, nodes)
{children, [map, children]}
end)
|> Enum.with_index(1)
|> Enum.reduce(%{}, fn {group, distance}, acc ->
Enum.reduce(group, acc, fn node, acc ->
Map.put(acc, node, distance)
end)
end)
you = Map.get(levels, "YOU")
santa = Map.get(levels, "SAN")
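    # Walk both chains toward COM, popping each visited node from bottom_up;
    # the first node already removed (i.e. visited by the other chain) is the
    # deepest common ancestor of YOU and SAN ("gcd" by analogy, not a divisor).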
gcd_node =
Stream.unfold([bottom_up, ["YOU", "SAN"]], fn
[bottom_up, [node_1, node_2]] ->
{parent_1, bottom_up} = Map.pop(bottom_up, node_1)
{parent_2, bottom_up} = Map.pop(bottom_up, node_2)
cond do
parent_1 == nil -> {node_1, node_1}
parent_2 == nil -> {node_2, node_2}
parent_1 == "COM" -> {0, [bottom_up, [node_1, parent_2]]}
parent_2 == "COM" -> {0, [bottom_up, [parent_1, node_2]]}
true -> {0, [bottom_up, [parent_1, parent_2]]}
end
_ ->
nil
end)
|> Enum.to_list()
|> List.last()
gcd = Map.get(levels, gcd_node)
part_1 =
levels
|> Map.values()
|> Enum.sum()
part_2 = you + santa - 2 * gcd - 2
{
part_1,
part_2
}
end
end
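# Hypothetical usage sketch against the Advent of Code day 6 example map
# (assumes `solve/1` receives the input as a list of "PARENT)CHILD" lines):
input = [
  "COM)B", "B)C", "C)D", "D)E", "E)F", "B)G", "G)H",
  "D)I", "E)J", "J)K", "K)L", "K)YOU", "I)SAN"
]

{part_1, part_2} = D6.solve(input)
part_1 #=> 54 (the base example's 42 orbits plus the YOU and SAN chains)
part_2 #=> 4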
# source: lib/days/06.ex
defmodule Plug.Session do
@moduledoc """
A plug to handle session cookies and session stores.
The session is accessed via functions on `Plug.Conn`. Cookies and
session have to be fetched with `Plug.Conn.fetch_session/1` before the
session can be accessed.
Consider using `Plug.CSRFProtection` when using `Plug.Session`.
## Session stores
See `Plug.Session.Store` for the specification session stores are required to
implement.
Plug ships with the following session stores:
* `Plug.Session.ETS`
* `Plug.Session.COOKIE`
## Options
* `:store` - session store module (required);
* `:key` - session cookie key (required);
* `:domain` - see `Plug.Conn.put_resp_cookie/4`;
* `:max_age` - see `Plug.Conn.put_resp_cookie/4`;
* `:path` - see `Plug.Conn.put_resp_cookie/4`;
* `:secure` - see `Plug.Conn.put_resp_cookie/4`;
* `:http_only` - see `Plug.Conn.put_resp_cookie/4`;
Additional options can be given to the session store, see the store's
documentation for the options it accepts.
## Examples
plug Plug.Session, store: :ets, key: "_my_app_session", table: :session
"""
alias Plug.Conn
@behaviour Plug
@cookie_opts [:domain, :max_age, :path, :secure, :http_only]
def init(opts) do
store = Keyword.fetch!(opts, :store) |> convert_store
key = Keyword.fetch!(opts, :key)
cookie_opts = Keyword.take(opts, @cookie_opts)
store_opts = Keyword.drop(opts, [:store, :key] ++ @cookie_opts)
store_config = store.init(store_opts)
%{store: store,
store_config: store_config,
key: key,
cookie_opts: cookie_opts}
end
def call(conn, config) do
Conn.put_private(conn, :plug_session_fetch, fetch_session(config))
end
defp convert_store(store) do
case Atom.to_string(store) do
"Elixir." <> _ -> store
reference -> Module.concat(Plug.Session, String.upcase(reference))
end
end
defp fetch_session(config) do
%{store: store, store_config: store_config, key: key} = config
fn conn ->
{sid, session} =
if cookie = conn.cookies[key] do
store.get(conn, cookie, store_config)
else
{nil, %{}}
end
session = Map.merge(session, Map.get(conn.private, :plug_session, %{}))
conn
|> Conn.put_private(:plug_session, session)
|> Conn.put_private(:plug_session_fetch, :done)
|> Conn.register_before_send(before_send(sid, config))
end
end
defp before_send(sid, config) do
fn conn ->
case Map.get(conn.private, :plug_session_info) do
:write ->
value = put_session(sid, conn, config)
put_cookie(value, conn, config)
:drop ->
if sid do
delete_session(sid, conn, config)
delete_cookie(conn, config)
else
conn
end
:renew ->
if sid, do: delete_session(sid, conn, config)
value = put_session(nil, conn, config)
put_cookie(value, conn, config)
:ignore ->
conn
nil ->
conn
end
end
end
defp put_session(sid, conn, %{store: store, store_config: store_config}),
do: store.put(conn, sid, conn.private[:plug_session], store_config)
defp delete_session(sid, conn, %{store: store, store_config: store_config}),
do: store.delete(conn, sid, store_config)
defp put_cookie(value, conn, %{cookie_opts: cookie_opts, key: key}),
do: Conn.put_resp_cookie(conn, key, value, cookie_opts)
defp delete_cookie(conn, %{cookie_opts: cookie_opts, key: key}),
do: Conn.delete_resp_cookie(conn, key, cookie_opts)
end
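# Hypothetical usage sketch (all names illustrative; the cookie store also
# requires a `:signing_salt` option and a `:secret_key_base` on the conn):
#
#     plug Plug.Session, store: :cookie, key: "_my_app_session",
#       signing_salt: "abc123"
#
#     conn
#     |> Plug.Conn.fetch_session()
#     |> Plug.Conn.put_session(:user_id, 42)
#     |> Plug.Conn.get_session(:user_id)
#     #=> 42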
# source: lib/plug/session.ex
defmodule Triton.Validate do
def coerce(query) do
with {:ok, query} <- validate(query) do
fields = query[:__schema__].__fields__
{:ok, Enum.map(query, fn x -> coerce(x, fields) end)}
end
end
def validate(query) do
case Triton.Helper.query_type(query) do
{:error, err} -> {:error, err.message}
type -> validate(type, query, query[:__schema__].__fields__)
end
end
def validate(:insert, query, schema) do
    data =
      (query[:prepared] &&
         query[:prepared] ++ Enum.filter(query[:insert], fn {_, v} -> !is_atom(v) end)) ||
        query[:insert]

    vex =
      schema
      |> Enum.filter(fn {_, opts} -> opts[:opts][:validators] end)
      |> Enum.map(fn {field, opts} -> {field, opts[:opts][:validators]} end)
case Vex.errors(data ++ [_vex: vex]) do
[] -> {:ok, query}
err_list -> {:error, err_list |> Triton.Error.vex_error}
end
end
def validate(:update, query, schema) do
    data =
      (query[:prepared] &&
         query[:prepared] ++ Enum.filter(query[:update], fn {_, v} -> !is_atom(v) end)) ||
        query[:update]

    fields_to_validate = Enum.map(data, &elem(&1, 0))

    vex =
      schema
      |> Enum.filter(fn {_, opts} -> opts[:opts][:validators] end)
      |> Enum.map(fn {field, opts} -> {field, opts[:opts][:validators]} end)
      |> Enum.filter(&(elem(&1, 0) in fields_to_validate))
case Vex.errors(data ++ [_vex: vex]) do
[] -> {:ok, query}
err_list -> {:error, err_list |> Triton.Error.vex_error}
end
end
def validate(_, query, _), do: {:ok, query}
defp coerce({:__schema__, v}, _), do: {:__schema__, v}
defp coerce({:__table__, v}, _), do: {:__table__, v}
defp coerce({k, v}, fields), do: {k, coerce(v, fields)}
defp coerce(fragments, fields) when is_list(fragments), do: fragments |> Enum.map(fn fragment -> coerce_fragment(fragment, fields) end)
defp coerce(non_list, _), do: non_list
defp coerce_fragment({k, v}, fields) when is_list(v), do: {k, v |> Enum.map(fn {c, v} -> coerce_fragment({k, c, v}, fields) end)}
defp coerce_fragment({k, v}, fields), do: {k, coerced_value(v, fields[k][:type])}
defp coerce_fragment({k, c, v}, fields), do: {c, coerced_value(v, fields[k][:type])}
defp coerce_fragment(x, _), do: x
defp coerced_value(value, _) when is_atom(value), do: value
defp coerced_value(value, :text) when not is_binary(value), do: to_string(value)
defp coerced_value(value, :bigint) when is_binary(value), do: String.to_integer(value)
defp coerced_value(value, :int) when is_binary(value), do: String.to_integer(value)
defp coerced_value(value, :smallint) when is_binary(value), do: String.to_integer(value)
defp coerced_value(value, :varint) when is_binary(value), do: String.to_integer(value)
defp coerced_value(value, _), do: value
end
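# Hypothetical sketch of the data shape this module validates: schema fields
# carry Vex validators under opts[:opts][:validators], and Vex.errors/1 is fed
# the row data plus a `_vex` key, exactly as validate/3 does above:
#
#     schema = [email: [opts: [validators: [presence: true]]]]
#     Vex.errors([email: nil, _vex: [email: [presence: true]]])
#     #=> [{:error, :email, :presence, "must be present"}]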
# source: lib/triton/validate.ex
defmodule Plug.RewriteOn do
@moduledoc """
A plug to rewrite the request's host/port/protocol from `x-forwarded-*` headers.
If your Plug application is behind a proxy that handles HTTPS, you may
need to tell Plug to parse the proper protocol from the `x-forwarded-*`
header.
plug Plug.RewriteOn, [:x_forwarded_host, :x_forwarded_port, :x_forwarded_proto]
The supported values are:
  * `:x_forwarded_host` - to override the host based on the "x-forwarded-host" header
  * `:x_forwarded_port` - to override the port based on the "x-forwarded-port" header
  * `:x_forwarded_proto` - to override the protocol based on the "x-forwarded-proto" header
Since rewriting the scheme based on `x-forwarded-*` headers can open up
security vulnerabilities, only use this plug if:
* your app is behind a proxy
* your proxy strips the given `x-forwarded-*` headers from all incoming requests
* your proxy sets the `x-forwarded-*` headers and sends it to Plug
"""
@behaviour Plug
import Plug.Conn, only: [get_req_header: 2]
@impl true
def init(header), do: List.wrap(header)
@impl true
def call(conn, [:x_forwarded_proto | rewrite_on]) do
conn
|> put_scheme(get_req_header(conn, "x-forwarded-proto"))
|> call(rewrite_on)
end
def call(conn, [:x_forwarded_port | rewrite_on]) do
conn
|> put_port(get_req_header(conn, "x-forwarded-port"))
|> call(rewrite_on)
end
def call(conn, [:x_forwarded_host | rewrite_on]) do
conn
|> put_host(get_req_header(conn, "x-forwarded-host"))
|> call(rewrite_on)
end
def call(_conn, [other | _rewrite_on]) do
raise "unknown rewrite: #{inspect(other)}"
end
def call(conn, []) do
conn
end
defp put_scheme(%{scheme: :http, port: 80} = conn, ["https"]),
do: %{conn | scheme: :https, port: 443}
defp put_scheme(conn, ["https"]),
do: %{conn | scheme: :https}
defp put_scheme(%{scheme: :https, port: 443} = conn, ["http"]),
do: %{conn | scheme: :http, port: 80}
defp put_scheme(conn, ["http"]),
do: %{conn | scheme: :http}
defp put_scheme(conn, _scheme),
do: conn
defp put_host(conn, [proper_host]),
do: %{conn | host: proper_host}
defp put_host(conn, _),
do: conn
defp put_port(conn, headers) do
with [header] <- headers,
{port, ""} <- Integer.parse(header) do
%{conn | port: port}
else
_ -> conn
end
end
end
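# Hypothetical usage sketch using the test helpers that ship with :plug
# (a conn built by Plug.Test starts out as scheme :http, port 80):
conn =
  Plug.Test.conn(:get, "/")
  |> Plug.Conn.put_req_header("x-forwarded-proto", "https")
  |> Plug.RewriteOn.call(Plug.RewriteOn.init(:x_forwarded_proto))

conn.scheme #=> :https
conn.port   #=> 443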
# source: lib/plug/rewrite_on.ex
defmodule CalendarInterval do
@moduledoc """
Functions for working with calendar intervals.
"""
defstruct [:first, :last, :precision]
@type t() :: %CalendarInterval{
first: NaiveDateTime.t(),
last: NaiveDateTime.t(),
precision: precision()
}
@type precision() :: :year | :month | :day | :hour | :minute | :second | {:microsecond, 1..6}
@precisions [:year, :month, :day, :hour, :minute, :second] ++
for(i <- 1..6, do: {:microsecond, i})
@patterns [
{:year, 4, "-01-01 00:00:00.000000"},
{:month, 7, "-01 00:00:00.000000"},
{:day, 10, " 00:00:00.000000"},
{:hour, 13, ":00:00.000000"},
{:minute, 16, ":00.000000"},
{:second, 19, ".000000"},
{{:microsecond, 1}, 21, "00000"},
{{:microsecond, 2}, 22, "0000"},
{{:microsecond, 3}, 23, "000"},
{{:microsecond, 4}, 24, "00"},
{{:microsecond, 5}, 25, "0"}
]
@microsecond {:microsecond, 6}
@doc """
Calendar callback that adds years and months to a naive datetime.
`step` can be positive or negative.
When streaming intervals, this is the callback that increments years
  and months. Incrementing at other precisions is handled directly through
  `NaiveDateTime.add/3`.
"""
@callback add(
Calendar.year(),
Calendar.month(),
Calendar.day(),
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond(),
precision(),
step :: integer()
) ::
              {Calendar.year(), Calendar.month(), Calendar.day(), Calendar.hour(),
               Calendar.minute(), Calendar.second(), Calendar.microsecond()}
@typedoc """
Relation between two intervals according to Allen's Interval Algebra.
      a precedes b       | aaaa
                         |          bbbb
      ---------------------------------
      a meets b          | aaaa
                         |     bbbb
      ---------------------------------
      a overlaps b       | aaaa
                         |   bbbb
      ---------------------------------
      a finished by b    | aaaa
                         |   bb
      ---------------------------------
      a contains b       | aaaaaa
                         |   bb
      ---------------------------------
      a starts b         | aa
                         | bbbb
      ---------------------------------
      a equals b         | aaaa
                         | bbbb
      ---------------------------------
      a started by b     | aaaa
                         | bb
      ---------------------------------
      a during b         |   aa
                         | bbbbbb
      ---------------------------------
      a finishes b       |   aa
                         | bbbb
      ---------------------------------
      a overlapped by b  |   aaaa
                         | bbbb
      ---------------------------------
      a met by b         |     aaaa
                         | bbbb
      ---------------------------------
      a preceded by b    |          aaaa
                         | bbbb
See: <https://www.ics.uci.edu/~alspaugh/cls/shr/allen.html>
"""
@type relation() ::
:equals
| :meets
| :met_by
| :precedes
| :preceded_by
| :starts
| :started_by
| :finishes
| :finished_by
| :during
| :contains
| :overlaps
| :overlapped_by
defmacro __using__(_) do
quote do
import CalendarInterval, only: [sigil_I: 2]
end
end
@doc """
Returns an interval starting at given date truncated to `precision`.
## Examples
iex> CalendarInterval.new(~N"2018-06-15 10:20:30.134", :minute)
~I"2018-06-15 10:20"
iex> CalendarInterval.new(~D"2018-06-15", :minute)
~I"2018-06-15 00:00"
"""
@spec new(NaiveDateTime.t() | Date.t(), precision()) :: t()
def new(%NaiveDateTime{} = naive_datetime, precision) when precision in @precisions do
first = truncate(naive_datetime, precision)
last = first |> next_ndt(precision, 1) |> prev_ndt(@microsecond, 1)
new(first, last, precision)
end
def new(%Date{} = date, precision) when precision in @precisions do
{:ok, ndt} = NaiveDateTime.new(date, ~T"00:00:00")
new(ndt, precision)
end
defp new(%NaiveDateTime{} = first, %NaiveDateTime{} = last, precision)
when precision in @precisions do
if NaiveDateTime.compare(first, last) in [:eq, :lt] do
%CalendarInterval{first: first, last: last, precision: precision}
else
first = format(first, precision)
last = format(last, precision)
raise ArgumentError, """
cannot create interval from #{first} and #{last}, descending intervals are not supported\
"""
end
end
@doc """
Returns an interval for the current UTC time in given `t:precision/0`.
## Examples
iex> CalendarInterval.utc_now(:month) in ~I"2018/2100"
true
"""
@spec utc_now(precision()) :: t()
def utc_now(precision \\ @microsecond) when precision in @precisions do
now = NaiveDateTime.utc_now()
first = truncate(now, precision)
last = next_ndt(first, precision, 1) |> prev_ndt(@microsecond, 1)
new(first, last, precision)
end
@doc """
Handles the `~I` sigil for intervals.
## Examples
iex> ~I"2018-06".precision
:month
"""
defmacro sigil_I({:<<>>, _, [string]}, []) do
Macro.escape(parse!(string))
end
@doc """
Parses a string into an interval.
## Examples
iex> CalendarInterval.parse!("2018-06-30")
~I"2018-06-30"
iex> CalendarInterval.parse!("2018-06-01/30")
~I"2018-06-01/30"
"""
@spec parse!(String.t()) :: t()
def parse!(string) do
{string, calendar} = parse_including_calendar(string)
case String.split(string, "/", trim: true) do
[string] ->
{ndt, precision} = do_parse!(string, calendar)
new(ndt, precision)
[left, right] ->
right = String.slice(left, 0, byte_size(left) - byte_size(right)) <> right
right = parse!(right <> " " <> inspect(calendar))
left = parse!(left <> " " <> inspect(calendar))
new(left.first, right.last, left.precision)
end
end
defp parse_including_calendar(string) do
case String.split(string, " ", trim: true) do
[date, <<c::utf8, _rest::binary>> = calendar] when c in ?a..?z or c in ?A..?Z ->
{date, Module.concat([calendar])}
[date, time, <<c::utf8, _rest::binary>> = calendar] when c in ?a..?z or c in ?A..?Z ->
{date <> " " <> time, Module.concat([calendar])}
[date, time, other, <<c::utf8, _rest::binary>> = calendar]
when c in ?a..?z or c in ?A..?Z ->
{date <> " " <> time <> " " <> other, Module.concat([calendar])}
other ->
{Enum.join(other, " "), Calendar.ISO}
end
end
for {precision, bytes, rest} <- @patterns do
defp do_parse!(<<_::unquote(bytes)-bytes>> = string, calendar) do
do_parse!(string <> unquote(rest), calendar)
|> put_elem(1, unquote(precision))
end
end
defp do_parse!(<<_::26-bytes>> = string, calendar) do
{NaiveDateTime.from_iso8601!(string, calendar), @microsecond}
end
defp next_ndt(%NaiveDateTime{calendar: Calendar.ISO} = ndt, :year, step) do
update_in(ndt.year, &(&1 + step))
end
defp next_ndt(%NaiveDateTime{calendar: calendar} = ndt, :year, step) do
%{
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
} = ndt
{year, month, day, hour, minute, second, microsecond} =
calendar.add(year, month, day, hour, minute, second, microsecond, :year, step)
{:ok, ndt} = NaiveDateTime.new(year, month, day, hour, minute, second, microsecond, calendar)
ndt
end
defp next_ndt(%NaiveDateTime{calendar: Calendar.ISO} = ndt, :month, step) do
%{year: year, month: month} = ndt
{plus_year, month} = {div(month + step, 12), rem(month + step, 12)}
    # month + step landing on a multiple of 12 is December of the previous
    # wrapped-around year, not January of the next one
    if month == 0 do
      %{ndt | year: year + plus_year - 1, month: 12}
    else
      %{ndt | year: year + plus_year, month: month}
    end
end
defp next_ndt(%NaiveDateTime{calendar: calendar} = ndt, :month, step) do
%{
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
} = ndt
{year, month, day, hour, minute, second, microsecond} =
calendar.add(year, month, day, hour, minute, second, microsecond, :month, step)
{:ok, ndt} = NaiveDateTime.new(year, month, day, hour, minute, second, microsecond, calendar)
ndt
end
defp next_ndt(ndt, precision, step) do
{count, unit} = precision_to_count_unit(precision)
NaiveDateTime.add(ndt, count * step, unit)
end
defp prev_ndt(%NaiveDateTime{calendar: Calendar.ISO} = ndt, :year, step) do
update_in(ndt.year, &(&1 - step))
end
defp prev_ndt(%NaiveDateTime{calendar: calendar} = ndt, :year, step) do
%{
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
} = ndt
{year, month, day, hour, minute, second, microsecond} =
calendar.add(year, month, day, hour, minute, second, microsecond, :year, -step)
{:ok, ndt} = NaiveDateTime.new(year, month, day, hour, minute, second, microsecond, calendar)
ndt
end
# TODO: handle step != 1
defp prev_ndt(%NaiveDateTime{year: year, month: 1, calendar: Calendar.ISO} = ndt, :month, step) do
%{ndt | year: year - 1, month: 12 - step + 1}
end
# TODO: handle step != 1
defp prev_ndt(%NaiveDateTime{month: month, calendar: Calendar.ISO} = ndt, :month, step) do
%{ndt | month: month - step}
end
defp prev_ndt(%NaiveDateTime{calendar: calendar} = ndt, :month, step) do
%{
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
} = ndt
{year, month, day, hour, minute, second, microsecond} =
calendar.add(year, month, day, hour, minute, second, microsecond, :month, -step)
{:ok, ndt} = NaiveDateTime.new(year, month, day, hour, minute, second, microsecond, calendar)
ndt
end
defp prev_ndt(ndt, precision, step) do
{count, unit} = precision_to_count_unit(precision)
NaiveDateTime.add(ndt, -count * step, unit)
end
defp precision_to_count_unit(:day), do: {24 * 60 * 60, :second}
defp precision_to_count_unit(:hour), do: {60 * 60, :second}
defp precision_to_count_unit(:minute), do: {60, :second}
defp precision_to_count_unit(:second), do: {1, :second}
defp precision_to_count_unit({:microsecond, exponent}) do
{1, Enum.reduce(1..exponent, 1, fn _, acc -> acc * 10 end)}
end
@doc false
def count(%CalendarInterval{first: %{year: year1}, last: %{year: year2}, precision: :year}),
do: year2 - year1 + 1
def count(%CalendarInterval{
first: %{year: year1, month: month1},
last: %{year: year2, month: month2},
precision: :month
}),
do: month2 + year2 * 12 - month1 - year1 * 12 + 1
def count(%CalendarInterval{first: first, last: last, precision: precision}) do
{count, unit} = precision_to_count_unit(precision)
div(NaiveDateTime.diff(last, first, unit), count) + 1
end
@doc """
Returns string representation.
## Examples
iex> CalendarInterval.to_string(~I"2018-06")
"2018-06"
"""
@spec to_string(t()) :: String.t()
def to_string(%CalendarInterval{first: first, last: last, precision: precision}) do
left = format(first, precision)
right = format(last, precision)
if left == right do
left <> maybe_add_calendar(first)
else
format_left_right(left, right) <> maybe_add_calendar(first)
end
end
defp format_left_right(left, left) do
left
end
for i <- Enum.reverse([5, 8, 11, 14, 17, 20, 22, 23, 24, 25, 26]) do
defp format_left_right(
<<left::unquote(i)-bytes>> <> left_rest,
<<left::unquote(i)-bytes>> <> right_rest
) do
left <> left_rest <> "/" <> right_rest
end
end
defp format_left_right(left, right) do
left <> "/" <> right
end
defp format(ndt, @microsecond) do
NaiveDateTime.to_string(ndt)
end
for {precision, bytes, _} <- @patterns do
defp format(ndt, unquote(precision)) do
NaiveDateTime.to_string(ndt)
|> String.slice(0, unquote(bytes))
end
end
defp maybe_add_calendar(%{calendar: Calendar.ISO}) do
""
end
defp maybe_add_calendar(%{calendar: calendar}) do
" " <> Kernel.inspect(calendar)
end
@doc """
Returns first element of the interval.
## Examples
iex> CalendarInterval.first(~I"2018-01/12")
~I"2018-01"
iex> CalendarInterval.first(~I"2018-01")
~I"2018-01"
"""
@spec first(t()) :: t()
def first(%CalendarInterval{first: first, precision: precision}) do
new(first, precision)
end
@doc """
Returns last element of the interval.
## Examples
iex> CalendarInterval.last(~I"2018-01/12")
~I"2018-12"
iex> CalendarInterval.last(~I"2018-01")
~I"2018-01"
"""
@spec last(t()) :: t()
def last(%CalendarInterval{last: last, precision: precision}) do
new(last, precision)
end
@doc """
Returns next interval.
## Examples
iex> CalendarInterval.next(~I"2018-06-30")
~I"2018-07-01"
iex> CalendarInterval.next(~I"2018-06-30 09:00", 80)
~I"2018-06-30 10:20"
iex> CalendarInterval.next(~I"2018-01/06")
~I"2018-07/12"
iex> CalendarInterval.next(~I"2018-01/02", 2)
~I"2018-05/06"
"""
@spec next(t(), step :: integer()) :: t()
def next(interval, step \\ 1)
def next(interval, 0) do
interval
end
def next(%CalendarInterval{last: last, precision: precision} = interval, step) when step > 0 do
count = count(interval)
first =
last
|> next_ndt(@microsecond, 1)
|> next_ndt(precision, count * (step - 1))
last =
first
|> next_ndt(precision, count)
|> prev_ndt(@microsecond, 1)
new(first, last, precision)
end
@doc """
Returns previous interval.
## Examples
iex> CalendarInterval.prev(~I"2018-06-01")
~I"2018-05-31"
iex> CalendarInterval.prev(~I"2018-06-01 01:00", 80)
~I"2018-05-31 23:40"
iex> CalendarInterval.prev(~I"2018-07/12")
~I"2018-01/06"
iex> CalendarInterval.prev(~I"2018-05/06", 2)
~I"2018-01/02"
"""
@spec prev(t(), step :: integer()) :: t()
def prev(interval, step \\ 1)
def prev(interval, 0) do
interval
end
def prev(%CalendarInterval{first: first, precision: precision} = interval, step)
when step >= 0 do
count = count(interval)
first =
first
|> prev_ndt(precision, count * step)
last =
first
|> next_ndt(precision, count)
|> prev_ndt(@microsecond, 1)
new(first, last, precision)
end
@doc """
Returns an interval within given interval.
## Example
iex> CalendarInterval.nest(~I"2018", :day)
~I"2018-01-01/12-31"
iex> CalendarInterval.nest(~I"2018-06-15", :minute)
~I"2018-06-15 00:00/23:59"
iex> CalendarInterval.nest(~I"2018-06-15", :year)
** (ArgumentError) cannot nest from :day to :year
"""
@spec nest(t(), precision()) :: t()
def nest(%CalendarInterval{precision: old_precision} = interval, new_precision)
when new_precision in @precisions do
if precision_index(new_precision) > precision_index(old_precision) do
%{interval | precision: new_precision}
else
raise ArgumentError,
"cannot nest from #{inspect(old_precision)} to #{inspect(new_precision)}"
end
end
@doc """
Returns interval that encloses given interval.
## Example
iex> CalendarInterval.enclosing(~I"2018-05-01", :year)
~I"2018"
iex> CalendarInterval.enclosing(~I"2018-06-15", :second)
** (ArgumentError) cannot enclose from :day to :second
"""
@spec enclosing(t(), precision()) :: t()
def enclosing(%CalendarInterval{precision: old_precision} = interval, new_precision)
when new_precision in @precisions do
if precision_index(new_precision) < precision_index(old_precision) do
interval.first |> truncate(new_precision) |> new(new_precision)
else
raise ArgumentError,
"cannot enclose from #{inspect(old_precision)} to #{inspect(new_precision)}"
end
end
defp truncate(ndt, :year), do: truncate(%{ndt | month: 1}, :month)
defp truncate(ndt, :month), do: truncate(%{ndt | day: 1}, :day)
defp truncate(ndt, :day), do: %{ndt | hour: 0, minute: 0, second: 0, microsecond: {0, 6}}
defp truncate(ndt, :hour), do: %{ndt | minute: 0, second: 0, microsecond: {0, 6}}
defp truncate(ndt, :minute), do: %{ndt | second: 0, microsecond: {0, 6}}
defp truncate(ndt, :second), do: %{ndt | microsecond: {0, 6}}
defp truncate(ndt, @microsecond), do: ndt
defp truncate(%{microsecond: {microsecond, _}} = ndt, {:microsecond, precision}) do
{1, n} = precision_to_count_unit({:microsecond, 6 - precision})
%{ndt | microsecond: {div(microsecond, n) * n, 6}}
end
for {precision, index} <- Enum.with_index(@precisions) do
defp precision_index(unquote(precision)), do: unquote(index)
end
@doc """
Returns an intersection of `interval1` and `interval2` or `nil` if they don't overlap.
Both intervals must have the same `precision`.
## Examples
iex> CalendarInterval.intersection(~I"2018-01/04", ~I"2018-03/06")
~I"2018-03/04"
iex> CalendarInterval.intersection(~I"2018-01/12", ~I"2018-02")
~I"2018-02"
iex> CalendarInterval.intersection(~I"2018-01/02", ~I"2018-11/12")
nil
"""
@spec intersection(t(), t()) :: t() | nil
def intersection(interval1, interval2)
def intersection(%CalendarInterval{precision: p} = i1, %CalendarInterval{precision: p} = i2) do
if lteq?(i1.first, i2.last) and gteq?(i1.last, i2.first) do
first = max_ndt(i1.first, i2.first)
last = min_ndt(i1.last, i2.last)
new(first, last, p)
else
nil
end
end
@doc """
Splits interval by another interval.
## Examples
iex> CalendarInterval.split(~I"2018-01/12", ~I"2018-04/05")
{~I"2018-01/03", ~I"2018-04/05", ~I"2018-06/12"}
iex> CalendarInterval.split(~I"2018-01/12", ~I"2018-01/02")
{~I"2018-01/02", ~I"2018-03/12"}
iex> CalendarInterval.split(~I"2018-01/12", ~I"2018-08/12")
{~I"2018-01/07", ~I"2018-08/12"}
iex> CalendarInterval.split(~I"2018-01/12", ~I"2019-01")
~I"2018-01/12"
"""
@spec split(t(), t()) :: t() | {t(), t()} | {t(), t(), t()}
def split(%{precision: p} = interval1, %{precision: p} = interval2) do
case relation(interval2, interval1) do
:during ->
a = new(interval1.first, prev(interval2).last, p)
b = new(interval2.first, interval2.last, p)
c = new(next(interval2).first, interval1.last, p)
{a, b, c}
:starts ->
a = new(interval1.first, interval2.last, p)
b = new(next(interval2).first, interval1.last, p)
{a, b}
:finishes ->
a = new(interval1.first, prev(interval2).last, p)
b = new(interval2.first, interval2.last, p)
{a, b}
_ ->
interval1
end
end
@doc """
Returns an union of `interval1` and `interval2` or `nil`.
Both intervals must have the same `precision`.
## Examples
iex> CalendarInterval.union(~I"2018-01/02", ~I"2018-01/04")
~I"2018-01/04"
iex> CalendarInterval.union(~I"2018-01/11", ~I"2018-12")
~I"2018-01/12"
iex> CalendarInterval.union(~I"2018-01/02", ~I"2018-04/05")
nil
"""
@spec union(t(), t()) :: t() | nil
def union(interval1, interval2)
def union(%CalendarInterval{precision: p} = i1, %CalendarInterval{precision: p} = i2) do
if intersection(i1, i2) != nil or next_ndt(i1.last, @microsecond, 1) == i2.first do
new(i1.first, i2.last, p)
else
nil
end
end
defp lt?(ndt1, ndt2), do: NaiveDateTime.compare(ndt1, ndt2) == :lt
defp gt?(ndt1, ndt2), do: NaiveDateTime.compare(ndt1, ndt2) == :gt
defp lteq?(ndt1, ndt2), do: NaiveDateTime.compare(ndt1, ndt2) in [:lt, :eq]
defp gteq?(ndt1, ndt2), do: NaiveDateTime.compare(ndt1, ndt2) in [:gt, :eq]
defp min_ndt(ndt1, ndt2), do: if(lteq?(ndt1, ndt2), do: ndt1, else: ndt2)
defp max_ndt(ndt1, ndt2), do: if(gteq?(ndt1, ndt2), do: ndt1, else: ndt2)
@doc """
Returns a [`relation`](`t:CalendarInterval.relation/0`) between `interval1` and `interval2`.
## Examples
iex> CalendarInterval.relation(~I"2018-01/02", ~I"2018-06")
:precedes
iex> CalendarInterval.relation(~I"2018-01/02", ~I"2018-03")
:meets
iex> CalendarInterval.relation(~I"2018-02", ~I"2018-01/12")
:during
"""
@spec relation(t(), t()) :: relation()
def relation(%{precision: p} = interval1, %{precision: p} = interval2) do
cond do
interval1 == interval2 ->
:equals
interval2.first == next_ndt(interval1.last, @microsecond, 1) ->
:meets
interval2.last == prev_ndt(interval1.first, @microsecond, 1) ->
:met_by
lt?(interval1.last, interval2.first) ->
:precedes
gt?(interval1.first, interval2.last) ->
:preceded_by
interval1.first == interval2.first and lt?(interval1.last, interval2.last) ->
:starts
interval1.first == interval2.first and gt?(interval1.last, interval2.last) ->
:started_by
interval1.last == interval2.last and gt?(interval1.first, interval2.first) ->
:finishes
interval1.last == interval2.last and lt?(interval1.first, interval2.first) ->
:finished_by
gt?(interval1.first, interval2.first) and lt?(interval1.last, interval2.last) ->
:during
lt?(interval1.first, interval2.first) and gt?(interval1.last, interval2.last) ->
:contains
lt?(interval1.first, interval2.first) and lt?(interval1.last, interval2.last) and
gt?(interval1.last, interval2.first) ->
:overlaps
gt?(interval1.first, interval2.first) and gt?(interval1.last, interval2.last) and
lt?(interval1.first, interval2.last) ->
:overlapped_by
end
end
defimpl String.Chars do
defdelegate to_string(interval), to: CalendarInterval
end
defimpl Inspect do
def inspect(interval, _) do
"~I\"" <> CalendarInterval.to_string(interval) <> "\""
end
end
defimpl Enumerable do
def count(interval) do
{:ok, CalendarInterval.count(interval)}
end
def member?(%{first: first, last: last}, %CalendarInterval{
first: other_first,
last: other_last
}) do
{:ok,
NaiveDateTime.compare(other_first, first) in [:eq, :gt] and
NaiveDateTime.compare(other_last, last) in [:eq, :lt]}
end
def member?(%{first: first, last: last}, %NaiveDateTime{} = ndt) do
{:ok,
NaiveDateTime.compare(ndt, first) in [:eq, :gt] and
NaiveDateTime.compare(ndt, last) in [:eq, :lt]}
end
def member?(interval, %Date{calendar: Calendar.ISO} = date) do
{:ok, ndt} = NaiveDateTime.new(date, ~T"00:00:00")
member?(interval, ndt)
end
def member?(interval, %Date{calendar: calendar} = date) do
{:ok, ndt} = NaiveDateTime.new(date, ~T"00:00:00" |> Map.put(:calendar, calendar))
member?(interval, ndt)
end
def slice(interval) do
{:ok, CalendarInterval.count(interval), &slice(interval, &1 + 1, &2)}
end
defp slice(first, start, count) do
interval =
CalendarInterval.new(first.first, first.precision)
|> CalendarInterval.next(start - 1)
slice(interval, count)
end
defp slice(current, 1), do: [current]
defp slice(current, remaining) do
[current | slice(CalendarInterval.next(current), remaining - 1)]
end
def reduce(interval, acc, fun) do
current = CalendarInterval.new(interval.first, interval.precision)
reduce(current, interval.last, interval.precision, acc, fun)
end
defp reduce(_current_interval, _last, _precision, {:halt, acc}, _fun) do
{:halted, acc}
end
defp reduce(_current_interval, _last, _precision, {:suspend, acc}, _fun) do
{:suspended, acc}
end
defp reduce(current_interval, last, precision, {:cont, acc}, fun) do
if NaiveDateTime.compare(current_interval.first, last) == :lt do
next = CalendarInterval.next(current_interval)
reduce(next, last, precision, fun.(current_interval, acc), fun)
else
{:halt, acc}
end
end
end
end
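# Hypothetical usage sketch of the Enumerable implementation above
# (run as a script; `use CalendarInterval` imports the ~I sigil):
use CalendarInterval

Enum.map(~I"2018-01/03", &CalendarInterval.to_string/1)
#=> ["2018-01", "2018-02", "2018-03"]

~N[2018-02-10 00:00:00] in ~I"2018-01/03"
#=> true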
# source: lib/calendar_interval.ex
defmodule AWS.Synthetics do
@moduledoc """
Amazon CloudWatch Synthetics
You can use Amazon CloudWatch Synthetics to continually monitor your services.
You can create and manage *canaries*, which are modular, lightweight scripts
that monitor your endpoints and APIs from the outside-in. You can set up your
canaries to run 24 hours a day, once per minute. The canaries help you check the
availability and latency of your web services and troubleshoot anomalies by
investigating load time data, screenshots of the UI, logs, and metrics. The
canaries seamlessly integrate with CloudWatch ServiceLens to help you trace the
causes of impacted nodes in your applications. For more information, see [Using ServiceLens to Monitor the Health of Your
Applications](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ServiceLens.html)
in the *Amazon CloudWatch User Guide*.
Before you create and manage canaries, be aware of the security considerations.
For more information, see [Security Considerations for Synthetics Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2017-10-11",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "synthetics",
global?: false,
protocol: "rest-json",
service_id: "synthetics",
signature_version: "v4",
signing_name: "synthetics",
target_prefix: nil
}
end
@doc """
Creates a canary.
Canaries are scripts that monitor your endpoints and APIs from the outside-in.
Canaries help you check the availability and latency of your web services and
troubleshoot anomalies by investigating load time data, screenshots of the UI,
logs, and metrics. You can set up a canary to run continuously or just once.
Do not use `CreateCanary` to modify an existing canary. Use
[UpdateCanary](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_UpdateCanary.html) instead.
To create canaries, you must have the `CloudWatchSyntheticsFullAccess` policy.
  If you are creating a new IAM role for the canary, you also need the
`iam:CreateRole`, `iam:CreatePolicy` and `iam:AttachRolePolicy` permissions. For
more information, see [Necessary Roles and
Permissions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Roles).
Do not include secrets or proprietary information in your canary names. The
canary name makes up part of the Amazon Resource Name (ARN) for the canary, and
the ARN is included in outbound calls over the internet. For more information,
see [Security Considerations for Synthetics Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html).
"""
def create_canary(%Client{} = client, input, options \\ []) do
url_path = "/canary"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Permanently deletes the specified canary.
When you delete a canary, resources used and created by the canary are not
automatically deleted. After you delete a canary that you do not intend to use
again, you should also delete the following:
* The Lambda functions and layers used by this canary. These have
the prefix `cwsyn-*MyCanaryName* `.
* The CloudWatch alarms created for this canary. These alarms have a
name of `Synthetics-SharpDrop-Alarm-*MyCanaryName* `.
* Amazon S3 objects and buckets, such as the canary's artifact
location.
* IAM roles created for the canary. If they were created in the
console, these roles have the name `
role/service-role/CloudWatchSyntheticsRole-*MyCanaryName* `.
* CloudWatch Logs log groups created for the canary. These logs
groups have the name `/aws/lambda/cwsyn-*MyCanaryName* `.
Before you delete a canary, you might want to use `GetCanary` to display the
information about this canary. Make note of the information returned by this
operation so that you can delete these resources after you delete the canary.
"""
def delete_canary(%Client{} = client, name, input, options \\ []) do
url_path = "/canary/#{AWS.Util.encode_uri(name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
This operation returns a list of the canaries in your account, along with full
details about each canary.
This operation does not have resource-level authorization, so if a user is able
to use `DescribeCanaries`, the user can see all of the canaries in the account.
A deny policy can only be used to restrict access to all canaries. It cannot be
used on specific resources.
"""
def describe_canaries(%Client{} = client, input, options \\ []) do
url_path = "/canaries"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Use this operation to see information from the most recent run of each canary
that you have created.
"""
def describe_canaries_last_run(%Client{} = client, input, options \\ []) do
url_path = "/canaries/last-run"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of Synthetics canary runtime versions.
For more information, see [ Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html).
"""
def describe_runtime_versions(%Client{} = client, input, options \\ []) do
url_path = "/runtime-versions"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Retrieves complete information about one canary.
You must specify the name of the canary that you want. To get a list of canaries
and their names, use
[DescribeCanaries](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html).
"""
def get_canary(%Client{} = client, name, options \\ []) do
url_path = "/canary/#{AWS.Util.encode_uri(name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a list of runs for a specified canary.
"""
def get_canary_runs(%Client{} = client, name, input, options \\ []) do
url_path = "/canary/#{AWS.Util.encode_uri(name)}/runs"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Displays the tags associated with a canary.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Use this operation to run a canary that has already been created.
The frequency of the canary runs is determined by the value of the canary's
`Schedule`. To see a canary's schedule, use
[GetCanary](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanary.html).
"""
def start_canary(%Client{} = client, name, input, options \\ []) do
url_path = "/canary/#{AWS.Util.encode_uri(name)}/start"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Stops the canary to prevent all future runs.
If the canary is currently running, Synthetics stops waiting for the current run
of the specified canary to complete. The run that is in progress completes on
its own, publishes metrics, and uploads artifacts, but it is not recorded in
Synthetics as a completed run.
You can use `StartCanary` to start it running again with the canary’s current
schedule at any point in the future.
"""
def stop_canary(%Client{} = client, name, input, options \\ []) do
url_path = "/canary/#{AWS.Util.encode_uri(name)}/stop"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Assigns one or more tags (key-value pairs) to the specified canary.
Tags can help you organize and categorize your resources. You can also use them
to scope user permissions, by granting a user permission to access or change
only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted
strictly as strings of characters.
  You can use the `TagResource` action with a canary that already has tags. If you
  specify a new tag key for the canary, this tag is appended to the list of tags
  associated with the canary. If you specify a tag key that is already associated
  with the canary, the new tag value that you specify replaces the previous value
  for that tag.
You can associate as many as 50 tags with a canary.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes one or more tags from the specified canary.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
{query_params, input} =
[
{"TagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Use this operation to change the settings of a canary that has already been
created.
You can't use this operation to update the tags of an existing canary. To change
the tags of an existing canary, use
[TagResource](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_TagResource.html).
"""
def update_canary(%Client{} = client, name, input, options \\ []) do
url_path = "/canary/#{AWS.Util.encode_uri(name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
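# Hypothetical usage sketch (client construction follows the aws-elixir
# convention; credentials, region, and the response shape are illustrative):
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

{:ok, %{"Canaries" => canaries}, _http_response} =
  AWS.Synthetics.describe_canaries(client, %{})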
# source: lib/aws/generated/synthetics.ex
defmodule Clickhousex.Codec.RowBinary do
alias Clickhousex.{Codec, Codec.Binary}
@behaviour Codec
@impl Codec
def response_format(), do: "RowBinaryWithNamesAndTypes"
@impl Codec
def request_format(), do: "Values"
@impl Codec
def encode(query, replacements, params) do
Clickhousex.Codec.Values.encode(query, replacements, params)
end
@impl Codec
def decode(response) when is_binary(response) do
{:ok, column_count, rest} = Binary.decode(response, :varint)
decode_metadata(rest, column_count)
end
defp decode_metadata(bytes, column_count) do
{:ok, column_names, rest} = decode_column_names(bytes, column_count, [])
{:ok, column_types, rest} = decode_column_types(rest, column_count, [])
{:ok, rows} = decode_rows(rest, column_types, [])
{:ok, %{column_names: column_names, rows: rows, count: 0}}
end
defp decode_column_names(bytes, 0, names) do
{:ok, Enum.reverse(names), bytes}
end
defp decode_column_names(bytes, column_count, names) do
{:ok, column_name, rest} = Binary.decode(bytes, :string)
decode_column_names(rest, column_count - 1, [column_name | names])
end
defp decode_column_types(bytes, 0, types) do
{:ok, Enum.reverse(types), bytes}
end
defp decode_column_types(bytes, column_count, types) do
{:ok, column_type, rest} = Binary.decode(bytes, :string)
decode_column_types(rest, column_count - 1, [to_type(column_type) | types])
end
defp decode_rows(<<>>, _, rows) do
{:ok, Enum.reverse(rows)}
end
defp decode_rows(bytes, atom_types, rows) do
{:ok, row, rest} = decode_row(bytes, atom_types, [])
decode_rows(rest, atom_types, [row | rows])
end
defp decode_row(bytes, [], row) do
row_tuple =
row
|> Enum.reverse()
|> List.to_tuple()
{:ok, row_tuple, bytes}
end
defp decode_row(<<1, rest::binary>>, [{:nullable, _} | types], row) do
decode_row(rest, types, [nil | row])
end
defp decode_row(<<0, rest::binary>>, [{:nullable, actual_type} | types], row) do
decode_row(rest, [actual_type | types], row)
end
defp decode_row(bytes, [{:fixed_string, length} | types], row) do
<<value::binary-size(length), rest::binary>> = bytes
decode_row(rest, types, [value | row])
end
defp decode_row(bytes, [{:array, elem_type} | types], row) do
{:ok, value, rest} = Binary.decode(bytes, {:list, elem_type})
decode_row(rest, types, [value | row])
end
defp decode_row(bytes, [type | types], row) do
{:ok, value, rest} = Binary.decode(bytes, type)
decode_row(rest, types, [value | row])
end
defp to_type(<<"Nullable(", type::binary>>) do
rest_type =
type
|> String.trim_trailing(")")
|> to_type()
{:nullable, rest_type}
end
defp to_type(<<"FixedString(", rest::binary>>) do
{length, _} = Integer.parse(rest)
{:fixed_string, length}
end
defp to_type(<<"Array(", type::binary>>) do
rest_type =
type
|> String.trim_trailing(")")
|> to_type()
{:array, rest_type}
end
@clickhouse_mappings [
{"Int64", :i64},
{"Int32", :i32},
{"Int16", :i16},
{"Int8", :i8},
{"UInt64", :u64},
{"UInt32", :u32},
{"UInt16", :u16},
{"UInt8", :u8},
{"Float64", :f64},
{"Float32", :f32},
{"Float16", :f16},
{"Float8", :f8},
{"String", :string},
{"Date", :date},
{"DateTime", :datetime}
]
for {clickhouse_type, local_type} <- @clickhouse_mappings do
defp to_type(unquote(clickhouse_type)) do
unquote(local_type)
end
end
end
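# Hypothetical sketch of the ClickHouse-to-atom type mapping performed by the
# private to_type/1 above (inputs are type names as sent by the server):
#
#   "Int32"             -> :i32
#   "Nullable(String)"  -> {:nullable, :string}
#   "FixedString(16)"   -> {:fixed_string, 16}
#   "Array(UInt8)"      -> {:array, :u8}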
# source: lib/clickhousex/codec/row_binary.ex
defmodule Nebulex.Cache do
@moduledoc ~S"""
Cache's main interface; defines the cache abstraction layer which is
highly inspired by [Ecto](https://github.com/elixir-ecto/ecto).
A Cache maps to an underlying implementation, controlled by the
adapter. For example, Nebulex ships with a default adapter that
implements a local generational cache.
When used, the Cache expects the `:otp_app` and `:adapter` as options.
The `:otp_app` should point to an OTP application that has the cache
configuration. For example, the Cache:
defmodule MyApp.Cache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
end
Could be configured with:
config :my_app, MyApp.Cache,
backend: :shards,
gc_interval: :timer.hours(12),
max_size: 1_000_000,
allocated_memory: 2_000_000_000,
gc_cleanup_min_timeout: :timer.seconds(10),
gc_cleanup_max_timeout: :timer.minutes(10)
Most of the configuration that goes into the `config` is specific
to the adapter. For this particular example, you can check
[`Nebulex.Adapters.Local`](https://hexdocs.pm/nebulex/Nebulex.Adapters.Local.html)
  for more information. Nevertheless, the following configuration values
  are shared across all adapters:
* `:name` - The name of the Cache supervisor process.
* `:stats` - Boolean to define whether or not the cache will provide stats.
Defaults to `false`. Each adapter is responsible for providing stats by
implementing `Nebulex.Adapter.Stats` behaviour. See the "Stats" section
below.
  * `:telemetry_prefix` - It is recommended for adapters to publish events
    using the `Telemetry` library. By default, the telemetry prefix is based
    on the module name, so if your module is called `MyApp.Cache`, the prefix
    will be `[:my_app, :cache]`. See the "Telemetry events" section to see
    which events adapters are recommended to publish. Note that if you
have multiple caches, you should keep the `:telemetry_prefix` consistent
for each of them and use the `:cache` and/or `:name` (in case of a named
or dynamic cache) properties in the event metadata for distinguishing
between caches. If it is set to `nil`, Telemetry events are disabled for
that cache.
## Stats
  Stats support depends entirely on the adapter; to provide it, an adapter
  should implement the optional behaviour `Nebulex.Adapter.Stats`. Nevertheless,
  the behaviour `Nebulex.Adapter.Stats` comes with a default implementation
  using [Erlang counters](https://erlang.org/doc/man/counters.html), which is
  used by the local built-in adapter (`Nebulex.Adapters.Local`).
  To enable stats, set the option `:stats` to `true` in the Cache options.
  For example, you can do it in the configuration file:
config :my_app, MyApp.Cache,
stats: true,
...
> Remember to check if the underlying adapter implements the
`Nebulex.Adapter.Stats` behaviour.
See `c:Nebulex.Cache.stats/0` for more information.
## Dispatching stats via Telemetry
  It is possible to emit Telemetry events for the current stats via
  `c:Nebulex.Cache.dispatch_stats/1`, but it has to be called explicitly;
  Nebulex does not emit Telemetry events on its own. However, it is pretty easy
  to emit this event using [`:telemetry_poller`][telemetry_poller].
[telemetry_poller]: https://github.com/beam-telemetry/telemetry_poller
For example, we can define a custom pollable measurement:
:telemetry_poller.start_link(
measurements: [
{MyApp.Cache, :dispatch_stats, []},
],
# configure sampling period - default is :timer.seconds(5)
period: :timer.seconds(30),
name: :my_cache_stats_poller
)
Or you can also start the `:telemetry_poller` process along with your
application supervision tree, like so:
def start(_type, _args) do
my_cache_stats_poller_opts = [
measurements: [
{MyApp.Cache, :dispatch_stats, []},
],
period: :timer.seconds(30),
name: :my_cache_stats_poller
]
children = [
{MyApp.Cache, []},
{:telemetry_poller, my_cache_stats_poller_opts}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
See [Nebulex Telemetry Guide](http://hexdocs.pm/nebulex/telemetry.html)
for more information.
## Telemetry events
Similar to Ecto or Phoenix, Nebulex also provides built-in Telemetry events
applied to all caches, and cache adapter-specific events.
### Nebulex built-in events
The following events are emitted by all Nebulex caches:
* `[:nebulex, :cache, :init]` - it is dispatched whenever a cache starts.
The measurement is a single `system_time` entry in native unit. The
metadata is the `:cache` and all initialization options under `:opts`.
### Adapter-specific events
  Adapters are recommended to publish the `Telemetry` events listed below.
  Those events will use the `:telemetry_prefix` outlined above, which
  defaults to `[:my_app, :cache]`.
For instance, to receive all events published by a cache called `MyApp.Cache`,
one could define a module:
defmodule MyApp.Telemetry do
def handle_event([:my_app, :cache, :command, event], measurements, metadata, config) do
case event do
:start ->
# Handle start event ...
:stop ->
# Handle stop event ...
:exception ->
# Handle exception event ...
end
end
end
Then, in the `Application.start/2` callback, attach the handler to this event
using a unique handler id:
:telemetry.attach(
"my-app-handler-id",
[:my_app, :cache, :command],
&MyApp.Telemetry.handle_event/4,
%{}
)
See [the telemetry documentation](https://hexdocs.pm/telemetry/)
for more information.
The following are the events you should expect from Nebulex. All examples
below consider a cache named `MyApp.Cache`:
#### `[:my_app, :cache, :command, :start]`
This event should be invoked on every cache call sent to the adapter before
the command logic is executed.
The `:measurements` map will include the following:
* `:system_time` - The current system time in native units from calling:
`System.system_time()`.
A Telemetry `:metadata` map including the following fields. Each cache adapter
may emit different information here. For built-in adapters, it will contain:
* `:action` - An atom indicating the called cache command or action.
* `:cache` - The Nebulex cache.
#### `[:my_app, :cache, :command, :stop]`
This event should be invoked on every cache call sent to the adapter after
the command logic is executed.
The `:measurements` map will include the following:
* `:duration` - The time spent executing the cache command. The measurement
is given in the `:native` time unit. You can read more about it in the
docs for `System.convert_time_unit/3`.
A Telemetry `:metadata` map including the following fields. Each cache adapter
may emit different information here. For built-in adapters, it will contain:
* `:action` - An atom indicating the called cache command or action.
* `:cache` - The Nebulex cache.
* `:result` - The command result.
#### `[:my_app, :cache, :command, :exception]`
This event should be invoked when an error or exception occurs while executing
the cache command.
The `:measurements` map will include the following:
* `:duration` - The time spent executing the cache command. The measurement
is given in the `:native` time unit. You can read more about it in the
docs for `System.convert_time_unit/3`.
A Telemetry `:metadata` map including the following fields. Each cache adapter
may emit different information here. For built-in adapters, it will contain:
* `:action` - An atom indicating the called cache command or action.
* `:cache` - The Nebulex cache.
* `:kind` - The type of the error: `:error`, `:exit`, or `:throw`.
* `:reason` - The reason of the error.
* `:stacktrace` - The stacktrace.
  **NOTE:** The events outlined above are the ones recommended for adapters
  to dispatch. However, it is highly recommended to review the documentation
  of the adapter in use, to make sure it is fully compatible with these
  events; it may have differences or publish additional events.
## Distributed topologies
Nebulex provides the following adapters for distributed topologies:
* `Nebulex.Adapters.Partitioned` - Partitioned cache topology.
* `Nebulex.Adapters.Replicated` - Replicated cache topology.
* `Nebulex.Adapters.Multilevel` - Multi-level distributed cache topology.
These adapters work more as wrappers for an existing local adapter and provide
the distributed topology on top of it. Optionally, you can set the adapter for
the primary cache storage with the option `:primary_storage_adapter`. Defaults
to `Nebulex.Adapters.Local`.
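  As a minimal sketch (for illustration only; see each adapter's docs for its
  full option set), a partitioned cache is declared the same way as a local
  one, optionally picking the primary storage adapter:

      defmodule MyApp.PartitionedCache do
        use Nebulex.Cache,
          otp_app: :my_app,
          adapter: Nebulex.Adapters.Partitioned,
          primary_storage_adapter: Nebulex.Adapters.Local
      end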
"""
@type t :: module
@typedoc "Cache entry key"
@type key :: any
@typedoc "Cache entry value"
@type value :: any
@typedoc "Cache entries"
@type entries :: map | [{key, value}]
@typedoc "Cache action options"
@type opts :: Keyword.t()
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Nebulex.Cache
alias Nebulex.Cache.{
Entry,
Persistence,
Queryable,
Stats,
Storage,
Transaction
}
alias Nebulex.Hook
{otp_app, adapter, behaviours} = Nebulex.Cache.Supervisor.compile_config(opts)
@otp_app otp_app
@adapter adapter
@opts opts
@default_dynamic_cache opts[:default_dynamic_cache] || __MODULE__
@before_compile adapter
## Config and metadata
@impl true
def config do
{:ok, config} = Nebulex.Cache.Supervisor.runtime_config(__MODULE__, @otp_app, [])
config
end
@impl true
def __adapter__, do: @adapter
## Process lifecycle
@doc false
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor
}
end
@impl true
def start_link(opts \\ []) do
Nebulex.Cache.Supervisor.start_link(__MODULE__, @otp_app, @adapter, opts)
end
@impl true
def stop(timeout \\ 5000) do
Supervisor.stop(get_dynamic_cache(), :normal, timeout)
end
@compile {:inline, get_dynamic_cache: 0}
@impl true
def get_dynamic_cache do
Process.get({__MODULE__, :dynamic_cache}, @default_dynamic_cache)
end
@impl true
def put_dynamic_cache(dynamic) when is_atom(dynamic) or is_pid(dynamic) do
Process.put({__MODULE__, :dynamic_cache}, dynamic) || @default_dynamic_cache
end
@impl true
def with_dynamic_cache(name, fun) do
default_dynamic_cache = get_dynamic_cache()
try do
_ = put_dynamic_cache(name)
fun.()
after
_ = put_dynamic_cache(default_dynamic_cache)
end
end
@impl true
def with_dynamic_cache(name, module, fun, args) do
with_dynamic_cache(name, fn -> apply(module, fun, args) end)
end
## Entry
@impl true
def get(key, opts \\ []) do
Entry.get(get_dynamic_cache(), key, opts)
end
@impl true
def get!(key, opts \\ []) do
Entry.get!(get_dynamic_cache(), key, opts)
end
@impl true
def get_all(keys, opts \\ []) do
Entry.get_all(get_dynamic_cache(), keys, opts)
end
@impl true
def put(key, value, opts \\ []) do
Entry.put(get_dynamic_cache(), key, value, opts)
end
@impl true
def put_new(key, value, opts \\ []) do
Entry.put_new(get_dynamic_cache(), key, value, opts)
end
@impl true
def put_new!(key, value, opts \\ []) do
Entry.put_new!(get_dynamic_cache(), key, value, opts)
end
@impl true
def replace(key, value, opts \\ []) do
Entry.replace(get_dynamic_cache(), key, value, opts)
end
@impl true
def replace!(key, value, opts \\ []) do
Entry.replace!(get_dynamic_cache(), key, value, opts)
end
@impl true
def put_all(entries, opts \\ []) do
Entry.put_all(get_dynamic_cache(), entries, opts)
end
@impl true
def put_new_all(entries, opts \\ []) do
Entry.put_new_all(get_dynamic_cache(), entries, opts)
end
@impl true
def delete(key, opts \\ []) do
Entry.delete(get_dynamic_cache(), key, opts)
end
@impl true
def take(key, opts \\ []) do
Entry.take(get_dynamic_cache(), key, opts)
end
@impl true
def take!(key, opts \\ []) do
Entry.take!(get_dynamic_cache(), key, opts)
end
@impl true
def has_key?(key) do
Entry.has_key?(get_dynamic_cache(), key)
end
@impl true
def get_and_update(key, fun, opts \\ []) do
Entry.get_and_update(get_dynamic_cache(), key, fun, opts)
end
@impl true
def update(key, initial, fun, opts \\ []) do
Entry.update(get_dynamic_cache(), key, initial, fun, opts)
end
@impl true
def incr(key, amount \\ 1, opts \\ []) do
Entry.incr(get_dynamic_cache(), key, amount, opts)
end
@impl true
def decr(key, amount \\ 1, opts \\ []) do
Entry.decr(get_dynamic_cache(), key, amount, opts)
end
@impl true
def ttl(key) do
Entry.ttl(get_dynamic_cache(), key)
end
@impl true
def expire(key, ttl) do
Entry.expire(get_dynamic_cache(), key, ttl)
end
@impl true
def touch(key) do
Entry.touch(get_dynamic_cache(), key)
end
## Queryable
if Nebulex.Adapter.Queryable in behaviours do
@impl true
def all(query \\ nil, opts \\ []) do
Queryable.all(get_dynamic_cache(), query, opts)
end
@impl true
def count_all(query \\ nil, opts \\ []) do
Queryable.count_all(get_dynamic_cache(), query, opts)
end
@impl true
def delete_all(query \\ nil, opts \\ []) do
Queryable.delete_all(get_dynamic_cache(), query, opts)
end
@impl true
def stream(query \\ nil, opts \\ []) do
Queryable.stream(get_dynamic_cache(), query, opts)
end
## Deprecated functions (for backwards compatibility)
@impl true
defdelegate size, to: __MODULE__, as: :count_all
@impl true
defdelegate flush, to: __MODULE__, as: :delete_all
end
## Persistence
if Nebulex.Adapter.Persistence in behaviours do
@impl true
def dump(path, opts \\ []) do
Persistence.dump(get_dynamic_cache(), path, opts)
end
@impl true
def load(path, opts \\ []) do
Persistence.load(get_dynamic_cache(), path, opts)
end
end
## Transactions
if Nebulex.Adapter.Transaction in behaviours do
@impl true
def transaction(opts \\ [], fun) do
Transaction.transaction(get_dynamic_cache(), opts, fun)
end
@impl true
def in_transaction? do
Transaction.in_transaction?(get_dynamic_cache())
end
end
## Stats
if Nebulex.Adapter.Stats in behaviours do
@impl true
def stats do
Stats.stats(get_dynamic_cache())
end
@impl true
def dispatch_stats(opts \\ []) do
Stats.dispatch_stats(get_dynamic_cache(), opts)
end
end
end
end
## User callbacks
@optional_callbacks init: 1
@doc """
A callback executed when the cache starts or when configuration is read.
"""
@callback init(config :: Keyword.t()) :: {:ok, Keyword.t()} | :ignore
## Nebulex.Adapter
@doc """
Returns the adapter tied to the cache.
"""
@callback __adapter__ :: Nebulex.Adapter.t()
@doc """
Returns the adapter configuration stored in the `:otp_app` environment.
If the `c:init/1` callback is implemented in the cache, it will be invoked.
"""
@callback config() :: Keyword.t()
@doc """
Starts a supervision tree and returns `{:ok, pid}` or just `:ok` if nothing
needs to be done.
Returns `{:error, {:already_started, pid}}` if the cache is already
started or `{:error, term}` in case anything else goes wrong.
## Options
See the configuration in the moduledoc for options shared between adapters,
for adapter-specific configuration see the adapter's documentation.
"""
@callback start_link(opts) ::
{:ok, pid}
| {:error, {:already_started, pid}}
| {:error, term}
@doc """
Shuts down the cache.
"""
@callback stop(timeout) :: :ok
@doc """
Returns the atom name or pid of the current cache
(based on Ecto dynamic repo).
See also `c:put_dynamic_cache/1`.
"""
@callback get_dynamic_cache() :: atom() | pid()
@doc """
Sets the dynamic cache to be used in further commands
(based on Ecto dynamic repo).
There might be cases where we want to have different cache instances but
access them through the same cache module. By default, when you call
`MyApp.Cache.start_link/1`, it will start a cache with the name
`MyApp.Cache`. But it is also possible to start multiple caches by using
a different name for each of them:
MyApp.Cache.start_link(name: :cache1)
MyApp.Cache.start_link(name: :cache2, backend: :shards)
You can also start caches without names by explicitly setting the name
to `nil`:
MyApp.Cache.start_link(name: nil, backend: :shards)
> **NOTE:** Some adapters may require the `:name` option anyway; therefore,
it is highly recommended to check the documentation of the adapter you
want to use.
However, once the cache is started, it is not possible to interact directly
with it, since all operations through `MyApp.Cache` are sent by default to
the cache named `MyApp.Cache`. But you can change the default cache at
compile-time:
use Nebulex.Cache, default_dynamic_cache: :cache_name
Or anytime at runtime by calling `put_dynamic_cache/1`:
MyApp.Cache.put_dynamic_cache(:another_cache_name)
From this moment on, all future commands performed by the current process
will run on `:another_cache_name`.
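To switch back to the default dynamic cache (the cache module itself, unless
a compile-time default was given), for example:
    MyApp.Cache.put_dynamic_cache(MyApp.Cache)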
"""
@callback put_dynamic_cache(atom() | pid()) :: atom() | pid()
@doc """
Invokes the given function `fun` for the dynamic cache `name_or_pid`.
## Example
MyCache.with_dynamic_cache(:my_cache, fn ->
MyCache.put("foo", "var")
end)
See `c:get_dynamic_cache/0` and `c:put_dynamic_cache/1`.
"""
@callback with_dynamic_cache(name_or_pid :: atom() | pid(), fun) :: term
@doc """
For the dynamic cache `name_or_pid`, invokes the given function name `fun`
from `module` with the list of arguments `args`.
## Example
MyCache.with_dynamic_cache(:my_cache, Module, :some_fun, ["foo", "bar"])
See `c:get_dynamic_cache/0` and `c:put_dynamic_cache/1`.
"""
@callback with_dynamic_cache(
name_or_pid :: atom() | pid(),
module,
fun :: atom,
args :: [term]
) :: term
## Nebulex.Adapter.Entry
@doc """
Gets a value from Cache where the key matches the given `key`.
Returns `nil` if no result was found.
## Options
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put("foo", "bar")
:ok
iex> MyCache.get("foo")
"bar"
iex> MyCache.get(:non_existent_key)
nil
"""
@callback get(key, opts) :: value
@doc """
Similar to `c:get/2` but raises `KeyError` if `key` is not found.
## Options
See the "Shared options" section at the module documentation for more options.
## Example
MyCache.get!(:a)
"""
@callback get!(key, opts) :: value
@doc """
Returns a `map` with all the key-value pairs in the Cache where the key
is in `keys`.
If `keys` contains keys that are not in the Cache, they're simply ignored.
## Options
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put_all([a: 1, c: 3])
:ok
iex> MyCache.get_all([:a, :b, :c])
%{a: 1, c: 3}
"""
@callback get_all(keys :: [key], opts) :: map
@doc """
Puts the given `value` under `key` into the Cache.
If `key` already holds an entry, it is overwritten. Any previous
time to live associated with the key is discarded on successful
`put` operation.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put("foo", "bar")
:ok
If the value is nil, then it is not stored (operation is skipped):
iex> MyCache.put("foo", nil)
:ok
Put key with time-to-live:
iex> MyCache.put("foo", "bar", ttl: 10_000)
:ok
Using Nebulex.Time for TTL:
iex> MyCache.put("foo", "bar", ttl: :timer.hours(1))
:ok
iex> MyCache.put("foo", "bar", ttl: :timer.minutes(1))
:ok
iex> MyCache.put("foo", "bar", ttl: :timer.seconds(1))
:ok
"""
@callback put(key, value, opts) :: :ok
@doc """
Puts the given `entries` (key/value pairs) into the Cache. It replaces
existing values with new values (just as regular `put`).
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put_all(apples: 3, bananas: 1)
:ok
iex> MyCache.put_all(%{apples: 2, oranges: 1}, ttl: 10_000)
:ok
Ideally, this operation should be atomic, so all given keys are put at once.
But it depends purely on the adapter's implementation and the backend used
internally by the adapter. Hence, it is recommended to review the adapter's
documentation.
"""
@callback put_all(entries, opts) :: :ok
@doc """
Puts the given `value` under `key` into the cache, only if it does not
already exist.
Returns `true` if a value was set, otherwise, `false` is returned.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put_new("foo", "bar")
true
iex> MyCache.put_new("foo", "bar")
false
If the value is nil, it is not stored (operation is skipped):
iex> MyCache.put_new("other", nil)
true
"""
@callback put_new(key, value, opts) :: boolean
@doc """
Similar to `c:put_new/3` but raises `Nebulex.KeyAlreadyExistsError` if the
key already exists.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put_new!("foo", "bar")
true
"""
@callback put_new!(key, value, opts) :: true
@doc """
Puts the given `entries` (key/value pairs) into the `cache`. It will not
perform any operation at all even if just a single key already exists.
Returns `true` if all entries were successfully set. It returns `false`
if no key was set (at least one key already existed).
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put_new_all(apples: 3, bananas: 1)
true
iex> MyCache.put_new_all(%{apples: 3, oranges: 1}, ttl: 10_000)
false
Ideally, this operation should be atomic, so all given keys are put at once.
But it depends purely on the adapter's implementation and the backend used
internally by the adapter. Hence, it is recommended to review the adapter's
documentation.
"""
@callback put_new_all(entries, opts) :: boolean
@doc """
Alters the entry stored under `key`, but only if the entry already exists
into the Cache.
Returns `true` if a value was set, otherwise, `false` is returned.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.replace("foo", "bar")
false
iex> MyCache.put_new("foo", "bar")
true
iex> MyCache.replace("foo", "bar2")
true
Update current value and TTL:
iex> MyCache.replace("foo", "bar3", ttl: 10_000)
true
"""
@callback replace(key, value, opts) :: boolean
@doc """
Similar to `c:replace/3` but raises `KeyError` if `key` is not found.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.replace!("foo", "bar")
true
"""
@callback replace!(key, value, opts) :: true
@doc """
Deletes the entry in Cache for a specific `key`.
## Options
See the "Shared options" section at the module documentation for more options.
## Example
iex> MyCache.put(:a, 1)
:ok
iex> MyCache.delete(:a)
:ok
iex> MyCache.get(:a)
nil
iex> MyCache.delete(:non_existent_key)
:ok
"""
@callback delete(key, opts) :: :ok
@doc """
Returns and removes the value associated with `key` in the Cache.
If the `key` does not exist, then `nil` is returned.
## Options
See the "Shared options" section at the module documentation for more options.
## Examples
iex> MyCache.put(:a, 1)
:ok
iex> MyCache.take(:a)
1
iex> MyCache.take(:a)
nil
"""
@callback take(key, opts) :: value
@doc """
Similar to `c:take/2` but raises `KeyError` if `key` is not found.
## Options
See the "Shared options" section at the module documentation for more options.
## Example
MyCache.take!(:a)
"""
@callback take!(key, opts) :: value
@doc """
Returns whether the given `key` exists in the Cache.
## Examples
iex> MyCache.put(:a, 1)
:ok
iex> MyCache.has_key?(:a)
true
iex> MyCache.has_key?(:b)
false
"""
@callback has_key?(key) :: boolean
@doc """
Gets the value from `key` and updates it, all in one pass.
`fun` is called with the current cached value under `key` (or `nil` if `key`
hasn't been cached) and must return a two-element tuple: the current value
(the retrieved value, which can be operated on before being returned) and
the new value to be stored under `key`. `fun` may also return `:pop`, which
means the current value shall be removed from Cache and returned.
The returned value is a tuple with the current value returned by `fun` and
the new updated value under `key`.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Examples
Update nonexistent key:
iex> MyCache.get_and_update(:a, fn current_value ->
...> {current_value, "value!"}
...> end)
{nil, "value!"}
Update existing key:
iex> MyCache.get_and_update(:a, fn current_value ->
...> {current_value, "new value!"}
...> end)
{"value!", "new value!"}
Pop/remove value if exist:
iex> MyCache.get_and_update(:a, fn _ -> :pop end)
{"new value!", nil}
Pop/remove nonexistent key:
iex> MyCache.get_and_update(:b, fn _ -> :pop end)
{nil, nil}
"""
@callback get_and_update(key, (value -> {current_value, new_value} | :pop), opts) ::
{current_value, new_value}
when current_value: value, new_value: value
@doc """
Updates the cached `key` with the given function.
If `key` is present in Cache with value `value`, `fun` is invoked with
argument `value` and its result is used as the new value of `key`.
If `key` is not present in Cache, `initial` is inserted as the value of `key`.
The initial value will not be passed through the update function.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
See the "Shared options" section at the module documentation for more options.
## Examples
iex> MyCache.update(:a, 1, &(&1 * 2))
1
iex> MyCache.update(:a, 1, &(&1 * 2))
2
"""
@callback update(key, initial :: value, (value -> value), opts) :: value
@doc """
Increments the counter stored at `key` by the given `amount`.
If `amount < 0` (negative), the value is decremented by that `amount`
instead.
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
* `:default` - If `key` is not present in Cache, the default value is
inserted as the initial value of `key` before it is incremented.
Defaults to `0`.
See the "Shared options" section at the module documentation for more options.
## Examples
iex> MyCache.incr(:a)
1
iex> MyCache.incr(:a, 2)
3
iex> MyCache.incr(:a, -1)
2
iex> MyCache.incr(:missing_key, 2, default: 10)
12
"""
@callback incr(key, amount :: integer, opts) :: integer
@doc """
Decrements the counter stored at `key` by the given `amount`.
If `amount < 0` (negative), the value is incremented by that `amount`
instead (opposite to `incr/3`).
## Options
* `:ttl` - (positive integer or `:infinity`) Defines the time-to-live
(or expiry time) for the given key in **milliseconds**. Defaults
to `:infinity`.
* `:default` - If `key` is not present in Cache, the default value is
inserted as the initial value of `key` before it is decremented.
Defaults to `0`.
See the "Shared options" section at the module documentation for more options.
## Examples
iex> MyCache.decr(:a)
-1
iex> MyCache.decr(:a, 2)
-3
iex> MyCache.decr(:a, -1)
-2
iex> MyCache.decr(:missing_key, 2, default: 10)
8
"""
@callback decr(key, amount :: integer, opts) :: integer
@doc """
Returns the remaining time-to-live for the given `key`. If the `key` does not
exist, then `nil` is returned.
## Examples
iex> MyCache.put(:a, 1, ttl: 5000)
:ok
iex> MyCache.put(:b, 2)
:ok
iex> MyCache.ttl(:a)
_remaining_ttl
iex> MyCache.ttl(:b)
:infinity
iex> MyCache.ttl(:c)
nil
"""
@callback ttl(key) :: timeout | nil
@doc """
Returns `true` if the given `key` exists and the new `ttl` was successfully
updated, otherwise, `false` is returned.
## Examples
iex> MyCache.put(:a, 1)
:ok
iex> MyCache.expire(:a, 5)
true
iex> MyCache.expire(:a, :infinity)
true
iex> MyCache.expire(:b, 5)
false
"""
@callback expire(key, ttl :: timeout) :: boolean
@doc """
Returns `true` if the given `key` exists and the last access time was
successfully updated, otherwise, `false` is returned.
## Examples
iex> MyCache.put(:a, 1)
:ok
iex> MyCache.touch(:a)
true
iex> MyCache.touch(:b)
false
"""
@callback touch(key) :: boolean
## Deprecated Callbacks
@doc """
Returns the total number of cached entries.
## Examples
iex> :ok = Enum.each(1..10, &MyCache.put(&1, &1))
iex> MyCache.size()
10
iex> :ok = Enum.each(1..5, &MyCache.delete(&1))
iex> MyCache.size()
5
"""
@doc deprecated: "Use count_all/2 instead"
@callback size() :: integer
@doc """
Flushes the cache and returns the number of evicted keys.
## Examples
iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1))
iex> MyCache.flush()
5
iex> MyCache.size()
0
"""
@doc deprecated: "Use delete_all/2 instead"
@callback flush() :: integer
## Nebulex.Adapter.Queryable
@optional_callbacks all: 2, count_all: 2, delete_all: 2, stream: 2
@doc """
Fetches all entries from cache matching the given `query`.
May raise `Nebulex.QueryError` if query validation fails.
## Query values
There are two types of query values. The ones shared and implemented
by all adapters and the ones that are adapter specific.
### Common queries
The following query values are shared and/or supported for all adapters:
* `nil` - Returns a list with all cached entries based on the `:return`
option.
### Adapter-specific queries
The `query` value depends entirely on the adapter implementation; it could be
any term. Therefore, it is highly recommended to see the adapters' documentation
for more information about building queries. For example, the built-in
`Nebulex.Adapters.Local` adapter uses `:ets.match_spec()` for queries,
as well as other pre-defined ones like `:unexpired` and `:expired`.
## Options
* `:return` - Tells the query what to return from the matched entries.
See the possible values in the "Query return option" section below.
The default depends on the adapter, for example, the default for the
built-in adapters is `:key`. This option is supported by the built-in
adapters, but it is recommended to see the adapter's documentation
to confirm its compatibility with this option.
See the "Shared options" section at the module documentation for more options.
## Query return option
The following are the possible values for the `:return` option:
* `:key` - Returns a list only with the keys.
* `:value` - Returns a list only with the values.
* `:entry` - Returns a list of `t:Nebulex.Entry.t/0`.
* `{:key, :value}` - Returns a list of tuples in the form `{key, value}`.
See the adapter's documentation to confirm which of these options are
supported and which others are added.
## Example
Populate the cache with some entries:
iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2))
Fetch all (with default params):
iex> MyCache.all()
[1, 2, 3, 4, 5]
Fetch all entries and return values:
iex> MyCache.all(nil, return: :value)
[2, 4, 6, 8, 10]
Fetch all entries and return them as key/value pairs:
iex> MyCache.all(nil, return: {:key, :value})
[{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}]
Fetch all entries that match the given query, assuming we are using the
`Nebulex.Adapters.Local` adapter:
iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [:"$1"]}]
iex> MyCache.all(query)
[3, 4, 5]
## Query
Query spec is defined by the adapter, hence, it is recommended to review
adapters documentation. For instance, the built-in `Nebulex.Adapters.Local`
adapter supports `nil | :unexpired | :expired | :ets.match_spec()` as query
value.
## Examples
Additional built-in queries for `Nebulex.Adapters.Local` adapter:
iex> unexpired = MyCache.all(:unexpired)
iex> expired = MyCache.all(:expired)
If we are using the `Nebulex.Adapters.Local` adapter, where the stored entry
is the tuple `{:entry, key, value, version, expire_at}`, then the match spec
could be something like:
iex> spec = [
...> {{:entry, :"$1", :"$2", :_, :_},
...> [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]}
...> ]
iex> MyCache.all(spec)
[{3, 6}, {4, 8}, {5, 10}]
The same previous query but using `Ex2ms`:
iex> import Ex2ms
Ex2ms
iex> spec =
...> fun do
...> {_, key, value, _, _} when value > 5 -> {key, value}
...> end
iex> MyCache.all(spec)
[{3, 6}, {4, 8}, {5, 10}]
"""
@callback all(query :: term, opts) :: [any]
@doc """
Similar to `c:all/2` but returns a lazy enumerable that emits all entries
from the cache matching the given `query`.
If `query` is `nil`, then all entries in the cache match and are returned
when the stream is evaluated, based on the `:return` option.
May raise `Nebulex.QueryError` if query validation fails.
## Query values
See `c:all/2` callback for more information about the query values.
## Options
* `:return` - Tells the query what to return from the matched entries.
See the possible values in the "Query return option" section below.
The default depends on the adapter, for example, the default for the
built-in adapters is `:key`. This option is supported by the built-in
adapters, but it is recommended to see the adapter's documentation
to confirm its compatibility with this option.
* `:page_size` - Positive integer (>= 1) that defines the page size
internally used by the adapter for paginating the results coming
back from the cache's backend. Defaults to `20`; it's unlikely
this will ever need changing.
See the "Shared options" section at the module documentation for more options.
## Query return option
The following are the possible values for the `:return` option:
* `:key` - Returns a list only with the keys.
* `:value` - Returns a list only with the values.
* `:entry` - Returns a list of `t:Nebulex.Entry.t/0`.
* `{:key, :value}` - Returns a list of tuples in the form `{key, value}`.
See the adapter's documentation to confirm which of these options are
supported and which others are added.
## Examples
Populate the cache with some entries:
iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2))
Stream all (with default params):
iex> MyCache.stream() |> Enum.to_list()
[1, 2, 3, 4, 5]
Stream all entries and return values:
iex> nil |> MyCache.stream(return: :value, page_size: 3) |> Enum.to_list()
[2, 4, 6, 8, 10]
Stream all entries and return them as key/value pairs:
iex> nil |> MyCache.stream(return: {:key, :value}) |> Enum.to_list()
[{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}]
Additional built-in queries for `Nebulex.Adapters.Local` adapter:
iex> unexpired_stream = MyCache.stream(:unexpired)
iex> expired_stream = MyCache.stream(:expired)
If we are using the `Nebulex.Adapters.Local` adapter, where the stored entry
is the tuple `{:entry, key, value, version, expire_at}`, then the match spec
could be something like:
iex> spec = [
...> {{:entry, :"$1", :"$2", :_, :_},
...> [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]}
...> ]
iex> MyCache.stream(spec, page_size: 100) |> Enum.to_list()
[{3, 6}, {4, 8}, {5, 10}]
The same previous query but using `Ex2ms`:
iex> import Ex2ms
Ex2ms
iex> spec =
...> fun do
...> {_, key, value, _, _} when value > 5 -> {key, value}
...> end
iex> spec |> MyCache.stream(page_size: 100) |> Enum.to_list()
[{3, 6}, {4, 8}, {5, 10}]
"""
@callback stream(query :: term, opts) :: Enum.t()
@doc """
Deletes all entries matching the given `query`. If `query` is `nil`,
then all entries in the cache are deleted.
It returns the number of deleted entries.
May raise `Nebulex.QueryError` if query validation fails.
## Query values
See `c:all/2` callback for more information about the query values.
## Options
See the "Shared options" section at the module documentation for more options.
## Example
Populate the cache with some entries:
iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2))
Delete all (with default params):
iex> MyCache.delete_all()
5
Delete all entries that match the given query, assuming we are using the
`Nebulex.Adapters.Local` adapter:
iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}]
iex> MyCache.delete_all(query)
> For the local adapter you can use [Ex2ms](https://github.com/ericmj/ex2ms)
to build the match specs much easier.
Additional built-in queries for `Nebulex.Adapters.Local` adapter:
iex> unexpired = MyCache.delete_all(:unexpired)
iex> expired = MyCache.delete_all(:expired)
"""
@callback delete_all(query :: term, opts) :: integer
@doc """
Counts all entries in cache matching the given `query`.
It returns the count of the matched entries.
If `query` is `nil` (the default), then the total number of
cached entries is returned.
May raise `Nebulex.QueryError` if query validation fails.
## Query values
See `c:all/2` callback for more information about the query values.
## Example
Populate the cache with some entries:
iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2))
Count all entries in cache:
iex> MyCache.count_all()
5
Count all entries that match the given query, assuming we are using the
`Nebulex.Adapters.Local` adapter:
iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}]
iex> MyCache.count_all(query)
> For the local adapter you can use [Ex2ms](https://github.com/ericmj/ex2ms)
to build the match specs much easier.
Additional built-in queries for `Nebulex.Adapters.Local` adapter:
iex> unexpired = MyCache.count_all(:unexpired)
iex> expired = MyCache.count_all(:expired)
"""
@callback count_all(query :: term, opts) :: integer
## Nebulex.Adapter.Persistence
@optional_callbacks dump: 2, load: 2
@doc """
Dumps a cache to the given file `path`.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
## Options
This operation relies entirely on the adapter implementation, which means the
options depend on each of them. For that reason, it is recommended to review
the documentation of the adapter to be used. The built-in adapters inherit
the default implementation from `Nebulex.Adapter.Persistence`, hence, review
the available options there.
## Examples
Populate the cache with some entries:
iex> entries = for x <- 1..10, into: %{}, do: {x, x}
iex> MyCache.put_all(entries)
:ok
Dump cache to a file:
iex> MyCache.dump("my_cache")
:ok
"""
@callback dump(path :: Path.t(), opts) :: :ok | {:error, term}
@doc """
Loads a dumped cache from the given `path`.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
## Options
Similar to `c:dump/2`, this operation relies entirely on the adapter
implementation, therefore, it is recommended to review the documentation
of the adapter to be used. Similarly, the built-in adapters inherit the
default implementation from `Nebulex.Adapter.Persistence`, hence, review
the available options there.
## Examples
Populate the cache with some entries:
iex> entries = for x <- 1..10, into: %{}, do: {x, x}
iex> MyCache.put_all(entries)
:ok
Dump cache to a file:
iex> MyCache.dump("my_cache")
:ok
Load the cache from a file:
iex> MyCache.load("my_cache")
:ok
"""
@callback load(path :: Path.t(), opts) :: :ok | {:error, term}
## Nebulex.Adapter.Transaction
@optional_callbacks transaction: 2, in_transaction?: 0
@doc """
Runs the given function inside a transaction.
A successful transaction returns the value returned by the function.
## Options
See the "Shared options" section at the module documentation for more options.
## Examples
MyCache.transaction fn ->
alice = MyCache.get(:alice)
bob = MyCache.get(:bob)
MyCache.put(:alice, %{alice | balance: alice.balance + 100})
MyCache.put(:bob, %{bob | balance: bob.balance + 100})
end
Locking only the involved key (recommended):
MyCache.transaction [keys: [:alice, :bob]], fn ->
alice = MyCache.get(:alice)
bob = MyCache.get(:bob)
MyCache.put(:alice, %{alice | balance: alice.balance + 100})
MyCache.put(:bob, %{bob | balance: bob.balance + 100})
end
"""
@callback transaction(opts, function :: fun) :: term
@doc """
Returns `true` if the current process is inside a transaction.
## Examples
MyCache.in_transaction?
#=> false
MyCache.transaction(fn ->
MyCache.in_transaction? #=> true
end)
"""
@callback in_transaction?() :: boolean
## Nebulex.Adapter.Stats
@optional_callbacks stats: 0, dispatch_stats: 1
@doc """
Returns `Nebulex.Stats.t()` with the current stats values.
If the stats are disabled for the cache, then `nil` is returned.
## Example
iex> MyCache.stats()
%Nebulex.Stats{
measurements: %{
evictions: 0,
expirations: 0,
hits: 0,
misses: 0,
updates: 0,
writes: 0
},
metadata: %{}
}
"""
@callback stats() :: Nebulex.Stats.t() | nil
@doc """
Emits a telemetry event when called with the current stats count.
The telemetry `:measurements` map will include the same as
`Nebulex.Stats.t()`'s measurements. For example:
* `:evictions` - Current **evictions** count.
* `:expirations` - Current **expirations** count.
* `:hits` - Current **hits** count.
* `:misses` - Current **misses** count.
* `:updates` - Current **updates** count.
* `:writes` - Current **writes** count.
The telemetry `:metadata` map will include the same as `Nebulex.Stats.t()`'s
metadata by default. For example:
* `:cache` - The cache module, or the name (if an explicit name has been
given to the cache).
Additionally, you can add your own metadata fields by giving the option
`:metadata`.
## Options
* `:event_prefix` – The prefix of the telemetry event.
Defaults to `[:nebulex, :cache]`.
* `:metadata` – A map with additional metadata fields. Defaults to `%{}`.
## Examples
iex> MyCache.dispatch_stats()
:ok
iex> MyCache.dispatch_stats(
...> event_prefix: [:my_cache],
...> metadata: %{tag: "tag1"}
...> )
:ok
**NOTE:** Since `:telemetry` is an optional dependency, when it is not
defined, a default implementation is provided without any logic, just
returning `:ok`.
"""
@callback dispatch_stats(opts) :: :ok
end
|
lib/nebulex/cache.ex
|
defmodule ChartsLive.StackedBarView do
@moduledoc """
View functions for rendering Stacked Bar charts
"""
use ChartsLive.ChartBehavior
alias Charts.Gradient
alias Charts.StackedColumnChart.Rectangle
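# Assumed layout constants: each rectangle row adds 12 units of viewbox height,
# and 170 units are reserved for axes, labels, and the legend.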
def viewbox_height(rectangles) do
length(rectangles) * 12 + 170
end
@doc """
The function used to generate X Axis labels
"""
def x_axis_labels(chart, grid_lines, offsetter) do
lines = Enum.map(grid_lines, &x_axis_column_label(&1, offsetter))
content_tag(:svg, lines,
id: svg_id(chart, "xlabels"),
class: "lines__x-labels",
width: "84%",
height: "8%",
y: "92%",
x: "5%",
style: "overflow: visible;",
offset: "0"
)
end
# TODO: add to behavior, shared with stacked column
def legend(rectangles, colors) do
legend_items =
rectangles
|> Enum.map(& &1.fill_color)
|> Enum.uniq()
|> Enum.sort()
|> Enum.map(&legend_content(&1, colors))
content_tag(:dl, legend_items, style: "margin-left: 10%; float: right;")
end
defp legend_content(color_label, colors) do
[
content_tag(:dt, "",
style:
"background-color: #{colors[color_label]}; display: inline-block; height: 10px; width: 20px; vertical-align: middle;"
),
content_tag(:dd, color_to_label(color_label),
style: "display: inline-block; margin: 0px 10px 0 6px; padding-bottom: 0;"
)
]
end
defp color_to_label(atom_color) do
atom_color
|> Atom.to_string()
|> String.capitalize()
end
defp x_axis_column_label(line, offsetter) do
content_tag(:svg,
x: "#{offsetter.(line)}%",
y: "0%",
height: "100%",
width: "20%",
style: "overflow: visible;"
) do
content_tag(:svg, width: "100%", height: "100%", x: "0", y: "0") do
content_tag(:text, line,
x: "50%",
y: "50%",
alignment_baseline: "middle",
text_anchor: "start"
)
end
end
end
end
|
lib/charts_live/views/stacked_bar_view.ex
|
defmodule Plug.SSL do
@moduledoc """
A plug to force SSL connections and enable HSTS.
If the scheme of a request is `https`, it'll add a `strict-transport-security`
header to enable HTTP Strict Transport Security by default.
Otherwise, the request will be redirected to a corresponding location
with the `https` scheme by setting the `location` header of the response.
The status code will be 301 if the method of `conn` is `GET` or `HEAD`,
or 307 in other situations.
Besides being a Plug, this module also provides conveniences for configuring
SSL. See `configure/1`.
## x-forwarded-*
If your Plug application is behind a proxy that handles HTTPS, you may
need to tell Plug to parse the proper protocol from the `x-forwarded-*`
header. This can be done using the `:rewrite_on` option:
plug Plug.SSL, rewrite_on: [:x_forwarded_proto, :x_forwarded_host, :x_forwarded_port]
The supported values are:
* `:x_forwarded_host` - to override the host based on the "x-forwarded-host" header
* `:x_forwarded_port` - to override the port based on the "x-forwarded-port" header
* `:x_forwarded_proto` - to override the protocol based on the "x-forwarded-proto" header
Since rewriting the scheme based on `x-forwarded-*` headers can open up
security vulnerabilities, only provide the option above if:
* your app is behind a proxy
* your proxy strips the given `x-forwarded-*` headers from all incoming requests
* your proxy sets the `x-forwarded-*` headers and sends them to Plug
## Plug Options
* `:rewrite_on` - rewrites the given connection information based on the given headers
* `:hsts` - a boolean on enabling HSTS or not, defaults to `true`
* `:expires` - seconds to expires for HSTS, defaults to `31_536_000` (1 year)
* `:preload` - a boolean to request inclusion on the HSTS preload list
(for full set of required flags, see: [Chromium HSTS submission site](https://hstspreload.org)),
defaults to `false`
* `:subdomains` - a boolean on including subdomains or not in HSTS,
defaults to `false`
* `:exclude` - exclude the given hosts from redirecting to the `https`
scheme. Defaults to `["localhost"]`
* `:host` - a new host to redirect to if the request's scheme is `http`,
defaults to `conn.host`. It may be set to a binary or a tuple
`{module, function, args}` that will be invoked on demand
* `:log` - The log level at which this plug should log its request info.
Default is `:info`. Can be `false` to disable logging.
## Port
It is not possible to directly configure the port in `Plug.SSL` because
HSTS expects the port to be 443 for SSL. If you are not using HSTS and
want to redirect to HTTPS on another port, you can sneak it alongside
the host, for example: `host: "example.com:443"`.
"""
@behaviour Plug
require Logger
import Plug.Conn
@strong_tls_ciphers [
'ECDHE-RSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'DHE-RSA-AES256-GCM-SHA384',
'DHE-RSA-AES128-GCM-SHA256'
]
@compatible_tls_ciphers [
'ECDHE-RSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'DHE-RSA-AES256-GCM-SHA384',
'DHE-RSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES256-SHA384',
'ECDHE-ECDSA-AES256-SHA384',
'ECDHE-RSA-AES128-SHA256',
'ECDHE-ECDSA-AES128-SHA256',
'DHE-RSA-AES256-SHA256',
'DHE-RSA-AES128-SHA256',
'ECDHE-RSA-AES256-SHA',
'ECDHE-ECDSA-AES256-SHA',
'ECDHE-RSA-AES128-SHA',
'ECDHE-ECDSA-AES128-SHA'
]
@eccs [
:secp256r1,
:secp384r1,
:secp521r1
]
@doc """
Configures and validates the options given to the `:ssl` application.
This function is often called internally by adapters, such as Cowboy,
to validate and set reasonable defaults for SSL handling. Therefore
Plug users are not expected to invoke it directly, rather you pass
the relevant SSL options to your adapter which then invokes this.
## Options
This function accepts all options defined
[in Erlang/OTP `:ssl` documentation](http://erlang.org/doc/man/ssl.html).
Besides the options from `:ssl`, this function adds one extra option:
* `:cipher_suite` - it may be `:strong` or `:compatible`,
as outlined in the following section
Furthermore, it sets the following defaults:
* `secure_renegotiate: true` - to avoid certain types of man-in-the-middle attacks
* `reuse_sessions: true` - for improved handshake performance of recurring connections
For a complete guide on HTTPS and best practices, see [our Plug HTTPS Guide](https.html).
## Cipher Suites
To simplify configuration of TLS defaults, this function provides two preconfigured
options: `cipher_suite: :strong` and `cipher_suite: :compatible`. The ciphers
chosen and related configuration come from the [OWASP Cipher String Cheat
Sheet](https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet).
We've made two modifications to the suggested config from the OWASP recommendations.
First we include ECDSA certificates which are excluded from their configuration.
Second we have changed the order of the ciphers to deprioritize DHE because of
performance implications noted within the OWASP post itself. As the article notes
"...the TLS handshake with DHE hinders the CPU about 2.4 times more than ECDHE".
The **Strong** cipher suite only supports tlsv1.2. Ciphers are based on the OWASP
Group A+ list and include support for RSA or ECDSA certificates. The intention of
this configuration is to provide defaults that are as secure as possible, knowing
that it will not be fully compatible with older browsers and operating systems.
The **Compatible** cipher suite supports tlsv1, tlsv1.1 and tlsv1.2. Ciphers are
based on the OWASP Group B list and include support for RSA or ECDSA certificates.
The intention of this configuration is to provide defaults that are as secure as
possible while still maintaining support for older browsers and Android versions
4.3 and earlier.
For both suites we've specified the certificate curves secp256r1, secp384r1 and
secp521r1. Since OWASP doesn't prescribe curves, we've based the selection on
[Mozilla's recommendations](https://wiki.mozilla.org/Security/Server_Side_TLS#Cipher_names_correspondence_table).
**The cipher suites were last updated on 2018-JUN-14.**
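## Example
A minimal sketch (the certificate paths are placeholders):
    Plug.SSL.configure(
      cipher_suite: :strong,
      certfile: "/path/to/cert.pem",
      keyfile: "/path/to/key.pem"
    )
    #=> {:ok, options} | {:error, message}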
"""
@spec configure(Keyword.t()) :: {:ok, Keyword.t()} | {:error, String.t()}
def configure(options) do
options
|> check_for_missing_keys()
|> validate_ciphers()
|> normalize_ssl_files()
|> convert_to_charlist()
|> set_secure_defaults()
|> configure_managed_tls()
catch
{:configure, message} -> {:error, message}
else
options -> {:ok, options}
end
defp check_for_missing_keys(options) do
has_sni? = Keyword.has_key?(options, :sni_hosts) or Keyword.has_key?(options, :sni_fun)
has_key? = Keyword.has_key?(options, :key) or Keyword.has_key?(options, :keyfile)
has_cert? = Keyword.has_key?(options, :cert) or Keyword.has_key?(options, :certfile)
cond do
has_sni? -> options
not has_key? -> fail("missing option :key/:keyfile")
not has_cert? -> fail("missing option :cert/:certfile")
true -> options
end
end
defp normalize_ssl_files(options) do
ssl_files = [:keyfile, :certfile, :cacertfile, :dhfile]
Enum.reduce(ssl_files, options, &normalize_ssl_file(&1, &2))
end
defp normalize_ssl_file(key, options) do
value = options[key]
cond do
is_nil(value) ->
options
Path.type(value) == :absolute ->
put_ssl_file(options, key, value)
true ->
put_ssl_file(options, key, Path.expand(value, otp_app(options)))
end
end
defp put_ssl_file(options, key, value) do
value = to_charlist(value)
unless File.exists?(value) do
message =
"the file #{value} required by SSL's #{inspect(key)} either does not exist, " <>
"or the application does not have permission to access it"
fail(message)
end
Keyword.put(options, key, value)
end
defp otp_app(options) do
if app = options[:otp_app] do
Application.app_dir(app)
else
fail("the :otp_app option is required when setting relative SSL certfiles")
end
end
defp convert_to_charlist(options) do
Enum.reduce([:password], options, fn key, acc ->
if value = acc[key] do
Keyword.put(acc, key, to_charlist(value))
else
acc
end
end)
end
defp set_secure_defaults(options) do
options
|> Keyword.put_new(:secure_renegotiate, true)
|> Keyword.put_new(:reuse_sessions, true)
end
defp configure_managed_tls(options) do
{cipher_suite, options} = Keyword.pop(options, :cipher_suite)
case cipher_suite do
:strong -> set_strong_tls_defaults(options)
:compatible -> set_compatible_tls_defaults(options)
nil -> options
_ -> fail("unknown :cipher_suite named #{inspect(cipher_suite)}")
end
end
defp set_managed_tls_defaults(options) do
options
|> Keyword.put_new(:honor_cipher_order, true)
|> Keyword.put_new(:eccs, @eccs)
end
defp set_strong_tls_defaults(options) do
options
|> set_managed_tls_defaults
|> Keyword.put_new(:ciphers, @strong_tls_ciphers)
|> Keyword.put_new(:versions, [:"tlsv1.2"])
end
defp set_compatible_tls_defaults(options) do
options
|> set_managed_tls_defaults
|> Keyword.put_new(:ciphers, @compatible_tls_ciphers)
|> Keyword.put_new(:versions, [:"tlsv1.2", :"tlsv1.1", :tlsv1])
end
defp validate_ciphers(options) do
options
|> Keyword.get(:ciphers, [])
|> Enum.each(&validate_cipher/1)
options
end
defp validate_cipher(cipher) do
if is_binary(cipher) do
message =
"invalid cipher #{inspect(cipher)} in cipher list. " <>
"Strings (double-quoted) are not allowed in ciphers. " <>
"Ciphers must be either charlists (single-quoted) or tuples. " <>
"See the ssl application docs for reference"
fail(message)
end
end
defp fail(message) when is_binary(message) do
throw({:configure, message})
end
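# Plug callbacks: options are processed once in init/1, and call/2 receives
# the precomputed {hsts_header, exclude, host, rewrite_on, log_level} tuple.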
@impl true
def init(opts) do
host = Keyword.get(opts, :host)
rewrite_on = List.wrap(Keyword.get(opts, :rewrite_on))
log = Keyword.get(opts, :log, :info)
exclude = Keyword.get(opts, :exclude, ["localhost"])
{hsts_header(opts), exclude, host, rewrite_on, log}
end
@impl true
def call(conn, {hsts, exclude, host, rewrite_on, log_level}) do
conn = rewrite_on(conn, rewrite_on)
cond do
:lists.member(conn.host, exclude) -> conn
conn.scheme == :https -> put_hsts_header(conn, hsts)
true -> redirect_to_https(conn, host, log_level)
end
end
defp rewrite_on(conn, [:x_forwarded_proto | rewrite_on]) do
conn
|> put_scheme(get_req_header(conn, "x-forwarded-proto"))
|> rewrite_on(rewrite_on)
end
defp rewrite_on(conn, [:x_forwarded_port | rewrite_on]) do
conn
|> put_port(get_req_header(conn, "x-forwarded-port"))
|> rewrite_on(rewrite_on)
end
defp rewrite_on(conn, [:x_forwarded_host | rewrite_on]) do
conn
|> put_host(get_req_header(conn, "x-forwarded-host"))
|> rewrite_on(rewrite_on)
end
defp rewrite_on(_conn, [other | _rewrite_on]) do
raise "unknown rewrite: #{inspect(other)}"
end
defp rewrite_on(conn, []) do
conn
end
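# Rewrites the scheme reported by the proxy; when the connection is on the
# default port, the port is normalized as well (80 <-> 443).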
defp put_scheme(%{scheme: :http, port: 80} = conn, ["https"]),
do: %{conn | scheme: :https, port: 443}
defp put_scheme(conn, ["https"]),
do: %{conn | scheme: :https}
defp put_scheme(%{scheme: :https, port: 443} = conn, ["http"]),
do: %{conn | scheme: :http, port: 80}
defp put_scheme(conn, ["http"]),
do: %{conn | scheme: :http}
defp put_scheme(conn, _scheme),
do: conn
defp put_host(conn, [proper_host]),
do: %{conn | host: proper_host}
defp put_host(conn, _),
do: conn
defp put_port(conn, headers) do
with [header] <- headers,
{port, ""} <- Integer.parse(header) do
%{conn | port: port}
else
_ -> conn
end
end
# http://tools.ietf.org/html/draft-hodges-strict-transport-sec-02
defp hsts_header(opts) do
if Keyword.get(opts, :hsts, true) do
expires = Keyword.get(opts, :expires, 31_536_000)
preload = Keyword.get(opts, :preload, false)
subdomains = Keyword.get(opts, :subdomains, false)
"max-age=#{expires}" <>
if(preload, do: "; preload", else: "") <>
if(subdomains, do: "; includeSubDomains", else: "")
end
end
defp put_hsts_header(conn, hsts_header) when is_binary(hsts_header) do
put_resp_header(conn, "strict-transport-security", hsts_header)
end
defp put_hsts_header(conn, nil), do: conn
defp redirect_to_https(%{host: host} = conn, custom_host, log_level) do
status = if conn.method in ~w(HEAD GET), do: 301, else: 307
scheme_and_host = "https://" <> host(custom_host, host)
location = scheme_and_host <> conn.request_path <> qs(conn.query_string)
log_level &&
Logger.log(log_level, fn ->
[
"Plug.SSL is redirecting ",
conn.method,
?\s,
conn.request_path,
" to ",
scheme_and_host,
" with status ",
Integer.to_string(status)
]
end)
conn
|> put_resp_header("location", location)
|> send_resp(status, "")
|> halt
end
defp host(nil, host), do: host
defp host(host, _) when is_binary(host), do: host
defp host({mod, fun, args}, host), do: host(apply(mod, fun, args), host)
# TODO: Deprecate this format
defp host({:system, env}, host), do: host(System.get_env(env), host)
defp qs(""), do: ""
defp qs(qs), do: "?" <> qs
end
|
lib/plug/ssl.ex
|
defmodule Temple.Html do
require Temple.Elements
@moduledoc """
The `Temple.Html` module defines macros for all HTML5 compliant elements.
`Temple.Html` macros must be called inside of a `Temple.temple/1` block.
*Note*: Only the lowest arity macros are documented. Void elements are defined as a 1-arity macro and non-void elements are defined as 0, 1, and 2-arity macros.
## Attributes
HTML element macros accept a keyword list or a map of attributes to be emitted into the element's opening tag. Multi-word attribute keys written in snake_case (`data_url`) will be transformed into kebab-case (`data-url`).
## Children
Non-void elements (such as `div`) accept a block that can be used to nest other tags or text nodes. These blocks can contain arbitrary Elixir code such as variables and for comprehensions.
If you are only emitting a text node within a block, you can use the shortened syntax by passing the text in as the first parameter of the tag.
## Example
```
temple do
# empty non-void element
div()
# non-void element with keyword list attributes
div class: "text-red", id: "my-el"
# non-void element with map attributes
div %{:class => "text-red", "id" => "my-el"}
# non-void element with children
div do
text "Hello, world!"
for name <- @names do
div data_name: name
end
end
# non-void element with a single text node
div "Hello, world!", class: "text-green"
# void elements
input name: "comments", placeholder: "Enter a comment..."
end
# {:safe,
# "<div></div>
# <div class=\"text-red\" id=\"my-el\"></div>
# <div>
# Hello, world!
# <div data-name=\"Alice\"></div>
# <div data-name=\"Bob\"></div>
# <div data-name=\"Carol\"></div>
# </div>
# <div class=\"text-green\">Hello, world!</div>
# <input name=\"comments\" placeholder=\"Enter a comment...\">"
# }
```
"""
@nonvoid_elements ~w[
head title style script
noscript template
body section nav article aside h1 h2 h3 h4 h5 h6
header footer address main
p pre blockquote ol ul li dl dt dd figure figcaption div
a em strong small s cite q dfn abbr data time code var samp kbd
sub sup i b u mark ruby rt rp bdi bdo span
ins del
iframe object video audio canvas
map
table caption colgroup tbody thead tfoot tr td th
form fieldset legend label button select datalist optgroup
option textarea output progress meter
details summary menuitem menu
]a
@void_elements ~w[
meta link base
area br col embed hr img input keygen param source track wbr
]a
@doc false
def nonvoid_elements, do: @nonvoid_elements
@doc false
def void_elements, do: @void_elements
for el <- @nonvoid_elements do
Temple.Elements.defelement(unquote(el), :nonvoid)
end
for el <- @void_elements do
Temple.Elements.defelement(unquote(el), :void)
end
defmacro html(attrs \\ [], [{:do, _inner}] = block) do
doc_type =
quote location: :keep do
Temple.Utils.put_buffer(var!(buff, Temple.Html), "<!DOCTYPE html>")
end
[doc_type, Temple.Elements.nonvoid_element(:html, attrs, block)]
end
end
|
lib/temple/html.ex
|
defmodule RDF.XSD.Time do
@moduledoc """
`RDF.XSD.Datatype` for XSD times.
Options:
- `tz`: allows specifying a timezone, which is not supported by Elixir's `Time` struct;
note that it will also overwrite any timezone already present in an input lexical
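For example (a sketch; `new/2` is the constructor generated for this datatype):
    RDF.XSD.Time.new("12:00:00", tz: "+01:00")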
"""
@type valid_value :: Time.t() | {Time.t(), true}
use RDF.XSD.Datatype.Primitive,
name: "time",
id: RDF.Utils.Bootstrapping.xsd_iri("time")
alias RDF.XSD
# TODO: Are GMT/UTC actually allowed? Maybe because it is supported by Elixir's Datetime ...
@grammar ~r/\A(\d{2}:\d{2}:\d{2}(?:\.\d+)?)((?:[\+\-]\d{2}:\d{2})|UTC|GMT|Z)?\Z/
@tz_number_grammar ~r/\A(?:([\+\-])(\d{2}):(\d{2}))\Z/
def_applicable_facet XSD.Facets.ExplicitTimezone
def_applicable_facet XSD.Facets.Pattern
@doc false
def explicit_timezone_conform?(:required, {_, true}, _), do: true
def explicit_timezone_conform?(:required, _, _), do: false
def explicit_timezone_conform?(:prohibited, {_, true}, _), do: false
def explicit_timezone_conform?(:prohibited, _, _), do: true
def explicit_timezone_conform?(:optional, _, _), do: true
@doc false
def pattern_conform?(pattern, _value, lexical) do
XSD.Facets.Pattern.conform?(pattern, lexical)
end
@impl XSD.Datatype
def lexical_mapping(lexical, opts) do
case Regex.run(@grammar, lexical) do
[_, time] ->
do_lexical_mapping(time, opts)
[_, time, tz] ->
do_lexical_mapping(
time,
opts |> Keyword.put_new(:tz, tz) |> Keyword.put_new(:lexical_present, true)
)
_ ->
@invalid_value
end
end
defp do_lexical_mapping(value, opts) do
case Time.from_iso8601(value) do
{:ok, time} -> elixir_mapping(time, opts)
_ -> @invalid_value
end
|> case do
{{_, true} = value, _} -> value
value -> value
end
end
@impl XSD.Datatype
@spec elixir_mapping(valid_value | any, Keyword.t()) ::
value | {value, XSD.Datatype.uncanonical_lexical()}
def elixir_mapping(value, opts)
def elixir_mapping(%Time{} = value, opts) do
if tz = Keyword.get(opts, :tz) do
case with_offset(value, tz) do
@invalid_value ->
@invalid_value
time ->
{{time, true}, unless(Keyword.get(opts, :lexical_present), do: Time.to_iso8601(value))}
end
else
value
end
end
def elixir_mapping(_, _), do: @invalid_value
defp with_offset(time, zone) when zone in ~W[Z UTC GMT], do: time
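# Normalizes the time by applying the numeric "+hh:mm"/"-hh:mm" offset
# (adding for "-", subtracting for "+"), wrapping around midnight.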
defp with_offset(time, offset) do
case Regex.run(@tz_number_grammar, offset) do
[_, "-", hour, minute] ->
{hour, minute} = {String.to_integer(hour), String.to_integer(minute)}
minute = time.minute + minute
{rem(time.hour + hour + div(minute, 60), 24), rem(minute, 60)}
[_, "+", hour, minute] ->
{hour, minute} = {String.to_integer(hour), String.to_integer(minute)}
if (minute = time.minute - minute) < 0 do
{rem(24 + time.hour - hour - 1, 24), minute + 60}
else
{rem(24 + time.hour - hour - div(minute, 60), 24), rem(minute, 60)}
end
nil ->
@invalid_value
end
|> case do
{hour, minute} -> %Time{time | hour: hour, minute: minute}
@invalid_value -> @invalid_value
end
end
@impl XSD.Datatype
@spec canonical_mapping(valid_value) :: String.t()
def canonical_mapping(value)
def canonical_mapping(%Time{} = value), do: Time.to_iso8601(value)
def canonical_mapping({%Time{} = value, true}), do: canonical_mapping(value) <> "Z"
@impl XSD.Datatype
@spec init_valid_lexical(valid_value, XSD.Datatype.uncanonical_lexical(), Keyword.t()) ::
XSD.Datatype.uncanonical_lexical()
def init_valid_lexical(value, lexical, opts)
def init_valid_lexical({value, _}, nil, opts) do
if tz = Keyword.get(opts, :tz) do
canonical_mapping(value) <> tz
end
end
def init_valid_lexical(_, nil, _), do: nil
def init_valid_lexical(_, lexical, opts) do
if tz = Keyword.get(opts, :tz) do
# When using the :tz option, we'll have to strip off the original timezone
case Regex.run(@grammar, lexical) do
[_, time] -> time
[_, time, _] -> time
end <> tz
else
lexical
end
end
@impl XSD.Datatype
@spec init_invalid_lexical(any, Keyword.t()) :: String.t()
def init_invalid_lexical(value, opts)
def init_invalid_lexical({time, tz}, opts) do
if tz_opt = Keyword.get(opts, :tz) do
to_string(time) <> tz_opt
else
to_string(time) <> to_string(tz)
end
end
def init_invalid_lexical(value, _) when is_binary(value), do: value
def init_invalid_lexical(value, opts) do
if tz = Keyword.get(opts, :tz) do
to_string(value) <> tz
else
to_string(value)
end
end
@impl RDF.Literal.Datatype
def do_cast(value)
def do_cast(%XSD.String{} = xsd_string), do: new(xsd_string.value)
def do_cast(literal) do
cond do
XSD.DateTime.datatype?(literal) ->
case literal.value do
%NaiveDateTime{} = datetime ->
datetime
|> NaiveDateTime.to_time()
|> new()
%DateTime{} ->
[_date, time_with_zone] =
literal
|> XSD.DateTime.canonical_lexical_with_zone()
|> String.split("T", parts: 2)
new(time_with_zone)
end
true ->
super(literal)
end
end
@impl RDF.Literal.Datatype
def do_equal_value_same_or_derived_datatypes?(left, right)
def do_equal_value_same_or_derived_datatypes?(%{value: %{}}, %{value: tz_tuple}) when is_tuple(tz_tuple), do: nil
def do_equal_value_same_or_derived_datatypes?(%{value: tz_tuple}, %{value: %{}}) when is_tuple(tz_tuple), do: nil
def do_equal_value_same_or_derived_datatypes?(left, right), do: super(left, right)
@doc """
Extracts the timezone string from a `RDF.XSD.Time` value.
"""
def tz(time_literal) do
if valid?(time_literal) do
time_literal
|> lexical()
|> XSD.Utils.DateTime.tz()
end
end
@doc """
Converts a time literal to a canonical string, preserving the zone information.
"""
@spec canonical_lexical_with_zone(RDF.Literal.t() | t()) :: String.t() | nil
def canonical_lexical_with_zone(%RDF.Literal{literal: xsd_time}),
do: canonical_lexical_with_zone(xsd_time)
def canonical_lexical_with_zone(%__MODULE__{} = xsd_time) do
case tz(xsd_time) do
nil ->
nil
zone when zone in ["Z", "", "+00:00"] ->
canonical_lexical(xsd_time)
zone ->
xsd_time
|> lexical()
|> String.replace_trailing(zone, "")
|> Time.from_iso8601!()
|> new()
|> canonical_lexical()
|> Kernel.<>(zone)
end
end
end
|
lib/rdf/xsd/datatypes/time.ex
|
defmodule Membrane.H264.FFmpeg.Parser do
@moduledoc """
Membrane element providing parser for H264 encoded video stream.
Uses the parser provided by FFmpeg.
By default, this parser splits the stream into h264 access units,
each of which is a sequence of NAL units corresponding to one
video frame, and equips them with the following metadata entries
under the `:h264` key:
- `key_frame?: boolean` - determines whether the frame is a h264
I frame.
Setting custom packetization options affects metadata, see `alignment`
and `attach_nalus?` options for details.
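For example, the element could be configured like this (a sketch; the
surrounding pipeline spec is omitted):
    %Membrane.H264.FFmpeg.Parser{framerate: {30, 1}, alignment: :au, attach_nalus?: true}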
"""
use Membrane.Filter
alias __MODULE__.{NALu, Native}
alias Membrane.Buffer
alias Membrane.Caps.Video.H264
require Membrane.Logger
def_input_pad :input,
demand_unit: :buffers,
caps: :any
def_output_pad :output,
caps: {H264, stream_format: :byte_stream}
def_options framerate: [
type: :framerate,
spec: H264.framerate_t(),
default: {0, 1},
description: """
Framerate of video stream, see `t:Membrane.Caps.Video.H264.framerate_t/0`
"""
],
sps: [
type: :binary,
default: <<>>,
description: """
Sequence Parameter Set NAL unit - if absent in the stream, should
be provided via this option.
"""
],
pps: [
type: :binary,
default: <<>>,
description: """
Picture Parameter Set NAL unit - if absent in the stream, should
be provided via this option.
"""
],
alignment: [
type: :atom,
spec: :au | :nal,
default: :au,
description: """
Stream units carried by each output buffer. See `t:Membrane.Caps.Video.H264.alignment_t/0`.
If alignment is `:nal`, the following metadata entries are added:
- `type` - h264 nalu type
- `new_access_unit: access_unit_metadata` - added whenever the new access unit starts.
`access_unit_metadata` is the metadata that would be merged into the buffer metadata
normally (if `alignment` was `:au`).
- `end_access_unit: true` - added for each NALu that ends an access unit.
"""
],
attach_nalus?: [
type: :boolean,
default: false,
description: """
Determines whether to attach NAL units list to the metadata when `alignment` option
is set to `:au`. For details see `t:Membrane.Caps.Video.H264.nalu_in_metadata_t/0`.
"""
],
skip_until_keyframe?: [
type: :boolean,
default: false,
description: """
Determines whether to drop the stream until the first key frame is received.
"""
]
@impl true
def handle_init(opts) do
state = %{
parser_ref: nil,
partial_frame: <<>>,
first_frame_prefix: opts.sps <> opts.pps,
framerate: opts.framerate,
alignment: opts.alignment,
attach_nalus?: opts.attach_nalus?,
skip_until_keyframe?: opts.skip_until_keyframe?,
metadata: nil,
timestamp: 0
}
{:ok, state}
end
@impl true
def handle_stopped_to_prepared(_ctx, state) do
with {:ok, parser_ref} <- Native.create() do
{:ok, %{state | parser_ref: parser_ref}}
else
{:error, reason} -> {{:error, reason}, state}
end
end
@impl true
def handle_prepared_to_playing(_ctx, %{skip_until_keyframe?: true} = state) do
{{:ok, event: {:input, %Membrane.KeyframeRequestEvent{}}}, state}
end
@impl true
def handle_prepared_to_playing(_ctx, state) do
{:ok, state}
end
@impl true
def handle_demand(:output, _size, :buffers, _ctx, state) do
{{:ok, demand: :input}, state}
end
@impl true
def handle_process(:input, %Buffer{payload: payload, metadata: metadata}, ctx, state) do
payload =
if ctx.pads.output.start_of_stream? do
payload
else
state.first_frame_prefix <> payload
end
with {:ok, sizes, resolution_changes} <- Native.parse(payload, state.parser_ref) do
{bufs, state} = parse_access_units(payload, sizes, metadata, state)
actions = parse_resolution_changes(state, bufs, resolution_changes)
{{:ok, actions ++ [redemand: :output]}, state}
else
{:error, reason} -> {{:error, reason}, state}
end
end
# Analyze resolution changes and generate appropriate caps before the corresponding buffers
defp parse_resolution_changes(state, bufs, resolution_changes, acc \\ [], index_offset \\ 0)
defp parse_resolution_changes(_state, bufs, [], acc, _index_offset) do
acc ++ [buffer: {:output, bufs}]
end
defp parse_resolution_changes(state, bufs, [meta | resolution_changes], acc, index_offset) do
updated_index = meta.index - index_offset
{old_bufs, next_bufs} = Enum.split(bufs, updated_index)
next_caps = mk_caps(state, meta.width, meta.height)
parse_resolution_changes(
state,
next_bufs,
resolution_changes,
acc ++ [buffer: {:output, old_bufs}, caps: {:output, next_caps}],
meta.index
)
end
@impl true
def handle_caps(:input, _caps, _ctx, state) do
# ignoring caps, new ones will be generated in handle_process
{:ok, state}
end
@impl true
def handle_end_of_stream(:input, _ctx, state) do
with {:ok, sizes} <- Native.flush(state.parser_ref) do
{bufs, state} = parse_access_units(<<>>, sizes, state.metadata, state)
if state.partial_frame != <<>> do
Membrane.Logger.warn("Discarding incomplete frame because of end of stream")
end
actions = [buffer: {:output, bufs}, end_of_stream: :output]
{{:ok, actions}, state}
end
end
@impl true
def handle_prepared_to_stopped(_ctx, state) do
{:ok, %{state | parser_ref: nil}}
end
defp parse_access_units(input, au_sizes, metadata, %{partial_frame: <<>>} = state) do
state = update_metadata(metadata, state)
{buffers, input, state} = do_parse_access_units(input, au_sizes, metadata, state, [])
{buffers, %{state | partial_frame: input}}
end
defp parse_access_units(input, [], _metadata, state) do
{[], %{state | partial_frame: state.partial_frame <> input}}
end
defp parse_access_units(input, [au_size | au_sizes], metadata, state) do
{first_au_buffers, input, state} =
do_parse_access_units(state.partial_frame <> input, [au_size], state.metadata, state, [])
state = update_metadata(metadata, state)
{buffers, input, state} = do_parse_access_units(input, au_sizes, metadata, state, [])
{first_au_buffers ++ buffers, %{state | partial_frame: input}}
end
defp do_parse_access_units(input, [], _metadata, state, acc) do
{Enum.reverse(acc), input, state}
end
defp do_parse_access_units(input, [au_size | au_sizes], metadata, state, acc) do
<<au::binary-size(au_size), rest::binary>> = input
# setting both :timestamp and :dts in order to maintain backward compatibility
metadata = Map.put(metadata, :timestamp, state.timestamp) |> Map.put(:dts, state.timestamp)
{nalus, au_metadata} = NALu.parse(au)
au_metadata = Map.merge(metadata, au_metadata)
state = Map.update!(state, :skip_until_keyframe?, &(&1 and not au_metadata.h264.key_frame?))
buffers =
case state do
%{skip_until_keyframe?: true} ->
[]
%{alignment: :au, attach_nalus?: true} ->
[%Buffer{payload: au, metadata: put_in(au_metadata, [:h264, :nalus], nalus)}]
%{alignment: :au, attach_nalus?: false} ->
[%Buffer{payload: au, metadata: au_metadata}]
%{alignment: :nal} ->
Enum.map(nalus, fn nalu ->
%Buffer{
payload: :binary.part(au, nalu.prefixed_poslen),
metadata: Map.merge(metadata, nalu.metadata)
}
end)
end
do_parse_access_units(rest, au_sizes, metadata, bump_timestamp(state), [buffers | acc])
end
defp update_metadata(%{timestamp: timestamp} = metadata, state) do
%{state | timestamp: timestamp, metadata: metadata}
end
defp update_metadata(metadata, state) do
%{state | metadata: metadata}
end
defp bump_timestamp(%{framerate: {0, _}} = state) do
state
end
defp bump_timestamp(state) do
use Ratio
%{timestamp: timestamp, framerate: {num, denom}} = state
timestamp = timestamp + Ratio.new(denom * Membrane.Time.second(), num)
%{state | timestamp: timestamp}
end
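# Worked example: with framerate {30, 1} each access unit advances the
# timestamp by Ratio.new(1 * Membrane.Time.second(), 30), i.e. 1/30 s;
# a {0, _} framerate (clause above) leaves timestamps untouched.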
defp mk_caps(state, width, height) do
{:ok, profile} = Native.get_profile(state.parser_ref)
%H264{
width: width,
height: height,
framerate: state.framerate,
alignment: state.alignment,
nalu_in_metadata?: state.attach_nalus?,
stream_format: :byte_stream,
profile: profile
}
end
end
|
lib/membrane_h264_ffmpeg/parser.ex
| 0.875341 | 0.50293 |
parser.ex
|
starcoder
|
defmodule Cldr.Calendar.Julian do
@behaviour Calendar
@behaviour Cldr.Calendar
@type year :: -9999..-1 | 1..9999
@type month :: 1..12
@type day :: 1..31
@quarters_in_year 4
@months_in_year 12
@months_in_quarter 3
@days_in_week 7
@doc """
Defines the CLDR calendar type for this calendar.
This type is used in support of `Cldr.Calendar.localize/3`.
Currently only `:gregorian` is supported.
"""
@impl true
def cldr_calendar_type do
:gregorian
end
@doc """
Identifies that this calendar is month based.
"""
@impl true
def calendar_base do
:month
end
@epoch Cldr.Calendar.Gregorian.date_to_iso_days(0, 12, 30)
def epoch do
@epoch
end
@doc """
Determines if the date given is valid according to this calendar.
"""
@impl true
def valid_date?(0, _month, _day) do
false
end
@months_with_30_days [4, 6, 9, 11]
def valid_date?(_year, month, day) when month in @months_with_30_days and day in 1..30 do
true
end
@months_with_31_days [1, 3, 5, 7, 8, 10, 12]
def valid_date?(_year, month, day) when month in @months_with_31_days and day in 1..31 do
true
end
def valid_date?(year, 2, 29) do
leap_year?(year)
end
def valid_date?(_year, 2, day) when day in 1..28 do
true
end
def valid_date?(_year, _month, _day) do
false
end
@doc """
Calculates the year and era from the given `year`.
The ISO calendar has two eras: the current era which
starts in year 1 and is defined as era "1". And a
second era for those years less than 1 defined as
era "0".
"""
@spec year_of_era(year) :: {year, era :: 0..1}
unless Code.ensure_loaded?(Calendar.ISO) && function_exported?(Calendar.ISO, :year_of_era, 3) do
@impl true
end
def year_of_era(year) when year > 0 do
{year, 1}
end
def year_of_era(year) when year <= 0 do
{abs(year), 0}
end
@doc """
Calculates the year and era from the given `year`,
`month` and `day`.
"""
@spec year_of_era(year, month, day) :: {year :: Calendar.year(), era :: 0..1}
@impl true
def year_of_era(year, _month, _day) do
year_of_era(year)
end
@doc """
Returns the calendar year as displayed
on rendered calendars.
"""
@spec calendar_year(year, month, day) :: Calendar.year()
@impl true
def calendar_year(year, _month, _day) do
year
end
@doc """
Returns the related gregorian year as displayed
on rendered calendars.
"""
@spec related_gregorian_year(year, month, day) :: Calendar.year()
@impl true
def related_gregorian_year(year, month, day) do
iso_days = date_to_iso_days(year, month, day)
{year, _month, _day} = Cldr.Calendar.Gregorian.date_from_iso_days(iso_days)
year
end
@doc """
Returns the extended year as displayed
on rendered calendars.
"""
@spec extended_year(year, month, day) :: Calendar.year()
@impl true
def extended_year(year, _month, _day) do
year
end
@doc """
Returns the cyclic year as displayed
on rendered calendars.
"""
@spec cyclic_year(year, month, day) :: Calendar.year()
@impl true
def cyclic_year(year, _month, _day) do
year
end
@doc """
Calculates the quarter of the year from the given `year`, `month`, and `day`.
It is an integer from 1 to 4.
"""
@spec quarter_of_year(year, month, day) :: 1..4
@impl true
def quarter_of_year(_year, month, _day) do
Float.ceil(month / @months_in_quarter)
|> trunc
end
@doc """
Calculates the month of the year from the given `year`, `month`, and `day`.
It is an integer from 1 to 12.
"""
@spec month_of_year(year, month, day) :: month
@impl true
def month_of_year(_year, month, _day) do
month
end
@doc """
Calculates the week of the year from the given `year`, `month`, and `day`.
Weeks are not defined for the Julian calendar, so `{:error, :not_defined}` is returned.
"""
@spec week_of_year(year, month, day) :: {:error, :not_defined}
@impl true
def week_of_year(_year, _month, _day) do
{:error, :not_defined}
end
@doc """
Calculates the ISO week of the year from the given `year`, `month`, and `day`.
Weeks are not defined for the Julian calendar, so `{:error, :not_defined}` is returned.
"""
@spec iso_week_of_year(year, month, day) :: {:error, :not_defined}
@impl true
def iso_week_of_year(_year, _month, _day) do
{:error, :not_defined}
end
@doc """
Calculates the week of the month from the given `year`, `month`, and `day`.
Weeks are not defined for the Julian calendar, so `{:error, :not_defined}` is returned.
"""
@spec week_of_month(year, month, day) :: {pos_integer(), pos_integer()} | {:error, :not_defined}
@impl true
def week_of_month(_year, _month, _day) do
{:error, :not_defined}
end
@doc """
Calculates the day and era from the given `year`, `month`, and `day`.
"""
@spec day_of_era(year, month, day) :: {day :: pos_integer(), era :: 0..1}
@impl true
def day_of_era(year, month, day) do
{_, era} = year_of_era(year)
days = date_to_iso_days(year, month, day)
{days + epoch(), era}
end
@doc """
Calculates the day of the year from the given `year`, `month`, and `day`.
"""
@spec day_of_year(year, month, day) :: 1..366
@impl true
def day_of_year(year, month, day) do
first_day = date_to_iso_days(year, 1, 1)
this_day = date_to_iso_days(year, month, day)
this_day - first_day + 1
end
@doc """
Calculates the day of the week from the given `year`, `month`, and `day`.
It is an integer from 1 to 7, where 1 is Monday and 7 is Sunday.
"""
if Code.ensure_loaded?(Date) && function_exported?(Date, :day_of_week, 2) do
@spec day_of_week(year, month, day, 1..7 | :default) ::
{Calendar.day_of_week(), first_day_of_week :: non_neg_integer(),
last_day_of_week :: non_neg_integer()}
@impl Calendar
@epoch_day_of_week 6
def day_of_week(year, month, day, :default) do
days = date_to_iso_days(year, month, day)
days_after_saturday = rem(days, 7)
day_of_week = Cldr.Math.amod(days_after_saturday + @epoch_day_of_week, @days_in_week)
{day_of_week, 1, 7}
end
else
@spec day_of_week(year, month, day) :: 1..7
@impl Calendar
@epoch_day_of_week 6
def day_of_week(year, month, day) do
days = date_to_iso_days(year, month, day)
days_after_saturday = rem(days, 7)
Cldr.Math.amod(days_after_saturday + @epoch_day_of_week, @days_in_week)
end
end
@doc """
Calculates the number of periods in a given `year`. A period
corresponds to a month in month-based calendars and
a week in week-based calendars.
"""
@impl true
def periods_in_year(_year) do
@months_in_year
end
@impl true
def weeks_in_year(_year) do
{:error, :not_defined}
end
@doc """
Returns the number of days in a given year.
"""
@impl true
def days_in_year(year) do
if leap_year?(year), do: 366, else: 365
end
@doc """
Returns how many days there are in the given year-month.
"""
@spec days_in_month(year, month) :: 28..31
@impl true
def days_in_month(year, 2) do
if leap_year?(year), do: 29, else: 28
end
def days_in_month(_year, month) when month in @months_with_30_days do
30
end
def days_in_month(_year, month) when month in @months_with_31_days do
31
end
@doc """
Returns the number of days in a week.
"""
def days_in_week do
@days_in_week
end
@doc """
Returns a `Date.Range.t` representing
a given year.
"""
@impl true
def year(year) do
last_month = months_in_year(year)
days_in_last_month = days_in_month(year, last_month)
with {:ok, start_date} <- Date.new(year, 1, 1, __MODULE__),
{:ok, end_date} <- Date.new(year, last_month, days_in_last_month, __MODULE__) do
Date.range(start_date, end_date)
end
end
@doc """
Returns a `Date.Range.t` representing
a given quarter of a year.
"""
@impl true
def quarter(year, quarter) do
months_in_quarter = div(months_in_year(year), @quarters_in_year)
starting_month = months_in_quarter * (quarter - 1) + 1
starting_day = 1
ending_month = starting_month + months_in_quarter - 1
ending_day = days_in_month(year, ending_month)
with {:ok, start_date} <- Date.new(year, starting_month, starting_day, __MODULE__),
{:ok, end_date} <- Date.new(year, ending_month, ending_day, __MODULE__) do
Date.range(start_date, end_date)
end
end
@doc """
Returns a `Date.Range.t` representing
a given month of a year.
"""
@impl true
def month(year, month) do
starting_day = 1
ending_day = days_in_month(year, month)
with {:ok, start_date} <- Date.new(year, month, starting_day, __MODULE__),
{:ok, end_date} <- Date.new(year, month, ending_day, __MODULE__) do
Date.range(start_date, end_date)
end
end
@doc """
Returns a `Date.Range.t` representing
a given week of a year. Weeks are not defined for the
Julian calendar, so `{:error, :not_defined}` is returned.
"""
@impl true
def week(_year, _week) do
{:error, :not_defined}
end
@doc """
Adds an `increment` number of `date_part`s
to a `year-month-day`.
`date_part` can be `:quarters`
or `:months`.
"""
@impl true
def plus(year, month, day, date_part, increment, options \\ [])
def plus(year, month, day, :quarters, quarters, options) do
months = quarters * @months_in_quarter
plus(year, month, day, :months, months, options)
end
def plus(year, month, day, :months, months, options) do
months_in_year = months_in_year(year)
{year_increment, new_month} = Cldr.Math.div_amod(month + months, months_in_year)
new_year = year + year_increment
new_day =
if Keyword.get(options, :coerce, false) do
max_new_day = days_in_month(new_year, new_month)
min(day, max_new_day)
else
day
end
{new_year, new_month, new_day}
end
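# Worked example: adding one month to January 31st with day coercion
# clamps the day to the length of the target month:
#
#     plus(2019, 1, 31, :months, 1, coerce: true)
#     #=> {2019, 2, 28}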
@doc """
Returns if the given year is a leap year.
"""
@spec leap_year?(year) :: boolean()
@impl true
def leap_year?(year) do
Cldr.Math.mod(year, 4) == if year > 0, do: 0, else: 3
end
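# Every fourth Julian year is a leap year. There is no year 0, so for
# BCE years the remainder test is `mod(year, 4) == 3`; for example both
# leap_year?(4) and leap_year?(-1) return true.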
@doc """
Returns the number of days since the calendar
epoch for a given `year-month-day`
"""
def date_to_iso_days(year, month, day) do
adjustment = adjustment(year, month, day)
year = if year < 0, do: year + 1, else: year
epoch() - 1 +
365 * (year - 1) +
Integer.floor_div(year - 1, 4) +
Integer.floor_div(367 * month - 362, @months_in_year) +
adjustment +
day
end
defp adjustment(year, month, _day) do
cond do
month <= 2 -> 0
leap_year?(year) -> -1
true -> -2
end
end
@doc """
Returns a `{year, month, day}` calculated from
the number of `iso_days`.
"""
def date_from_iso_days(iso_days) do
approx = Integer.floor_div(4 * (iso_days - epoch()) + 1464, 1461)
year = if approx <= 0, do: approx - 1, else: approx
prior_days = iso_days - date_to_iso_days(year, 1, 1)
correction = correction(iso_days, year)
month = Integer.floor_div(@months_in_year * (prior_days + correction) + 373, 367)
day = 1 + (iso_days - date_to_iso_days(year, month, 1))
{year, month, day}
end
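# date_from_iso_days/1 is the inverse of date_to_iso_days/3: for any
# valid Julian date, date_from_iso_days(date_to_iso_days(y, m, d))
# returns {y, m, d}.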
defp correction(iso_days, year) do
cond do
iso_days < date_to_iso_days(year, 3, 1) -> 0
leap_year?(year) -> 1
true -> 2
end
end
@doc """
Returns the `t:Calendar.iso_days/0` format of the specified date.
"""
@impl true
@spec naive_datetime_to_iso_days(
Calendar.year(),
Calendar.month(),
Calendar.day(),
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond()
) :: Calendar.iso_days()
def naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond) do
{date_to_iso_days(year, month, day), time_to_day_fraction(hour, minute, second, microsecond)}
end
@doc """
Converts the `t:Calendar.iso_days/0` format to the datetime format specified by this calendar.
"""
@spec naive_datetime_from_iso_days(Calendar.iso_days()) :: {
Calendar.year(),
Calendar.month(),
Calendar.day(),
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond()
}
@impl Calendar
def naive_datetime_from_iso_days({days, day_fraction}) do
{year, month, day} = date_from_iso_days(days)
{hour, minute, second, microsecond} = time_from_day_fraction(day_fraction)
{year, month, day, hour, minute, second, microsecond}
end
@doc false
@impl Calendar
defdelegate day_rollover_relative_to_midnight_utc, to: Calendar.ISO
@doc false
@impl Calendar
defdelegate months_in_year(year), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate time_from_day_fraction(day_fraction), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate time_to_day_fraction(hour, minute, second, microsecond), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate parse_date(date_string), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate parse_time(time_string), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate parse_utc_datetime(dt_string), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate parse_naive_datetime(dt_string), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate date_to_string(year, month, day), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate datetime_to_string(
year,
month,
day,
hour,
minute,
second,
microsecond,
time_zone,
zone_abbr,
utc_offset,
std_offset
),
to: Calendar.ISO
@doc false
@impl Calendar
defdelegate naive_datetime_to_string(
year,
month,
day,
hour,
minute,
second,
microsecond
),
to: Calendar.ISO
@doc false
@impl Calendar
defdelegate time_to_string(hour, minute, second, microsecond), to: Calendar.ISO
@doc false
@impl Calendar
defdelegate valid_time?(hour, minute, second, microsecond), to: Calendar.ISO
end
|
lib/cldr/calendar/calendars/julian.ex
| 0.911798 | 0.634034 |
julian.ex
|
starcoder
|
defmodule Dotzip.ExtraField.Unix do
@moduledoc """
This module encode and decode Unix extra field defined in section
4.5.7 of the official documentation.
"""
defstruct atime: 0, mtime: 0, uid: 0, gid: 0, var: 0
defp tag() do
<<0x00, 0x0d>>
end
defp encode_tag({:ok, data, buffer}) do
tag = tag()
{:ok, data, <<tag::binary-size(2), buffer::bitstring>>}
end
defp encode_tsize({:ok, data, buffer}) do
s = byte_size(buffer)
{:ok, data, <<s::size(16), buffer::bitstring>>}
end
defp encode_atime({:ok, %{ :atime => atime } = data, buffer}) do
{:ok, data, <<atime::size(32), buffer::bitstring>>}
end
defp encode_mtime({:ok, %{ :mtime => mtime } = data, buffer}) do
{:ok, data, <<mtime::size(32), buffer::bitstring>>}
end
defp encode_uid({:ok, %{ :uid => uid } = data, buffer}) do
{:ok, data, <<uid::size(16), buffer::bitstring>>}
end
defp encode_gid({:ok, %{ :gid => gid } = data, buffer}) do
{:ok, data, <<gid::size(16), buffer::bitstring>>}
end
defp encode_var(data) do
{:ok, data, <<>>}
end
@doc """
Encodes a Unix extra field.
"""
def encode(data) do
encode_var(data)
|> encode_gid()
|> encode_uid()
|> encode_mtime()
|> encode_atime()
|> encode_tsize()
|> encode_tag()
end
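# A round-trip sketch (field values are illustrative; decode/1 also adds
# the :tsize entry it reads from the wire, so `decoded` is a superset of
# the input map):
#
#     data = %{atime: 1_600_000_000, mtime: 1_600_000_000, uid: 1000, gid: 1000}
#     {:ok, ^data, binary} = Dotzip.ExtraField.Unix.encode(data)
#     {:ok, decoded, <<>>} = Dotzip.ExtraField.Unix.decode(binary)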
defp decode_tag(<<0x00, 0x0d, rest::bitstring>>) do
{:ok, %{}, rest}
end
defp decode_tsize({:ok, data, <<tsize::size(16), rest::bitstring>>}) do
{:ok, Map.put(data, :tsize, tsize), rest}
end
defp decode_atime({:ok, data, <<atime::size(32), rest::bitstring>>}) do
{:ok, Map.put(data, :atime, atime), rest}
end
defp decode_mtime({:ok, data, <<mtime::size(32), rest::bitstring>>}) do
{:ok, Map.put(data, :mtime, mtime), rest}
end
defp decode_uid({:ok, data, <<uid::size(16), rest::bitstring>>}) do
{:ok, Map.put(data, :uid, uid), rest}
end
defp decode_gid({:ok, data, <<gid::size(16), rest::bitstring>>}) do
{:ok, Map.put(data, :gid, gid ), rest}
end
defp decode_var({:ok, data, rest}) do
{:ok, data, rest}
end
@doc """
Decodes a Unix extra field.
"""
def decode(bitstring) do
decode_tag(bitstring)
|> decode_tsize()
|> decode_atime()
|> decode_mtime()
|> decode_uid()
|> decode_gid()
|> decode_var()
end
end
|
lib/dotzip/extra_field/unix.ex
| 0.629775 | 0.635717 |
unix.ex
|
starcoder
|
defmodule EQRCode.SVG do
@moduledoc """
Render the QR Code matrix in SVG format
```elixir
qr_code_content
|> EQRCode.encode()
|> EQRCode.svg(color: "#cc6600", shape: "circle", width: 300)
```
You can specify the following attributes of the QR code:
* `background_color`: In hexadecimal format or `:transparent`. The default is `#FFF`
* `color`: In hexadecimal format. The default is `#000`
* `shape`: Only `square` or `circle`. The default is `square`
* `width`: The width of the QR code in pixels. Without the width attribute, the QR code size will be dynamically generated based on the input string.
* `viewbox`: When set to `true`, the SVG element will specify its height and width using `viewBox`, instead of explicit `height` and `width` tags.
Default options are `[color: "#000", shape: "square"]`.
"""
alias EQRCode.Matrix
@doc """
Return the SVG format of the QR Code
"""
@spec svg(Matrix.t(), map() | Keyword.t()) :: String.t()
def svg(%Matrix{matrix: matrix} = m, options \\ []) do
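# normalize options (map or keyword list) into a list of {key, value} tuples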
options = options |> Enum.map(& &1)
matrix_size = Matrix.size(m)
svg_options = options |> Map.new() |> set_svg_options(matrix_size)
dimension = matrix_size * svg_options[:module_size]
xml_tag = ~s(<?xml version="1.0" standalone="yes"?>)
viewbox_attr = ~s(viewBox="0 0 #{matrix_size} #{matrix_size}")
dimension_attrs =
if Keyword.get(options, :viewbox, false) do
viewbox_attr
else
~s(width="#{dimension}" height="#{dimension}" #{viewbox_attr})
end
open_tag =
~s(<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events" #{
dimension_attrs
}
shape-rendering="crispEdges" style="background-color: #{svg_options[:background_color]}">)
close_tag = ~s(</svg>)
result =
Tuple.to_list(matrix)
|> Stream.with_index()
|> Stream.map(fn {row, row_num} ->
Tuple.to_list(row)
|> format_row_as_svg(row_num, svg_options)
end)
|> Enum.to_list()
Enum.join([xml_tag, open_tag, result, close_tag], "\n")
end
defp set_svg_options(options, matrix_size) do
options
|> Map.put_new(:background_color, "#FFF")
|> Map.put_new(:color, "#000")
|> set_module_size(matrix_size)
|> Map.put_new(:shape, "rectangle")
|> Map.put_new(:size, matrix_size)
end
defp set_module_size(%{width: width} = options, matrix_size) when is_integer(width) do
options
|> Map.put_new(:module_size, width / matrix_size)
end
defp set_module_size(%{width: width} = options, matrix_size) when is_binary(width) do
options
|> Map.put_new(:module_size, String.to_integer(width) / matrix_size)
end
defp set_module_size(options, _matrix_size) do
options
|> Map.put_new(:module_size, 11)
end
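# Sizing sketch: for a 25x25 matrix rendered with width: 300 each module
# is 300 / 25 = 12.0 px; without a :width option the module size falls
# back to 11 px.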
defp format_row_as_svg(row_matrix, row_num, svg_options) do
row_matrix
|> Stream.with_index()
|> Stream.map(fn {col, col_num} ->
substitute(col, row_num, col_num, svg_options)
end)
|> Enum.to_list()
end
defp substitute(data, row_num, col_num, %{})
when is_nil(data) or data == 0 do
%{}
|> Map.put(:height, 1)
|> Map.put(:style, "fill: transparent;")
|> Map.put(:width, 1)
|> Map.put(:x, row_num)
|> Map.put(:y, col_num)
|> draw_rect
end
# This pattern match ensures that the QR Codes positional markers are drawn
# as rectangles, regardless of the shape
defp substitute(1, row_num, col_num, %{color: color, size: size})
when (row_num <= 8 and col_num <= 8) or
(row_num >= size - 9 and col_num <= 8) or
(row_num <= 8 and col_num >= size - 9) do
%{}
|> Map.put(:height, 1)
|> Map.put(:style, "fill:#{color};")
|> Map.put(:width, 1)
|> Map.put(:x, col_num)
|> Map.put(:y, row_num)
|> draw_rect
end
defp substitute(1, row_num, col_num, %{color: color, shape: "circle"}) do
radius = 0.5
%{}
|> Map.put(:cx, row_num + radius)
|> Map.put(:cy, col_num + radius)
|> Map.put(:r, radius)
|> Map.put(:style, "fill:#{color};")
|> draw_circle
end
defp substitute(1, row_num, col_num, %{color: color}) do
%{}
|> Map.put(:height, 1)
|> Map.put(:style, "fill:#{color};")
|> Map.put(:width, 1)
|> Map.put(:x, row_num)
|> Map.put(:y, col_num)
|> draw_rect
end
defp draw_rect(attribute_map) do
attributes = get_attributes(attribute_map)
~s(<rect #{attributes}/>)
end
defp draw_circle(attribute_map) do
attributes = get_attributes(attribute_map)
~s(<circle #{attributes}/>)
end
defp get_attributes(attribute_map) do
attribute_map
|> Enum.map(fn {key, value} -> ~s(#{key}="#{value}") end)
|> Enum.join(" ")
end
end
|
lib/eqrcode/svg.ex
| 0.863334 | 0.875148 |
svg.ex
|
starcoder
|
defmodule Krill.Parser do
@moduledoc """
Deals with anything related to Porcelain.Process and Results
"""
require Logger
import Krill, only: [empty?: 1]
@typedoc "Standard line"
@type std_line :: {pos_integer, String.t}
@typedoc "Rules can be a string, a regular expresion or a function that returns a boolean"
@type rule :: String.t | Regex.t | (String.t -> as_boolean(term))
@typedoc "Can be either a string or a regular expresion"
@type pattern :: String.t | Regex.t
@doc """
Match `string` against and every rule in `rules`.
Returns `true` if any of them matched, or `false` if none.
"""
@spec match_rule?(String.t, [rule]) :: boolean
def match_rule?(string, rules) when is_bitstring(string) and is_list(rules) do
Enum.find_value(rules, false,
fn(rule) -> match_rule?(string, rule)
end)
end
@doc """
Matches `rule` against `string`.
Rule can be be a `function`, a `string` or `regular expession`.
Returns `true` or `false`.
"""
@spec match_rule?(String.t, rule) :: boolean
def match_rule?(string, rule) when is_bitstring(string) do
cond do
is_function(rule) ->
rule.(string)
empty?(string) and empty?(rule) and is_bitstring(rule) ->
true
empty?(string) or ( empty?(rule) and is_bitstring(rule) ) ->
false
true ->
string =~ rule #match regex or bitstring
end
end
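# Illustrative: match_rule?("hello", ~r/ell/) and match_rule?("", "") are
# true, while match_rule?("hello", "") is false - an empty string rule
# only matches an empty string.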
@doc """
Returns the number of lines in `string`.
"""
@spec count_lines(String.t) :: non_neg_integer
def count_lines(string),
do: String.split(string, "\n") |> Enum.count
@doc """
Returns the number of lines in `string` for which `fun` returns a truthy value.
"""
@spec count_lines(String.t, (String.t -> as_boolean(term))) :: non_neg_integer
def count_lines(string, fun) when is_function(fun) do
String.split(string, "\n")
|> Enum.count(fun)
end
@doc """
Returns the number of lines in `string` that match `pattern`.
If `pattern` is a regular expression, a line is counted when it matches `pattern`;
if `pattern` is a string, a line is counted when `pattern` is a substring of it.
"""
@spec count_lines(String.t, pattern) :: non_neg_integer
def count_lines(string, pattern) do
String.split(string, "\n") |>
Enum.count(&(&1 =~ pattern))
end
@doc """
Rejects items in `collection` that are empty strings, `nil` or
`false`.
"""
@spec reject_empty([std_line]) :: [std_line]
def reject_empty(collection) when is_list(collection) do
Enum.reject( collection, fn
{_line_no, line} ->
empty?(line)
line ->
empty?(line)
end)
end
@doc """
Joins a `collection` with `joiner`, removing items that are empty
strings or falsey values.
It keeps the last item if it's an empty string, `nil` or `false`,
to avoid trimming trailing newlines.
Returns a string.
"""
@spec join(Enumerable.t, String.t) :: String.t
def join(collection, joiner \\ "\n") do
collection
|> reject_empty
|> Enum.join(joiner)
end
@doc """
Splits a `string` by `delimiter`. It strips newline (`\n`)
characters from the ends of the string, if any.
It returns a `list`.
"""
@spec split(String.t, String.t) :: list
def split(string, delimiter \\ "\n") do
string
|> String.strip(?\n)
|> String.split(delimiter)
end
@doc """
Takes a `collection`, which is a list of tuples in the format {line_no, line}, and
returns a list with the same format containing the lines that matched `rules`.
It also takes a `string`, splits it into lines, and returns only the lines that
match against `rules`.
`rules` must be a list of rules consisting of `string`s or `regex`es.
"""
@spec accept( (nil | [std_line] | String.t), (nil | [rule]) ) :: nil | [std_line] | String.t
def accept(nil, _rules), do: nil
def accept(items, nil) when is_list(items) or is_bitstring(items), do: items
def accept(collection, rules) when is_list(collection) and is_list(rules) do
Enum.filter(collection, fn({_line_no, line}) ->
match_rule?(line, rules)
end)
end
def accept(string, rules) when is_bitstring(string) and is_list(rules) do
String.split(string, "\n")
|> Enum.filter( fn(line) ->
match_rule?(line, rules)
end)
|> join("\n")
end
@doc """
Takes a `collection`, which is a list of tuples in the format {line_no, line}, and
returns a list with the same format containing the lines that did not match
the `rules`.
It also takes a `string`, splits it into lines, and returns the lines that
did not match against `rules`.
`rules` must be a list of rules consisting of `string`s or `regex`es.
"""
@spec reject( nil | [std_line] | String.t, nil | [rule] ) :: nil | [std_line] | String.t
def reject(nil, _rules), do: nil
def reject(items, nil) when is_list(items) or is_bitstring(items), do: items
def reject(collection, rules) when is_list(collection) and is_list(rules) do
Enum.reject( collection, fn({_line_no, line}) ->
match_rule?(line, rules)
end)
end
def reject(string, rules) when is_bitstring(string) and is_list(rules) do
String.split(string, "\n")
|> Enum.reject( fn(line) ->
match_rule?(line, rules)
end)
|> join("\n")
end
@doc """
Takes a `list` with lines, and return a list with format [{line_no, line}, ...]
"""
@spec numerify([String.t]) :: [std_line]
def numerify(list) when is_list(list) do
Enum.map_reduce(list, 1, fn(item, counter) -> { {counter, item}, counter+1} end) |> elem(0)
end
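# Illustrative: numerify(["foo", "bar"]) #=> [{1, "foo"}, {2, "bar"}];
# denumerify/1 below is the inverse projection back to the bare lines.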
@doc """
Takes a `list` with lines in the format [{line_no, line}, ...], and returns a list
containing the lines only [line_1, line_2, ...]
"""
@spec denumerify([std_line]) :: [String.t]
def denumerify(list) when is_list(list) do
Enum.map(list, fn({_line_no, line}) -> line end)
end
end
|
lib/krill/parser.ex
| 0.760117 | 0.649843 |
parser.ex
|
starcoder
|
defmodule ExAws.GameLift do
@moduledoc """
Operations on the AWS GameLift service.
http://docs.aws.amazon.com/LINK_MUST_BE_HERE
"""
alias ExAws.GameLift.Encodable
alias ExAws.GameLift.Player
import ExAws.Utils, only: [camelize_keys: 1]
@namespace "GameLift"
defp request(action, data) do
operation =
action
|> Atom.to_string()
|> Macro.camelize()
ExAws.Operation.JSON.new(:gamelift, %{
data: data,
headers: [
{"x-amz-target", "#{@namespace}.#{operation}"},
{"content-type", "application/x-amz-json-1.1"}
]
})
end
defp camelize_opts(opts) do
opts
|> Map.new()
|> ExAws.Utils.camelize_keys()
end
@spec get_aliases(Keyword.t()) :: ExAws.Operation.JSON.t()
def get_aliases(opts \\ []) do
request(:list_aliases, camelize_opts(opts))
end
@spec start_matchmaking(
configuration_name :: String.t(),
players :: [Player.t()],
ticket_id :: String.t() | nil
) :: ExAws.Operation.JSON.t()
def start_matchmaking(configuration_name, players, ticket_id \\ nil) do
data = %{
"ConfigurationName" => configuration_name,
"Players" => Enum.map(players, &encode_player/1),
"TicketId" => ticket_id
}
request(:start_matchmaking, data)
end
defp encode_player(player) do
player
|> Map.from_struct()
|> Stream.map(&encode_player_param/1)
|> Enum.into(%{})
end
defp encode_player_param({:player_id, player_id}) do
{"PlayerId", player_id}
end
defp encode_player_param({:player_attributes, nil}) do
{"PlayerAttributes", nil}
end
defp encode_player_param({:player_attributes, player_attributes}) do
encoded_player_attributes =
player_attributes
|> Stream.map(fn {k, v} -> {to_string(k), Encodable.encode(v)} end)
|> Enum.into(%{})
{"PlayerAttributes", encoded_player_attributes}
end
defp encode_player_param({:latency_in_ms, latency_in_ms}) do
{"LatencyInMs", latency_in_ms}
end
defp encode_player_param({:team, team}) do
encoded_team = if is_nil(team), do: nil, else: to_string(team)
{"Team", encoded_team}
end
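# Illustrative encoding sketch (assumes the ExAws.GameLift.Player struct
# has exactly the fields matched above; attribute values are encoded via
# ExAws.GameLift.Encodable):
#
#     encode_player(%Player{player_id: "p1", team: :red})
#     #=> %{"PlayerId" => "p1", "Team" => "red",
#     #     "PlayerAttributes" => nil, "LatencyInMs" => nil}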
@spec describe_matchmaking(ticket_ids :: String.t() | [String.t()]) :: ExAws.Operation.JSON.t()
def describe_matchmaking(ticket_ids) do
request(:describe_matchmaking, %{"TicketIds" => List.wrap(ticket_ids)})
end
@spec stop_matchmaking(ticket_id :: String.t()) :: ExAws.Operation.JSON.t()
def stop_matchmaking(ticket_id) do
request(:stop_matchmaking, %{"TicketId" => ticket_id})
end
@spec accept_match(ticket_id :: String.t(), player_ids :: [String.t()], :accept | :reject) ::
ExAws.Operation.JSON.t()
def accept_match(ticket_id, player_ids, acceptance_type \\ :accept) do
data = %{
"TicketId" => ticket_id,
"PlayerIds" => player_ids,
"AcceptanceType" => acceptance_type |> Atom.to_string() |> String.upcase()
}
request(:accept_match, data)
end
@type describe_game_session_details_opts :: [
alias_id: String.t(),
fleet_id: String.t(),
game_session_id: String.t(),
limit: pos_integer,
next_token: String.t(),
status_filter: :activating | :active | :terminating | :terminated
]
@spec describe_game_session_details(opts :: describe_game_session_details_opts) ::
ExAws.Operation.JSON.t()
def describe_game_session_details(opts \\ []) do
data =
opts
|> Map.new()
|> Map.update(:status_filter, nil, &String.upcase(Atom.to_string(&1)))
|> camelize_keys
request(:describe_game_session_details, data)
end
end
|
lib/ex_aws/gamelift.ex
| 0.724481 | 0.433262 |
gamelift.ex
|
starcoder
|
defmodule AWS.Machinelearning do
@moduledoc """
Definition of the public APIs exposed by Amazon Machine Learning
"""
@doc """
Adds one or more tags to an object, up to a limit of 10.
Each tag consists of a key and an optional value. If you add a tag using a key
that is already associated with the ML object, `AddTags` updates the tag's
value.
"""
def add_tags(client, input, options \\ []) do
request(client, "AddTags", input, options)
end
@doc """
Generates predictions for a group of observations.
The observations to process exist in one or more data files referenced by a
`DataSource`. This operation creates a new `BatchPrediction`, and uses an
`MLModel` and the data files referenced by the `DataSource` as information
sources.
`CreateBatchPrediction` is an asynchronous operation. In response to
`CreateBatchPrediction`, Amazon Machine Learning (Amazon ML) immediately returns
and sets the `BatchPrediction` status to `PENDING`. After the `BatchPrediction`
completes, Amazon ML sets the status to `COMPLETED`.
You can poll for status updates by using the `GetBatchPrediction` operation and
checking the `Status` parameter of the result. After the `COMPLETED` status
appears, the results are available in the location specified by the `OutputUri`
parameter.
"""
def create_batch_prediction(client, input, options \\ []) do
request(client, "CreateBatchPrediction", input, options)
end
@doc """
Creates a `DataSource` object from an [Amazon Relational Database Service](http://aws.amazon.com/rds/) (Amazon RDS).
A `DataSource` references data that can be used to perform `CreateMLModel`,
`CreateEvaluation`, or `CreateBatchPrediction` operations.
`CreateDataSourceFromRDS` is an asynchronous operation. In response to
`CreateDataSourceFromRDS`, Amazon Machine Learning (Amazon ML) immediately
returns and sets the `DataSource` status to `PENDING`. After the `DataSource` is
created and ready for use, Amazon ML sets the `Status` parameter to `COMPLETED`.
`DataSource` in the `COMPLETED` or `PENDING` state can be used only to perform
`CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction` operations.
If Amazon ML cannot accept the input source, it sets the `Status` parameter to
`FAILED` and includes an error message in the `Message` attribute of the
`GetDataSource` operation response.
"""
def create_data_source_from_r_d_s(client, input, options \\ []) do
request(client, "CreateDataSourceFromRDS", input, options)
end
@doc """
Creates a `DataSource` from a database hosted on an Amazon Redshift cluster.
A `DataSource` references data that can be used to perform either
`CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction` operations.
`CreateDataSourceFromRedshift` is an asynchronous operation. In response to
`CreateDataSourceFromRedshift`, Amazon Machine Learning (Amazon ML) immediately
returns and sets the `DataSource` status to `PENDING`. After the `DataSource` is
created and ready for use, Amazon ML sets the `Status` parameter to `COMPLETED`.
`DataSource` in `COMPLETED` or `PENDING` states can be used to perform only
`CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction` operations.
If Amazon ML can't accept the input source, it sets the `Status` parameter to
`FAILED` and includes an error message in the `Message` attribute of the
`GetDataSource` operation response.
The observations should be contained in the database hosted on an Amazon
Redshift cluster and should be specified by a `SelectSqlQuery` query. Amazon ML
executes an `Unload` command in Amazon Redshift to transfer the result set of
the `SelectSqlQuery` query to `S3StagingLocation`.
After the `DataSource` has been created, it's ready for use in evaluations and
batch predictions. If you plan to use the `DataSource` to train an `MLModel`,
the `DataSource` also requires a recipe. A recipe describes how each input
variable will be used in training an `MLModel`. Will the variable be included or
excluded from training? Will the variable be manipulated; for example, will it
be combined with another variable or will it be split apart into word
combinations? The recipe provides answers to these questions.
You can't change an existing datasource, but you can copy and modify the settings
from an existing Amazon Redshift datasource to create a new datasource. To do
so, call `GetDataSource` for an existing datasource and copy the values to a
`CreateDataSource` call. Change the settings that you want to change and make
sure that all required fields have the appropriate values.
"""
def create_data_source_from_redshift(client, input, options \\ []) do
request(client, "CreateDataSourceFromRedshift", input, options)
end
@doc """
Creates a `DataSource` object.
A `DataSource` references data that can be used to perform `CreateMLModel`,
`CreateEvaluation`, or `CreateBatchPrediction` operations.
`CreateDataSourceFromS3` is an asynchronous operation. In response to
`CreateDataSourceFromS3`, Amazon Machine Learning (Amazon ML) immediately
returns and sets the `DataSource` status to `PENDING`. After the `DataSource`
has been created and is ready for use, Amazon ML sets the `Status` parameter to
`COMPLETED`. `DataSource` in the `COMPLETED` or `PENDING` state can be used to
perform only `CreateMLModel`, `CreateEvaluation` or `CreateBatchPrediction`
operations.
If Amazon ML can't accept the input source, it sets the `Status` parameter to
`FAILED` and includes an error message in the `Message` attribute of the
`GetDataSource` operation response.
The observation data used in a `DataSource` should be ready to use; that is, it
should have a consistent structure, and missing data values should be kept to a
minimum. The observation data must reside in one or more .csv files in an Amazon
Simple Storage Service (Amazon S3) location, along with a schema that describes
the data items by name and type. The same schema must be used for all of the
data files referenced by the `DataSource`.
After the `DataSource` has been created, it's ready to use in evaluations and
batch predictions. If you plan to use the `DataSource` to train an `MLModel`,
the `DataSource` also needs a recipe. A recipe describes how each input variable
will be used in training an `MLModel`. Will the variable be included or excluded
from training? Will the variable be manipulated; for example, will it be
combined with another variable or will it be split apart into word combinations?
The recipe provides answers to these questions.
"""
def create_data_source_from_s3(client, input, options \\ []) do
request(client, "CreateDataSourceFromS3", input, options)
end
@doc """
Creates a new `Evaluation` of an `MLModel`.
An `MLModel` is evaluated on a set of observations associated to a `DataSource`.
Like a `DataSource` for an `MLModel`, the `DataSource` for an `Evaluation`
contains values for the `Target Variable`. The `Evaluation` compares the
predicted result for each observation to the actual outcome and provides a
summary so that you know how effective the `MLModel` functions on the test data.
Evaluation generates a relevant performance metric, such as BinaryAUC,
RegressionRMSE or MulticlassAvgFScore based on the corresponding `MLModelType`:
`BINARY`, `REGRESSION` or `MULTICLASS`.
`CreateEvaluation` is an asynchronous operation. In response to
`CreateEvaluation`, Amazon Machine Learning (Amazon ML) immediately returns and
sets the evaluation status to `PENDING`. After the `Evaluation` is created and
ready for use, Amazon ML sets the status to `COMPLETED`.
You can use the `GetEvaluation` operation to check progress of the evaluation
during the creation operation.
"""
def create_evaluation(client, input, options \\ []) do
request(client, "CreateEvaluation", input, options)
end
@doc """
Creates a new `MLModel` using the `DataSource` and the recipe as information
sources.
An `MLModel` is nearly immutable. Users can update only the `MLModelName` and
the `ScoreThreshold` in an `MLModel` without creating a new `MLModel`.
`CreateMLModel` is an asynchronous operation. In response to `CreateMLModel`,
Amazon Machine Learning (Amazon ML) immediately returns and sets the `MLModel`
status to `PENDING`. After the `MLModel` has been created and is ready for use,
Amazon ML sets the status to `COMPLETED`.
You can use the `GetMLModel` operation to check the progress of the `MLModel`
during the creation operation.
`CreateMLModel` requires a `DataSource` with computed statistics, which can be
created by setting `ComputeStatistics` to `true` in `CreateDataSourceFromRDS`,
`CreateDataSourceFromS3`, or `CreateDataSourceFromRedshift` operations.
"""
def create_m_l_model(client, input, options \\ []) do
request(client, "CreateMLModel", input, options)
end
@doc """
Creates a real-time endpoint for the `MLModel`.
The endpoint contains the URI of the `MLModel`; that is, the location to send
real-time prediction requests for the specified `MLModel`.
"""
def create_realtime_endpoint(client, input, options \\ []) do
request(client, "CreateRealtimeEndpoint", input, options)
end
@doc """
Assigns the DELETED status to a `BatchPrediction`, rendering it unusable.
After using the `DeleteBatchPrediction` operation, you can use the
`GetBatchPrediction` operation to verify that the status of the
`BatchPrediction` changed to DELETED.
**Caution:** The result of the `DeleteBatchPrediction` operation is
irreversible.
"""
def delete_batch_prediction(client, input, options \\ []) do
request(client, "DeleteBatchPrediction", input, options)
end
@doc """
Assigns the DELETED status to a `DataSource`, rendering it unusable.
After using the `DeleteDataSource` operation, you can use the `GetDataSource`
operation to verify that the status of the `DataSource` changed to DELETED.
**Caution:** The results of the `DeleteDataSource` operation are irreversible.
"""
def delete_data_source(client, input, options \\ []) do
request(client, "DeleteDataSource", input, options)
end
@doc """
Assigns the `DELETED` status to an `Evaluation`, rendering it unusable.
After invoking the `DeleteEvaluation` operation, you can use the `GetEvaluation`
operation to verify that the status of the `Evaluation` changed to `DELETED`.
**Caution:** The results of the `DeleteEvaluation` operation are irreversible.
"""
def delete_evaluation(client, input, options \\ []) do
request(client, "DeleteEvaluation", input, options)
end
@doc """
Assigns the `DELETED` status to an `MLModel`, rendering it unusable.
After using the `DeleteMLModel` operation, you can use the `GetMLModel`
operation to verify that the status of the `MLModel` changed to DELETED.
**Caution:** The result of the `DeleteMLModel` operation is irreversible.
"""
def delete_m_l_model(client, input, options \\ []) do
request(client, "DeleteMLModel", input, options)
end
@doc """
Deletes a real time endpoint of an `MLModel`.
"""
def delete_realtime_endpoint(client, input, options \\ []) do
request(client, "DeleteRealtimeEndpoint", input, options)
end
@doc """
Deletes the specified tags associated with an ML object.
After this operation is complete, you can't recover deleted tags.
If you specify a tag that doesn't exist, Amazon ML ignores it.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Returns a list of `BatchPrediction` operations that match the search criteria in
the request.
"""
def describe_batch_predictions(client, input, options \\ []) do
request(client, "DescribeBatchPredictions", input, options)
end
@doc """
Returns a list of `DataSource` that match the search criteria in the request.
"""
def describe_data_sources(client, input, options \\ []) do
request(client, "DescribeDataSources", input, options)
end
@doc """
Returns a list of `DescribeEvaluations` that match the search criteria in the
request.
"""
def describe_evaluations(client, input, options \\ []) do
request(client, "DescribeEvaluations", input, options)
end
@doc """
Returns a list of `MLModel` that match the search criteria in the request.
"""
def describe_m_l_models(client, input, options \\ []) do
request(client, "DescribeMLModels", input, options)
end
@doc """
Describes one or more of the tags for your Amazon ML object.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Returns a `BatchPrediction` that includes detailed metadata, status, and data
file information for a `Batch Prediction` request.
"""
def get_batch_prediction(client, input, options \\ []) do
request(client, "GetBatchPrediction", input, options)
end
@doc """
Returns a `DataSource` that includes metadata and data file information, as well
as the current status of the `DataSource`.
`GetDataSource` provides results in normal or verbose format. The verbose format
adds the schema description and the list of files pointed to by the DataSource
to the normal format.
"""
def get_data_source(client, input, options \\ []) do
request(client, "GetDataSource", input, options)
end
@doc """
Returns an `Evaluation` that includes metadata as well as the current status of
the `Evaluation`.
"""
def get_evaluation(client, input, options \\ []) do
request(client, "GetEvaluation", input, options)
end
@doc """
Returns an `MLModel` that includes detailed metadata, data source information,
and the current status of the `MLModel`.
`GetMLModel` provides results in normal or verbose format.
"""
def get_m_l_model(client, input, options \\ []) do
request(client, "GetMLModel", input, options)
end
@doc """
Generates a prediction for the observation using the specified `ML Model`.
**Note:** Not all response parameters will be populated. Whether a response parameter
is populated depends on the type of model requested.
"""
def predict(client, input, options \\ []) do
request(client, "Predict", input, options)
end
@doc """
Updates the `BatchPredictionName` of a `BatchPrediction`.
You can use the `GetBatchPrediction` operation to view the contents of the
updated data element.
"""
def update_batch_prediction(client, input, options \\ []) do
request(client, "UpdateBatchPrediction", input, options)
end
@doc """
Updates the `DataSourceName` of a `DataSource`.
You can use the `GetDataSource` operation to view the contents of the updated
data element.
"""
def update_data_source(client, input, options \\ []) do
request(client, "UpdateDataSource", input, options)
end
@doc """
Updates the `EvaluationName` of an `Evaluation`.
You can use the `GetEvaluation` operation to view the contents of the updated
data element.
"""
def update_evaluation(client, input, options \\ []) do
request(client, "UpdateEvaluation", input, options)
end
@doc """
Updates the `MLModelName` and the `ScoreThreshold` of an `MLModel`.
You can use the `GetMLModel` operation to view the contents of the updated data
element.
"""
def update_m_l_model(client, input, options \\ []) do
request(client, "UpdateMLModel", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "machinelearning"}
host = build_host("machinelearning", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonML_20141212.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
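# Illustrative: with %{region: "us-east-1", endpoint: "amazonaws.com",
# proto: "https", port: 443}, build_host/2 returns
# "machinelearning.us-east-1.amazonaws.com" and build_url/2 returns
# "https://machinelearning.us-east-1.amazonaws.com:443/".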
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/machinelearning.ex
| 0.918574 | 0.883688 |
machinelearning.ex
|
starcoder
|
defmodule ExSDP.Origin do
@moduledoc """
This module represents the Origin field of SDP that represents the originator of the session.
If the username is set to `-` the originating host does not support the concept of user IDs.
The username MUST NOT contain spaces.
For more details please see [RFC4566 Section 5.2](https://tools.ietf.org/html/rfc4566#section-5.2)
"""
use Bunch.Access
alias ExSDP.{Address, Utils}
@enforce_keys [
:session_id,
:session_version,
:address
]
defstruct [username: "-", network_type: "IN"] ++ @enforce_keys
@type t :: %__MODULE__{
username: binary(),
session_id: integer(),
session_version: integer(),
network_type: binary(),
address: Address.t()
}
@doc """
Returns new origin struct.
By default:
* `username` is `-`
* `session_id` is random 64 bit number
* `session_version` is `0`
* `address` is `{127, 0, 0, 1}`
"""
@spec new(
username: binary(),
session_id: integer(),
session_version: integer(),
address: Address.t()
) :: t()
def new(opts \\ []) do
%__MODULE__{
username: Keyword.get(opts, :username, "-"),
session_id: Keyword.get(opts, :session_id, generate_random()),
session_version: Keyword.get(opts, :session_version, 0),
address: Keyword.get(opts, :address, {127, 0, 0, 1})
}
end
@spec parse(binary()) ::
{:ok, t()} | {:error, :invalid_addrtype | :invalid_address}
def parse(origin) do
with {:ok, [username, sess_id, sess_version, nettype, addrtype, address]} <-
Utils.split(origin, " ", 6),
{:ok, addrtype} <- Address.parse_addrtype(addrtype),
{:ok, address} <- Address.parse_address(address) do
# check whether fqdn
address = if is_binary(address), do: {addrtype, address}, else: address
origin = %__MODULE__{
username: username,
session_id: String.to_integer(sess_id),
session_version: String.to_integer(sess_version),
network_type: nettype,
address: address
}
{:ok, origin}
else
{:error, _reason} = error -> error
end
end
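# Illustrative parse of a typical origin line (the address shape is
# whatever ExSDP.Address.parse_address/1 returns, here an IPv4 tuple):
#
#     {:ok, origin} = ExSDP.Origin.parse("- 2890844526 2 IN IP4 127.0.0.1")
#     origin.session_id #=> 2890844526
#     origin.address #=> {127, 0, 0, 1}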
@doc """
Increments `session_version` field.
Can be used while sending offer/answer again.
"""
@spec bump_version(t()) :: {:ok, t()}
def bump_version(origin), do: {:ok, %{origin | session_version: origin.session_version + 1}}
defp generate_random(), do: :crypto.strong_rand_bytes(7) |> :binary.decode_unsigned()
end
defimpl String.Chars, for: ExSDP.Origin do
alias ExSDP.Address
def to_string(origin) do
"""
#{origin.username} \
#{origin.session_id} \
#{origin.session_version} \
#{origin.network_type} \
#{Address.get_addrtype(origin.address)} \
#{Address.serialize_address(origin.address)}\
"""
end
end
|
lib/ex_sdp/origin.ex
| 0.824179 | 0.519338 |
origin.ex
|
starcoder
|
defmodule Kino.VegaLite do
@moduledoc """
A kino wrapping [VegaLite](https://hexdocs.pm/vega_lite) graphic.
This kino allows rendering a regular VegaLite graphic and then
streaming new data points to update the graphic.
## Examples
chart =
Vl.new(width: 400, height: 400)
|> Vl.mark(:line)
|> Vl.encode_field(:x, "x", type: :quantitative)
|> Vl.encode_field(:y, "y", type: :quantitative)
|> Kino.VegaLite.new()
|> Kino.render()
for i <- 1..300 do
point = %{x: i / 10, y: :math.sin(i / 10)}
Kino.VegaLite.push(chart, point)
Process.sleep(25)
end
"""
use Kino.JS, assets_path: "lib/assets/vega_lite"
use Kino.JS.Live
@type t :: Kino.JS.Live.t()
@doc """
Creates a new kino with the given VegaLite definition.
"""
@spec new(VegaLite.t()) :: t()
def new(vl) when is_struct(vl, VegaLite) do
Kino.JS.Live.new(__MODULE__, vl)
end
@doc false
@spec static(VegaLite.t()) :: Kino.JS.t()
def static(vl) when is_struct(vl, VegaLite) do
data = %{
spec: VegaLite.to_spec(vl),
datasets: []
}
Kino.JS.new(__MODULE__, data, export_info_string: "vega-lite", export_key: :spec)
end
@doc """
Appends a single data point to the graphic dataset.
## Options
* `:window` - the maximum number of data points to keep.
This option is useful when you are appending new
data points to the plot over a long period of time
* `dataset` - name of the targeted dataset from
the VegaLite specification. Defaults to the default
anonymous dataset
"""
@spec push(t(), map(), keyword()) :: :ok
def push(kino, data_point, opts \\ []) do
dataset = opts[:dataset]
window = opts[:window]
data_point = Map.new(data_point)
Kino.JS.Live.cast(kino, {:push, dataset, [data_point], window})
end
@doc """
Appends a number of data points to the graphic dataset.
See `push/3` for more details.
"""
@spec push_many(t(), list(map()), keyword()) :: :ok
def push_many(kino, data_points, opts \\ []) when is_list(data_points) do
dataset = opts[:dataset]
window = opts[:window]
data_points = Enum.map(data_points, &Map.new/1)
Kino.JS.Live.cast(kino, {:push, dataset, data_points, window})
end
@doc """
Removes all data points from the graphic dataset.
## Options
* `dataset` - name of the targeted dataset from
the VegaLite specification. Defaults to the default
anonymous dataset
"""
@spec clear(t(), keyword()) :: :ok
def clear(kino, opts \\ []) do
dataset = opts[:dataset]
Kino.JS.Live.cast(kino, {:clear, dataset})
end
@doc """
Registers a callback to run periodically in the kino process.
The callback is run every `interval_ms` milliseconds and receives
the accumulated value. The callback should return either of:
* `{:cont, acc}` - continue with the new accumulated value
* `:halt` - to no longer schedule callback evaluation
The callback is run for the first time immediately upon registration.
"""
@spec periodically(t(), pos_integer(), term(), (term() -> {:cont, term()} | :halt)) :: :ok
def periodically(kino, interval_ms, acc, fun) do
Kino.JS.Live.cast(kino, {:periodically, interval_ms, acc, fun})
end
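# Illustrative: push one point every 50 ms and stop after 100 points
# (assumes `chart` was created as in the moduledoc example):
#
#     Kino.VegaLite.periodically(chart, 50, 0, fn i ->
#       if i < 100 do
#         Kino.VegaLite.push(chart, %{x: i, y: i * i})
#         {:cont, i + 1}
#       else
#         :halt
#       end
#     end)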
@impl true
def init(vl, ctx) do
{:ok, assign(ctx, vl: vl, datasets: %{})}
end
@compile {:no_warn_undefined, {VegaLite, :to_spec, 1}}
@impl true
def handle_connect(ctx) do
data = %{
spec: VegaLite.to_spec(ctx.assigns.vl),
datasets: for({dataset, data} <- ctx.assigns.datasets, do: [dataset, data])
}
{:ok, data, ctx}
end
@impl true
def handle_cast({:push, dataset, data, window}, ctx) do
broadcast_event(ctx, "push", %{data: data, dataset: dataset, window: window})
ctx =
update(ctx, :datasets, fn datasets ->
{current_data, datasets} = Map.pop(datasets, dataset, [])
new_data =
if window do
Enum.take(current_data ++ data, -window)
else
current_data ++ data
end
Map.put(datasets, dataset, new_data)
end)
{:noreply, ctx}
end
def handle_cast({:clear, dataset}, ctx) do
broadcast_event(ctx, "push", %{data: [], dataset: dataset, window: 0})
ctx = update(ctx, :datasets, &Map.delete(&1, dataset))
{:noreply, ctx}
end
def handle_cast({:periodically, interval_ms, acc, fun}, state) do
periodically_iter(interval_ms, acc, fun)
{:noreply, state}
end
@impl true
def handle_info({:periodically_iter, interval_ms, acc, fun}, ctx) do
periodically_iter(interval_ms, acc, fun)
{:noreply, ctx}
end
defp periodically_iter(interval_ms, acc, fun) do
case fun.(acc) do
{:cont, acc} ->
Process.send_after(self(), {:periodically_iter, interval_ms, acc, fun}, interval_ms)
:halt ->
:ok
end
end
end
|
lib/kino/vega_lite.ex
| 0.900832 | 0.651202 |
vega_lite.ex
|
starcoder
|
defmodule Mix.Tasks.Sbom.Cyclonedx do
@shortdoc "Generates CycloneDX SBoM"
use Mix.Task
import Mix.Generator
@default_path "bom.xml"
@moduledoc """
Generates a Software Bill-of-Materials (SBoM) in CycloneDX format.
## Options
* `--output` (`-o`): the full path to the SBoM output file (default:
#{@default_path})
* `--force` (`-f`): overwrite existing files without prompting for
confirmation
* `--dev` (`-d`): include dependencies for non-production environments
(including `dev`, `test` or `docs`); by default only dependencies for
MIX_ENV=prod are returned
* `--recurse` (`-r`): in an umbrella project, generate individual output
files for each application, rather than a single file for the entire
project
* `--schema` (`-s`): schema version to be used, defaults to "1.2".
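
## Usage

    mix sbom.cyclonedx
    mix sbom.cyclonedx -o bom.json -d

An output path ending in `.json` switches the report to CycloneDX JSON
format; any other extension produces XML.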
"""
@doc false
@impl Mix.Task
def run(all_args) do
{opts, _args} =
OptionParser.parse!(
all_args,
aliases: [o: :output, f: :force, d: :dev, r: :recurse, s: :schema],
strict: [
output: :string,
force: :boolean,
dev: :boolean,
recurse: :boolean,
schema: :string
]
)
output_path = opts[:output] || @default_path
validate_schema(opts)
environment = (!opts[:dev] && :prod) || nil
opts =
if String.ends_with?(output_path, ".json") do
Keyword.put(opts, :format, :json)
else
opts
end
apps = Mix.Project.apps_paths()
if opts[:recurse] && apps do
Enum.each(apps, &generate_bom(&1, output_path, environment, opts))
else
generate_bom(output_path, environment, opts)
end
end
defp generate_bom(output_path, environment, opts) do
case SBoM.components_for_project(environment) do
{:ok, components} ->
iodata = SBoM.CycloneDX.bom(components, opts)
create_file(output_path, iodata, force: opts[:force])
{:error, :unresolved_dependency} ->
dependency_error()
end
end
defp generate_bom({app, path}, output_path, environment, opts) do
Mix.Project.in_project(app, path, fn _module ->
generate_bom(output_path, environment, opts)
end)
end
defp dependency_error do
shell = Mix.shell()
shell.error("Unchecked dependencies; please run `mix deps.get`")
Mix.raise("Can't continue due to errors on dependencies")
end
defp validate_schema(opts) do
schema_versions = ["1.2", "1.1"]
if opts[:schema] && opts[:schema] not in schema_versions do
shell = Mix.shell()
shell.error(
"invalid cyclonedx schema version, available versions are #{Enum.join(schema_versions, ", ")}"
)
Mix.raise("Please provide a valid CycloneDX schema version to continue.")
end
end
end
|
lib/mix/tasks/sbom.cyclonedx.ex
| 0.755457 | 0.425993 |
sbom.cyclonedx.ex
|
starcoder
|
defmodule Day24 do
def part1(input) do
{start_pos, goals, grid} = parse(input)
find_single_solution({start_pos, goals}, grid)
end
def part2(input) do
{start_pos, goals, grid} = parse(input)
find_all_solutions({start_pos, goals}, grid)
|> Enum.map(fn {moves0, position} ->
goals = MapSet.new([start_pos])
{moves, _, _, _} = astar_search({position, goals}, grid)
moves0 + moves
end)
|> Enum.min
end
defp find_single_solution(initial, grid) do
{moves, _, _, _} = astar_search(initial, grid)
moves
end
defp find_all_solutions(from, grid) do
q = q_init(from)
find_all_solutions(q, grid, MapSet.new())
end
defp find_all_solutions(q, grid, seen) do
case astar(q, grid, seen) do
nil ->
[]
{moves, position, seen, q} ->
[{moves, position} | find_all_solutions(q, grid, seen)]
end
end
defp astar_search(from, grid) do
q = q_init(from)
seen = MapSet.new()
astar(q, grid, seen)
end
defp astar(q, grid, seen) do
case q_get(q) do
nil ->
nil
{moves, {position, goals}, q} ->
case MapSet.size(goals) do
0 ->
{moves, position, seen, q}
_ ->
ns = neighbors(position, grid)
|> Enum.reject(&MapSet.member?(seen, {&1, goals}))
seen = Enum.reduce(ns, seen, fn neighbor, acc ->
MapSet.put(acc, {neighbor, goals})
end)
moves = moves + 1
q = Enum.reduce(ns, q, fn position, q ->
goals = MapSet.delete(goals, position)
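        # NOTE: with `heuristic = 0` below, this "A*" degenerates into a plain
        # Dijkstra/breadth-first search over unit-cost moves, which is still correct.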
heuristic = 0
priority = moves + heuristic
q_add(q, priority, moves, {position, goals})
end)
astar(q, grid, seen)
end
end
end
defp q_init(from) do
:gb_sets.singleton({0, 0, from})
end
defp q_get(q) do
case :gb_sets.is_empty(q) do
true ->
nil
false ->
{{_, moves, position}, q} = :gb_sets.take_smallest(q)
{moves, position, q}
end
end
defp q_add(q, priority, moves, position) do
:gb_sets.insert({priority, moves, position}, q)
end
defp neighbors({x, y}, grid) do
[{x, y - 1}, {x - 1, y}, {x + 1, y}, {x, y + 1}]
|> Enum.filter(fn position ->
MapSet.member?(grid, position)
end)
end
defp parse(input) do
input = Enum.map(input, &String.to_charlist/1)
grid = input
|> Enum.with_index
|> Enum.flat_map(fn {line, r} ->
line
|> Enum.with_index
|> Enum.map(fn {char, c} ->
{{r, c}, char}
end)
end)
groups = Enum.group_by(grid, fn {_pos, char} ->
if char in ?0..?9, do: ?0, else: char
end)
{start_pos, _} = List.keyfind(groups[?0], ?0, 1)
digits = groups[?0]
|> Enum.map(fn {pos, _digit} -> pos end)
|> MapSet.new
|> MapSet.delete(start_pos)
grid = groups[?0] ++ groups[?.]
|> Enum.map(&(elem(&1, 0)))
|> MapSet.new
{start_pos, digits, grid}
end
end
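# Usage sketch (the input path is hypothetical; `parse/1` expects a list of
# line strings):
#
#     input = "day24/input.txt" |> File.read!() |> String.split("\n", trim: true)
#     Day24.part1(input)
#     Day24.part2(input)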
|
day24/lib/day24.ex
| 0.633637 | 0.681223 |
day24.ex
|
starcoder
|
defmodule Ptolemy.Engines.KV.Engine do
@moduledoc """
  `Ptolemy.Engines.KV` provides interaction with a Vault server's Key Value V2 secrets engine.
"""
require Logger
@doc """
Reads a secret from a remote vault server using Vault's KV engine.
"""
@spec read_secret(Tesla.Client.t(), String.t(), [version: integer()] | []) ::
{:ok, map()} | {:error, String.t()}
def read_secret(client, path, vers \\ []) do
with {:ok, resp} <- Tesla.get(client, "#{path}", query: vers) do
case {resp.status, resp.body} do
{status, body} when status in 200..299 ->
{:ok, body}
{status, _} ->
{:error, "Could not fetch secret in remote vault server. Error code: #{status}"}
end
end
end
@doc """
Creates a new vault secret using vault's KV engine.
"""
  @spec create_secret(Tesla.Client.t(), String.t(), map(), integer() | nil) ::
          {:ok, String.t()} | {:error, String.t()}
def create_secret(client, path, data, cas \\ nil) do
payload = if is_nil(cas), do: %{data: data}, else: %{options: %{cas: cas}, data: data}
with {:ok, resp} <- Tesla.post(client, path, payload) do
case {resp.status, resp.body} do
{status, _} when status in 200..299 ->
{:ok, "KV secret created"}
{status, _} ->
{:error, "Could not create secret in remote vault server. Error code: #{status}"}
end
end
end
@doc """
Deletes a specific set of version(s) belonging to a specific secret.
If a 403 response is received, please check your ACL policy on vault.
"""
@spec delete(Tesla.Client.t(), String.t(), list(integer)) ::
{:ok, String.t()} | {:error, String.t()}
def delete(client, path, vers) do
payload = %{versions: vers}
with {:ok, resp} <- Tesla.post(client, "#{path}", payload) do
case {resp.status, resp.body} do
{status, _} when status in 200..299 ->
{:ok, "KV secret deleted"}
{status, _} ->
{:error,
"Could not delete version(s) of secret in remote vault server. Error code: #{status}"}
end
end
end
@doc """
Destroys a specific set of version(s) belonging to a specific secret.
If a 403 response is received, please check your ACL policy on vault.
"""
@spec destroy(Tesla.Client.t(), String.t(), list(integer)) ::
{:ok, String.t()} | {:error, String.t()}
def destroy(client, path, vers) do
payload = %{versions: vers}
with {:ok, resp} <- Tesla.post(client, "#{path}", payload) do
case {resp.status, resp.body} do
{status, _} when status in 200..299 ->
{:ok, "KV secret destroyed"}
{status, _} ->
{:error,
"Could not destroy version(s) of secret in remote vault server. Error code: #{status}"}
end
end
end
end
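# Usage sketch (hedged): `read_secret/3` expects an already-configured
# `Tesla.Client`; the base URL, token, and secret path below are illustrative.
#
#     middleware = [
#       {Tesla.Middleware.BaseUrl, "https://vault.example.com/v1"},
#       {Tesla.Middleware.Headers, [{"X-Vault-Token", token}]},
#       Tesla.Middleware.JSON
#     ]
#     client = Tesla.client(middleware)
#     {:ok, body} = Ptolemy.Engines.KV.Engine.read_secret(client, "/secret/data/my_app")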
|
lib/engines/kv/kv_engine.ex
| 0.84781 | 0.414306 |
kv_engine.ex
|
starcoder
|
defmodule Membrane.Pipeline.Spec do
@moduledoc """
Structure representing topology of a pipeline. It can be returned from
`Membrane.Pipeline.handle_init/1` callback upon pipeline's initialization.
It will define a topology of children and links that build the pipeline.
## Children
Children that should be spawned when the pipeline starts can be defined
with the `:children` field.
  You have to set it to a keyword list, where each key is an element name
  that is unique within this pipeline and each value is either the element's
  module or a struct of that module.
Sample definitions:
[
first_element: %Element.With.Options.Struct{option_a: 42},
some_element: Element.Without.Options,
other_element: Element.Using.Default.Options
]
When defining children, some additional parameters can be provided by wrapping
child definition with a tuple and putting keyword list of parameters at the end:
[
first_element: {Element.Bare, indexed: true},
second_element: {%Element{opt_a: 42}, indexed: true}
]
Available params are described in `t:child_property_t/0`
## Links
Links that should be made when the pipeline starts, and children are spawned
can be defined with the `:links` field.
You have to set it to a map, where both keys and values are tuples of
`{element_name, pad_name}`. Values can also have additional options passed by
keyword list at the end (See `t:link_option_t/0`).
Element names have to match names given to the `:children` field.
  Once that is done, the pipeline will ensure that the links are present.
Sample definition:
%{
{:source, :output} => {:converter, :input, pull_buffer: [preferred_size: 20_000]},
{:converter, :output} => {:aggregator, :input},
{:aggregator, :output} => {:sink, :input},
}
"""
alias Membrane.Element
alias Membrane.Core.PullBuffer
alias Element.Pad
@type child_spec_t :: module | struct
@typedoc """
Description of all the children elements inside the pipeline
"""
@type children_spec_t ::
[{Membrane.Element.name_t(), child_spec_t}]
| %{Membrane.Element.name_t() => child_spec_t}
@typedoc """
Options available when linking elements in the pipeline
  `:pull_buffer` allows configuring the pull buffer between elements. See `t:Membrane.Core.PullBuffer.props_t/0`
"""
@type link_option_t :: {:pull_buffer, PullBuffer.props_t()}
@type link_from_spec_t :: {Element.name_t(), Pad.name_t()}
@type link_to_spec_t ::
{Element.name_t(), Pad.name_t()}
| {Element.name_t(), Pad.name_t(), [link_option_t]}
@typedoc """
Map describing links between elements
"""
@type links_spec_t :: %{required(link_from_spec_t) => link_to_spec_t}
@typedoc """
Struct used when launching a pipeline
"""
@type t :: %__MODULE__{
children: children_spec_t,
links: links_spec_t
}
defstruct children: [],
links: %{}
end
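# Usage sketch (hedged): a `handle_init/1` returning such a spec might look
# like the following; the element modules are hypothetical and the exact
# return shape varies across Membrane versions.
#
#     def handle_init(_options) do
#       spec = %Membrane.Pipeline.Spec{
#         children: [source: My.Source, sink: My.Sink],
#         links: %{{:source, :output} => {:sink, :input}}
#       }
#       {{:ok, spec}, %{}}
#     end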
|
lib/membrane/pipeline/spec.ex
| 0.892858 | 0.658273 |
spec.ex
|
starcoder
|
defmodule EQC.StateM do
@moduledoc """
This module contains macros to be used with [Quviq
QuickCheck](http://www.quviq.com). It defines Elixir versions of Erlang
functions found in `eqc/include/eqc_statem.hrl`. For detailed documentation of the
functions, please refer to the QuickCheck documentation.
`Copyright (C) Quviq AB, 2014-2016.`
"""
defmacro __using__(_opts) do
quote do
import :eqc_statem, only: [commands: 1, commands: 2,
parallel_commands: 1, parallel_commands: 2,
more_commands: 2,
commands_length: 1]
import EQC.StateM
@file "eqc_statem.hrl"
@compile {:parse_transform, :eqc_group_commands}
@tag eqc_callback: :eqc_statem
end
end
@doc """
Runs a state machine generated command sequence and returns a keyword list with
`:history`, `:state`, and `:result` instead of a tuple.
"""
def run_commands(cmds) do
run_commands(cmds, [])
end
@doc """
  Runs a state machine generated command sequence in which variables are
  substituted from a context given as a keyword list.
Returns a keyword list with
`:history`, `:state`, and `:result` instead of a tuple.
"""
def run_commands(cmds, env) do
{history, state, result} = :eqc_statem.run_commands(cmds, env)
[history: history, state: state, result: result]
end
@doc false
# deprecated
def run_commands(mod, cmds, env) do
{history, state, result} = :eqc_statem.run_commands(mod, cmds, env)
[history: history, state: state, result: result]
end
@doc """
  Runs a state machine generated parallel command sequence and returns a keyword
  list with `:history`, `:state`, and `:result` instead of a tuple. Note that
  there is no actual final state in this case.
"""
def run_parallel_commands(cmds) do
{history, state, result} = :eqc_statem.run_parallel_commands(cmds)
[history: history, state: state, result: result]
end
@doc """
  Runs a state machine generated parallel command sequence in which variables are
  substituted from a context given as a keyword list.
Returns a keyword list with
`:history`, `:state`, and `:result` instead of a tuple. Note that there is no
actual final state in this case.
"""
def run_parallel_commands(cmds, env) do
{history, state, result} = :eqc_statem.run_parallel_commands(cmds, env)
[history: history, state: state, result: result]
end
@doc false
# deprecated
def run_parallel_commands(mod, cmds, env) do
{history, state, result} = :eqc_statem.run_parallel_commands(mod, cmds, env)
[history: history, state: state, result: result]
end
@doc """
When a test case fails, this pretty prints the failing test case.
"""
def pretty_commands(cmds, res, bool)
def pretty_commands([{:model, m} | cmds], res, bool) do
:eqc_gen.with_parameter(:elixir, :true,
:eqc_statem.pretty_commands(m, [{:model, m} | cmds],
{res[:history], res[:state], res[:result]},
bool))
end
@doc false
# deprecated
def pretty_commands(mod, cmds, res, bool) do
:eqc_gen.with_parameter(:elixir, :true,
:eqc_statem.pretty_commands(mod, cmds,
{res[:history], res[:state], res[:result]},
bool))
end
@doc false
# deprecated
def check_commands(mod, cmds, run_result) do
    check_commands(mod, cmds, run_result, [])
  end
@doc false
# deprecated
def check_commands(mod, cmds, res, env) do
:eqc_gen.with_parameter(:elixir, :true,
:eqc_statem.check_commands(mod, cmds,
{res[:history], res[:state], res[:result]},
env))
end
@doc """
Add weights to the commands in a statem specification
## Example
weight _, take: 10, reset: 1
# Choose 10 times more 'take' than 'reset'
weight s, take: 10, reset: s
# The more tickets taken, the more likely reset becomes
"""
defmacro weight(state, cmds) do
for {cmd, w} <- cmds do
quote do
def weight(unquote(state), unquote(cmd)) do unquote(w) end
end
end ++
[ quote do
def weight(_, _) do 1 end
end ]
end
@doc """
Same as `:eqc_statem.command_names/1` but replaces the module name to Elixir style.
"""
def command_names(cmds) do
for {m, f, as} <- :eqc_statem.command_names(cmds) do
{String.to_atom(Enum.join(Module.split(m), ".")), f, as}
end
end
@doc """
Converts the given call expression into a symbolic call.
## Examples
symcall extract_pid(result)
# {:call, __MODULE__, :extract_pid, [result]}
symcall OtherModule.do_something(result, args)
# {:call, OtherModule, :do_something, [result, args]}
"""
defmacro symcall({{:., _, [mod, fun]}, _, args}) do
quote do
{:call, unquote(mod), unquote(fun), unquote(args)}
end
end
defmacro symcall({fun, _, args}) do
quote do
{:call, __MODULE__, unquote(fun), unquote(args)}
end
end
defp replace_var([], binding, seq) do
{Enum.reverse(seq), binding}
end
defp replace_var([{:=, _, [{var, _, _}, {{:., _, [mod, fun]}, _, args}]} | cmds], binding, seq) do
freshvar = {:var, length(seq) + 1}
{callargs, _} = Code.eval_quoted(args, binding, __ENV__)
symbcmd = quote do {:set, unquote(freshvar),
{:call, unquote(mod), unquote(fun), unquote(callargs)}} end
replace_var(cmds, [{var, freshvar}|binding], [symbcmd|seq])
end
defp replace_var([{{:., _, [mod, fun]}, _, args} | cmds], binding, seq) do
freshvar = {:var, length(seq) + 1}
{callargs, _} = Code.eval_quoted(args, binding, __ENV__)
symbcmd = quote do {:set, unquote(freshvar),
{:call, unquote(mod), unquote(fun), unquote(callargs)}} end
replace_var(cmds, binding, [symbcmd|seq])
end
defp replace_var([{:=, _, [{var, _, _}, {fun, _, args}]} | cmds], binding, seq) do
freshvar = {:var, length(seq) + 1}
{callargs, _} = Code.eval_quoted(args, binding, __ENV__)
symbcmd = quote do {:set, unquote(freshvar),
{:call, Macro.escape(__MODULE__), unquote(fun), unquote(callargs)}} end
replace_var(cmds, [{var, freshvar}|binding], [symbcmd|seq])
end
defp replace_var([{fun, _, args} | cmds], binding, seq) when is_atom(fun) do
freshvar = {:var, length(seq) + 1}
{callargs, _} = Code.eval_quoted(args, binding, __ENV__)
symbcmd = quote do {:set, unquote(freshvar),
{:call, Macro.escape(__MODULE__), unquote(fun), unquote(callargs)}} end
replace_var(cmds, binding, [symbcmd|seq])
end
@doc """
Translates test cases of a specific format into a list of commands that is compatible with
`EQC.StateM`.
## Examples
@check same_seat: [
eqc_test do
v1 = book("business")
book("economy")
checkin(2, v1)
bookings()
end ]
"""
defmacro eqc_test([do: cmds]) do
commands = case cmds do
{:__block__, _, block} -> block
nil -> []
cmd -> [cmd]
end
{new_commands, _binding} =
replace_var(commands, [], [])
quote do
[ {:model, __MODULE__} | unquote(new_commands) ]
end
end
end
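# Property sketch (hedged; assumes Quviq QuickCheck with `EQC.ExUnit` and a
# statem callback module, neither of which is shown here):
#
#     property "model commands always succeed" do
#       forall cmds <- commands(__MODULE__) do
#         res = run_commands(cmds)
#         pretty_commands(cmds, res, res[:result] == :ok)
#       end
#     end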
|
lib/eqc/statem.ex
| 0.784897 | 0.549822 |
statem.ex
|
starcoder
|
defmodule NashvilleZoneLookup.Zoning.Zone do
@moduledoc ~S"""
  A `NashvilleZoneLookup.Zoning.Zone` defines and limits acceptable land use for
  property within a zoning district.
Each Zone is assigned an alphanumeric
`:code` that is unique. A Zone is often referred to as simply a "zone".
While Zones are somewhat arbitrary, they usually fit into
a pre-defined `:category`.
A Zone may include a short textual `:description`. This usually
describes the type of property included in this zone and should be a
complete sentence, as it will likely appear in the user interface.
"""
use Ecto.Schema
import Ecto.Changeset
alias NashvilleZoneLookup.Zoning.Zone
# Private Constants
# These categories were manually copied from the column headers in
# https://docs.google.com/spreadsheets/d/1O0Qc8nErSbstCiWpbpRQ0tPMS0NukCmcov2-s_u8Umg/edit#gid=1126820804
@category_agricultural "Agricultural"
@category_residential "Residential"
@category_specific_plan "Specific Plan"
@category_mixed_use "Mixed Use"
@category_office "Office"
@category_commercial "Commercial"
@category_downtown "Downtown"
@category_shopping_center "Shopping Center"
@category_industrial "Industrial"
@categories [
@category_agricultural,
@category_residential,
@category_specific_plan,
@category_mixed_use,
@category_office,
@category_commercial,
@category_downtown,
@category_shopping_center,
@category_industrial
]
schema "zones" do
field(:category, :string)
field(:code, :string)
field(:description, :string)
timestamps()
end
@doc false
def changeset(%Zone{} = zone, attrs) do
zone
|> cast(attrs, [:category, :code, :description])
|> validate_required([:category, :code, :description])
|> validate_inclusion(:category, @categories)
|> unique_constraint(:code)
end
# Public Constants
def categories, do: @categories
def category_agricultural, do: @category_agricultural
def category_residential, do: @category_residential
def category_specific_plan, do: @category_specific_plan
def category_mixed_use, do: @category_mixed_use
def category_office, do: @category_office
def category_commercial, do: @category_commercial
def category_downtown, do: @category_downtown
def category_shopping_center, do: @category_shopping_center
def category_industrial, do: @category_industrial
end
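# Usage sketch: building a changeset for a new zone with one of the public
# category constants (the attribute values are illustrative).
#
#     alias NashvilleZoneLookup.Zoning.Zone
#
#     Zone.changeset(%Zone{}, %{
#       category: Zone.category_residential(),
#       code: "RS5",
#       description: "A residential district for single-family homes."
#     })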
|
lib/nashville_zone_lookup/zoning/zone.ex
| 0.737253 | 0.482612 |
zone.ex
|
starcoder
|
defmodule AWS.ApplicationDiscovery do
@moduledoc """
AWS Application Discovery Service
AWS Application Discovery Service helps you plan application migration projects.
It automatically identifies servers, virtual machines (VMs), and network
dependencies in your on-premises data centers. For more information, see the
[AWS Application Discovery Service FAQ](http://aws.amazon.com/application-discovery/faqs/). Application Discovery
Service offers three ways of performing discovery and collecting data about your
on-premises servers:
* **Agentless discovery** is recommended for environments that use
VMware vCenter Server. This mode doesn't require you to install an agent on each
host. It does not work in non-VMware environments.
* Agentless discovery gathers server information
regardless of the operating systems, which minimizes the time required for
initial on-premises infrastructure assessment.
* Agentless discovery doesn't collect information about
network dependencies, only agent-based discovery collects that information.
* **Agent-based discovery** collects a richer set of data than
agentless discovery by using the AWS Application Discovery Agent, which you
install on one or more hosts in your data center.
* The agent captures infrastructure and application
information, including an inventory of running processes, system performance
information, resource utilization, and network dependencies.
* The information collected by agents is secured at rest
and in transit to the Application Discovery Service database in the cloud.
* **AWS Partner Network (APN) solutions** integrate with Application
Discovery Service, enabling you to import details of your on-premises
environment directly into Migration Hub without using the discovery connector or
discovery agent.
* Third-party application discovery tools can query AWS
Application Discovery Service, and they can write to the Application Discovery
Service database using the public API.
* In this way, you can import data into Migration Hub
and view it, so that you can associate applications with servers and track
migrations.
## Recommendations
We recommend that you use agent-based discovery for non-VMware environments, and
whenever you want to collect information about network dependencies. You can run
agent-based and agentless discovery simultaneously. Use agentless discovery to
complete the initial infrastructure assessment quickly, and then install agents
on select hosts to collect additional information.
## Working With This Guide
This API reference provides descriptions, syntax, and usage examples for each of
the actions and data types for Application Discovery Service. The topic for each
action shows the API request parameters and the response. Alternatively, you can
use one of the AWS SDKs to access an API that is tailored to the programming
language or platform that you're using. For more information, see [AWS SDKs](http://aws.amazon.com/tools/#SDKs).
Remember that you must set your Migration Hub home region before
you call any of these APIs.
You must make API calls for write actions (create, notify,
associate, disassociate, import, or put) while in your home region, or a
`HomeRegionNotSetException` error is returned.
API calls for read actions (list, describe, stop, and delete) are
permitted outside of your home region.
Although it is unlikely, the Migration Hub home region could
change. If you call APIs outside the home region, an `InvalidInputException` is
returned.
You must call `GetHomeRegion` to obtain the latest Migration Hub
home region.
This guide is intended for use with the [AWS Application Discovery Service User Guide](http://docs.aws.amazon.com/application-discovery/latest/userguide/).
All data is handled according to the [AWS Privacy Policy](http://aws.amazon.com/privacy/). You can operate Application Discovery
Service offline to inspect collected data before it is shared with the service.
"""
@doc """
Associates one or more configuration items with an application.
"""
def associate_configuration_items_to_application(client, input, options \\ []) do
request(client, "AssociateConfigurationItemsToApplication", input, options)
end
@doc """
Deletes one or more import tasks, each identified by their import ID.
Each import task has a number of records that can identify servers or
applications.
  AWS Application Discovery Service has built-in matching logic that identifies
  when discovered servers match existing entries that you've previously
  discovered; in that case, the information for the already-existing discovered
  server is updated. When you delete an import task that contains records that were used to
match, the information in those matched records that comes from the deleted
records will also be deleted.
"""
def batch_delete_import_data(client, input, options \\ []) do
request(client, "BatchDeleteImportData", input, options)
end
@doc """
Creates an application with the given name and description.
"""
def create_application(client, input, options \\ []) do
request(client, "CreateApplication", input, options)
end
@doc """
Creates one or more tags for configuration items.
Tags are metadata that help you categorize IT assets. This API accepts a list of
multiple configuration items.
"""
def create_tags(client, input, options \\ []) do
request(client, "CreateTags", input, options)
end
@doc """
Deletes a list of applications and their associations with configuration items.
"""
def delete_applications(client, input, options \\ []) do
request(client, "DeleteApplications", input, options)
end
@doc """
Deletes the association between configuration items and one or more tags.
This API accepts a list of multiple configuration items.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Lists agents or connectors as specified by ID or other filters.
All agents/connectors associated with your user account can be listed if you
call `DescribeAgents` as is without passing any parameters.
"""
def describe_agents(client, input, options \\ []) do
request(client, "DescribeAgents", input, options)
end
@doc """
Retrieves attributes for a list of configuration item IDs.
All of the supplied IDs must be for the same asset type from one of the
following:
server
application
process
connection
Output fields are specific to the asset type specified. For example, the output
for a *server* configuration item includes a list of attributes about the
server, such as host name, operating system, number of network cards, etc.
For a complete list of outputs for each asset type, see [Using the DescribeConfigurations
Action](https://docs.aws.amazon.com/application-discovery/latest/userguide/discovery-api-queries.html#DescribeConfigurations)
in the *AWS Application Discovery Service User Guide*.
"""
def describe_configurations(client, input, options \\ []) do
request(client, "DescribeConfigurations", input, options)
end
@doc """
Lists exports as specified by ID.
All continuous exports associated with your user account can be listed if you
call `DescribeContinuousExports` as is without passing any parameters.
"""
def describe_continuous_exports(client, input, options \\ []) do
request(client, "DescribeContinuousExports", input, options)
end
@doc """
`DescribeExportConfigurations` is deprecated.
Use
[DescribeImportTasks](https://docs.aws.amazon.com/application-discovery/latest/APIReference/API_DescribeExportTasks.html),
instead.
"""
def describe_export_configurations(client, input, options \\ []) do
request(client, "DescribeExportConfigurations", input, options)
end
@doc """
Retrieve status of one or more export tasks.
You can retrieve the status of up to 100 export tasks.
"""
def describe_export_tasks(client, input, options \\ []) do
request(client, "DescribeExportTasks", input, options)
end
@doc """
Returns an array of import tasks for your account, including status information,
times, IDs, the Amazon S3 Object URL for the import file, and more.
"""
def describe_import_tasks(client, input, options \\ []) do
request(client, "DescribeImportTasks", input, options)
end
@doc """
Retrieves a list of configuration items that have tags as specified by the
key-value pairs, name and value, passed to the optional parameter `filters`.
There are three valid tag filter names:
* tagKey
* tagValue
* configurationId
Also, all configuration items associated with your user account that have tags
can be listed if you call `DescribeTags` as is without passing any parameters.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Disassociates one or more configuration items from an application.
"""
def disassociate_configuration_items_from_application(client, input, options \\ []) do
request(client, "DisassociateConfigurationItemsFromApplication", input, options)
end
@doc """
Deprecated.
Use `StartExportTask` instead.
Exports all discovered configuration data to an Amazon S3 bucket or an
application that enables you to view and evaluate the data. Data includes tags
and tag associations, processes, connections, servers, and system performance.
This API returns an export ID that you can query using the
*DescribeExportConfigurations* API. The system imposes a limit of two
configuration exports in six hours.
"""
def export_configurations(client, input, options \\ []) do
request(client, "ExportConfigurations", input, options)
end
@doc """
Retrieves a short summary of discovered assets.
This API operation takes no request parameters and is called as is at the
command prompt as shown in the example.
"""
def get_discovery_summary(client, input, options \\ []) do
request(client, "GetDiscoverySummary", input, options)
end
@doc """
Retrieves a list of configuration items as specified by the value passed to the
required parameter `configurationType`.
Optional filtering may be applied to refine search results.
"""
def list_configurations(client, input, options \\ []) do
request(client, "ListConfigurations", input, options)
end
@doc """
Retrieves a list of servers that are one network hop away from a specified
server.
"""
def list_server_neighbors(client, input, options \\ []) do
request(client, "ListServerNeighbors", input, options)
end
@doc """
Start the continuous flow of agent's discovered data into Amazon Athena.
"""
def start_continuous_export(client, input, options \\ []) do
request(client, "StartContinuousExport", input, options)
end
@doc """
Instructs the specified agents or connectors to start collecting data.
"""
def start_data_collection_by_agent_ids(client, input, options \\ []) do
request(client, "StartDataCollectionByAgentIds", input, options)
end
@doc """
Begins the export of discovered data to an S3 bucket.
If you specify `agentIds` in a filter, the task exports up to 72 hours of
detailed data collected by the identified Application Discovery Agent, including
network, process, and performance details. A time range for exported agent data
may be set by using `startTime` and `endTime`. Export of detailed agent data is
limited to five concurrently running exports.
If you do not include an `agentIds` filter, summary data is exported that
includes both AWS Agentless Discovery Connector data and summary data from AWS
Discovery Agents. Export of summary data is limited to two exports per day.
"""
def start_export_task(client, input, options \\ []) do
request(client, "StartExportTask", input, options)
end
@doc """
Starts an import task, which allows you to import details of your on-premises
environment directly into AWS Migration Hub without having to use the
Application Discovery Service (ADS) tools such as the Discovery Connector or
Discovery Agent.
This gives you the option to perform migration assessment and planning directly
from your imported data, including the ability to group your devices as
applications and track their migration status.
To start an import request, do this:
  1. Download the specially formatted comma separated value (CSV)
  import template, which you can find here:
  [https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv](https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv).
  2. Fill out the template with your server and application data.
  3. Upload your import file to an Amazon S3 bucket, and make a note
  of its Object URL. Your import file must be in the CSV format.
  4. Use the console or the `StartImportTask` command with the AWS CLI
  or one of the AWS SDKs to import the records from your file.
For more information, including step-by-step procedures, see [Migration Hub
Import](https://docs.aws.amazon.com/application-discovery/latest/userguide/discovery-import.html)
in the *AWS Application Discovery Service User Guide*.
There are limits to the number of import tasks you can create (and delete) in an
AWS account. For more information, see [AWS Application Discovery Service Limits](https://docs.aws.amazon.com/application-discovery/latest/userguide/ads_service_limits.html)
in the *AWS Application Discovery Service User Guide*.
"""
def start_import_task(client, input, options \\ []) do
request(client, "StartImportTask", input, options)
end
@doc """
Stop the continuous flow of agent's discovered data into Amazon Athena.
"""
def stop_continuous_export(client, input, options \\ []) do
request(client, "StopContinuousExport", input, options)
end
@doc """
Instructs the specified agents or connectors to stop collecting data.
"""
def stop_data_collection_by_agent_ids(client, input, options \\ []) do
request(client, "StopDataCollectionByAgentIds", input, options)
end
@doc """
Updates metadata about an application.
"""
def update_application(client, input, options \\ []) do
request(client, "UpdateApplication", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "discovery"}
host = build_host("discovery", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSPoseidonService_V2015_11_01.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
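# Usage sketch (hedged): every function takes an `AWS.Client` struct; the
# credential and endpoint fields below are illustrative and mirror the fields
# referenced by `build_host/2` and `build_url/2` above.
#
#     client = %AWS.Client{
#       access_key_id: "AKIA...",
#       secret_access_key: "...",
#       region: "us-east-1",
#       endpoint: "amazonaws.com",
#       proto: "https",
#       port: 443
#     }
#     {:ok, result, _response} = AWS.ApplicationDiscovery.describe_agents(client, %{})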
|
lib/aws/generated/application_discovery.ex
| 0.888027 | 0.571886 |
application_discovery.ex
|
starcoder
|
defmodule Shoehorn.Handler do
@moduledoc """
A behaviour module for implementing handling of failing applications
A Shoehorn.Handler is a module that knows how to respond to specific
applications going down. There are two types of failing applications.
The first is an application that fails to initialize and the second
is an application that stops while it is running.
## Example
  The Shoehorn.Handler behaviour requires developers to implement three
  callbacks.
The `init` callback sets up any state the handler needs.
  The `application_started` callback is called when an application
starts up.
The `application_exited` callback processes the incoming failure and
replies with the reaction that `Shoehorn` should take in case of
application failure.
defmodule Example.ShoehornHandler do
use Shoehorn.Handler
def init(_opts) do
{:ok, %{restart_counts: 0}}
end
def application_started(app, state) do
{:continue, state}
end
def application_exited(:non_essential_app, _reason, state) do
{:continue, state}
end
def application_exited(:essential_app, _reason, %{restart_counts: restart_counts} = state) when restart_counts < 2 do
# repair actions to make before restarting
Application.ensure_all_started(:essential_app)
{:continue, %{state | restart_counts: restart_counts + 1}}
end
      def application_exited(_app, _reason, state) do
        {:halt, state}
      end
end
We initialize our `Shoehorn.Handler` with a restart count for state
by calling `init` with the configuration options from our shoehorn
config. The stored state is passed in from the
`Shoehorn.ApplicationController`.
  When an application starts up, we return `:continue` so the system carries on
  without any special handling.
When we have a non-essential application fail we return `:continue` to
inform the system to keep going like nothing happened.
We restart the essential application of our system two times, and
  then we tell the system to halt if starting over didn't fix the
  system.
"""
@typedoc """
The reaction letting `Shoehorn.ApplicationController` know what to do
* `:continue` - keep the system going like nothing happened
* `:halt` - stop the application and bring the system down
"""
@type reaction :: :continue | :halt
@typedoc """
The cause that is firing the handler
"""
@type cause :: any
@doc """
  Callback to initialize the handler
The callback must return a tuple of `{:ok, state}`. Where state is
the initial state of the handler. The system will halt if the
return is anything other than `:ok`.
"""
@callback init(opts :: map) :: {:ok, state :: any}
@doc """
Callback for handling application crashes
Called with the application name, cause, and the handler's
  state. It must return a tuple containing the `reaction` that the
`Shoehorn.ApplicationController` should take, and the new state
of the handler.
The code that you execute here can be used to notify or capture some
information before halting the system. This information can later
be used to recreate the issue or debug the problem causing the
application to exit.
Use `application_exited` as a place for a last-ditch effort to fix the
issue and restart the application. Ideally, capture
some information on the system state, and solve it upstream. Shoehorn
restarts should be used as a splint to keep a critical system
running.
The default implementation returns the previous state, and a `:halt`
reaction.
"""
@callback application_exited(app :: atom, cause, state :: any) :: {reaction, state :: any}
@doc """
Callback for handling application starts
Called with the application name, and the handler's
state. It must return a tuple containing the `reaction` that the
`Shoehorn.ApplicationController` should take, and the new state
of the handler.
      def application_exited(:essential_app, _reason, %{restart_counts: restart_counts} = state) do
# repair actions to make before restarting
# notify someone of the crash and the details
# log debug data
Application.ensure_all_started(:essential_app)
{:continue, %{state | restart_counts: restart_counts + 1}}
end
The default implementation returns unchanged state, and a `:continue`
reaction.
"""
@callback application_started(app :: atom, state :: any) :: {reaction, state :: any}
defmacro __using__(_opts) do
quote do
@behaviour Shoehorn.Handler
def init(_opts) do
{:ok, :no_state}
end
def application_started(_app, state) do
{:continue, state}
end
def application_exited(_app, _reason, state) do
{:halt, state}
end
defoverridable init: 1, application_started: 2, application_exited: 3
end
end
@type t :: %__MODULE__{module: atom, state: any}
@type opts :: [handler: atom]
defstruct [:module, :state]
@spec init(opts) :: t | no_return
def init(opts) do
module = opts[:handler] || Shoehorn.Handler.Ignore
{:ok, state} = module.init(opts)
%__MODULE__{module: module, state: state}
end
@spec invoke(:application_exited, app :: atom, cause, t) :: {reaction, t}
def invoke(
:application_exited = event,
app,
cause,
%__MODULE__{state: state, module: module} = handler
) do
{reaction, new_state} = apply(module, event, [app, cause, state])
{reaction, %{handler | state: new_state}}
rescue
e ->
IO.puts("Shoehorn handler raised an exception: #{inspect(e)}")
{:halt, state}
end
@spec invoke(:application_started, app :: atom, t) :: {reaction, t}
def invoke(
:application_started = event,
app,
%__MODULE__{state: state, module: module} = handler
) do
{reaction, new_state} = apply(module, event, [app, state])
{reaction, %{handler | state: new_state}}
rescue
e ->
IO.puts("Shoehorn handler raised an exception: #{inspect(e)}")
{:continue, state}
end
end
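# Configuration sketch (hedged): Shoehorn is typically pointed at a custom
# handler via application config; the app name below is illustrative.
#
#     config :shoehorn,
#       app: :my_app,
#       handler: Example.ShoehornHandler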
|
lib/shoehorn/handler.ex
| 0.868924 | 0.614972 |
handler.ex
|
starcoder
|
defmodule Cards do
@moduledoc """
  Provides functions for creating and handling a deck of cards.
"""
@doc """
  You should ignore this function, as it has nothing to do with the project;
  it is merely a leftover that never got deleted.
"""
def hello do
"<NAME>"
end
@doc """
  Please ignore this function, as it has no effect on the project. It exists
  only as practice for writing tests at the onset of this project.
"""
def location(x, y) do
x * y / 4
end
@doc """
Returns a list of strings representing a deck of playing cards.
"""
def create_deck do
values = ["Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Other"]
suits = ["Spades", "Diamond", "Clubs", "Gold", "Silver", "Jubilee", "Rest"]
for suit <- suits, value <- values do
"#{value} of #{suit}"
end
end
def shuffle(deck) do
Enum.shuffle(deck)
end
@doc """
  Returns either `true` or `false` depending on whether a particular card is present in the deck.
## Examples
iex(1)> deck = Cards.create_deck
iex(2)> Cards.confirm(deck, "Ace")
false
iex(3)> Cards.confirm(deck, "Ace of Spades")
true
"""
def confirm(deck, card) do
Enum.member?(deck, card)
end
@doc """
Divides a deck into a hand and the remainder of the deck.
  The `hand_size` argument indicates how many cards should be in the hand.
## Examples
iex(1)> deck = Cards.create_deck
iex(2)> Cards.handle(deck, 5)
iex(3)> {hand, deck} = Cards.handle(deck, 5)
iex(4)> hand
["Ace of Spades", "Two of Spades", "Three of Spades", "Four of Spades",
"Five of Spades"]
"""
def handle(deck, hand_size) do
Enum.split(deck, hand_size)
end
def save(deck, filename) do
binary = :erlang.term_to_binary(deck)
File.write(filename, binary)
end
def load(filename) do
case File.read(filename) do
{:ok, binary} -> :erlang.binary_to_term binary
      {:error, _reason} -> "That file you are looking for is under your bed and not here"
end
end
def create_hand(hand_size) do
Cards.create_deck
|> Cards.shuffle
|> Cards.handle(hand_size)
end
end
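# Usage sketch: deal a five-card hand from a fresh, shuffled deck, then save
# and reload it (the filename is illustrative).
#
#     {hand, _rest} = Cards.create_hand(5)
#     Cards.save(hand, "my_hand")
#     Cards.load("my_hand")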
|
lib/cards.ex
| 0.593963 | 0.555797 |
cards.ex
|
starcoder
|
defmodule EVM.Gas do
@moduledoc """
Functions for interacting wth gas and costs of opscodes.
"""
alias EVM.{
Address,
Configuration,
ExecEnv,
Helpers,
MachineCode,
MachineState,
Operation
}
@type t :: EVM.val()
@type gas_price :: EVM.Wei.t()
# Nothing paid for operations of the set W_zero.
@g_zero 0
# Amount of gas to pay for operations of the set W_base.
@g_base 2
# Amount of gas to pay for operations of the set W_verylow.
@g_verylow 3
# Amount of gas to pay for operations of the set W_low.
@g_low 5
# Amount of gas to pay for operations of the set W_mid.
@g_mid 8
# Amount of gas to pay for operations of the set W_high.
@g_high 10
# Paid for a JUMPDEST operation.
@g_jumpdest 1
# Paid for an SSTORE operation when the storage value is set to non-zero from zero.
@g_sset 20_000
# Paid for an SSTORE operation when the storage value’s zeroness remains unchanged or is set to zero.
@g_sreset 5000
@g_sload 200
# Paid for a CREATE operation.
@g_create 32_000
# Paid per byte for a CREATE operation to succeed in placing code into state.
@g_codedeposit 200
# Paid for a non-zero value transfer as part of the CALL operation.
@g_callvalue 9000
# A stipend for the called contract subtracted from Gcallvalue for a non-zero value transfer.
@g_callstipend 2300
# Paid for a CALL or SELFDESTRUCT operation which creates an account.
@g_newaccount 25_000
# Partial payment for an EXP operation.
@g_exp 10
# Paid for every additional word when expanding memory.
@g_memory 3
  # The divisor of quadratic costs
@g_quad_coeff_div 512
# Paid for every zero byte of data or code for a transaction.
@g_txdatazero 4
# Paid for every non-zero byte of data or code for a transaction.
@g_txdatanonzero 68
# Paid for every transaction.
@g_transaction 21_000
# Partial payment for a LOG operation.
@g_log 375
# Paid for each byte in a LOG operation’s data.
@g_logdata 8
# Paid for each topic of a LOG operation.
@g_logtopic 375
# Paid for each SHA3 operation.
@g_sha3 30
# Paid for each word (rounded up) for input data to a SHA3 operation.
@g_sha3word 6
# Partial payment for *COPY operations, multiplied by words copied, rounded up.
@g_copy 3
# Payment for BLOCKHASH operation
@g_blockhash 20
@g_extcodehash 400
@w_zero_instr [:stop, :return, :revert]
@w_base_instr [
:address,
:origin,
:caller,
:callvalue,
:calldatasize,
:codesize,
:gasprice,
:coinbase,
:timestamp,
:number,
:difficulty,
:gaslimit,
:pop,
:pc,
:msize,
:gas,
:returndatasize
]
@push_instrs Enum.map(0..32, fn n -> :"push#{n}" end)
@dup_instrs Enum.map(0..16, fn n -> :"dup#{n}" end)
@swap_instrs Enum.map(0..16, fn n -> :"swap#{n}" end)
@w_very_low_instr [
:add,
:sub,
:calldatacopy,
:codecopy,
:not_,
:lt,
:gt,
:slt,
:sgt,
:eq,
:iszero,
:and_,
:or_,
:xor_,
:byte,
:calldataload,
:mload,
:mstore,
:mstore8,
:shl,
:shr,
:sar
] ++ @push_instrs ++ @dup_instrs ++ @swap_instrs
@w_low_instr [:mul, :div, :sdiv, :mod, :smod, :signextend]
@w_mid_instr [:addmod, :mulmod, :jump]
@w_high_instr [:jumpi]
@call_operations [:call, :callcode, :delegatecall, :staticcall]
@doc """
Returns the cost to execute the given a cycle of the VM. This is defined
  in Appendix H of the Yellow Paper, Eq. (294), and is denoted `C`.
## Examples
# TODO: Figure out how to hand in state
iex> EVM.Gas.cost(%EVM.MachineState{}, %EVM.ExecEnv{})
0
"""
@spec cost(MachineState.t(), ExecEnv.t()) :: t
def cost(machine_state, exec_env) do
case cost_with_status(machine_state, exec_env) do
{:original, cost} -> cost
{:changed, value, _} -> value
end
end
@spec cost_with_status(MachineState.t(), ExecEnv.t()) :: {:original, t} | {:changed, t, t}
def cost_with_status(machine_state, exec_env) do
operation = MachineCode.current_operation(machine_state, exec_env)
inputs = Operation.inputs(operation, machine_state)
operation_cost = operation_cost(operation.sym, inputs, machine_state, exec_env)
memory_cost = memory_cost(operation.sym, inputs, machine_state)
gas_cost = memory_cost + operation_cost
if exec_env.config.should_fail_nested_operation_lack_of_gas do
{:original, gas_cost}
else
gas_cost_for_nested_operation(operation.sym,
inputs: inputs,
original_cost: gas_cost,
machine_state: machine_state
)
end
end
def memory_cost(:calldatacopy, [memory_offset, _call_data_start, length], machine_state) do
memory_expansion_cost(machine_state, memory_offset, length)
end
def memory_cost(:extcodecopy, [_address, mem_offset, _code_offset, length], machine_state) do
if mem_offset + length > EVM.max_int() do
0
else
memory_expansion_cost(machine_state, mem_offset, length)
end
end
def memory_cost(:returndatacopy, [mem_offset, _code_offset, length], machine_state) do
if mem_offset + length > EVM.max_int() do
0
else
memory_expansion_cost(machine_state, mem_offset, length)
end
end
def memory_cost(:codecopy, [memory_offset, _code_offset, length], machine_state) do
memory_expansion_cost(machine_state, memory_offset, length)
end
def memory_cost(:mload, [memory_offset], machine_state) do
memory_expansion_cost(machine_state, memory_offset, 32)
end
def memory_cost(:mstore8, [memory_offset, _value], machine_state) do
memory_expansion_cost(machine_state, memory_offset, 1)
end
def memory_cost(:sha3, [memory_offset, length], machine_state) do
memory_expansion_cost(machine_state, memory_offset, length)
end
def memory_cost(:mstore, [memory_offset, _value], machine_state) do
memory_expansion_cost(machine_state, memory_offset, 32)
end
def memory_cost(:call, stack_args, machine_state) do
call_memory_cost(stack_args, machine_state)
end
def memory_cost(:callcode, stack_args, machine_state) do
call_memory_cost(stack_args, machine_state)
end
def memory_cost(:staticcall, stack_args, machine_state) do
call_memory_cost(stack_args, machine_state)
end
def memory_cost(:delegatecall, stack_args, machine_state) do
stack_args = List.insert_at(stack_args, 2, 0)
call_memory_cost(stack_args, machine_state)
end
def memory_cost(:create, [_value, in_offset, in_length], machine_state) do
memory_expansion_cost(machine_state, in_offset, in_length)
end
def memory_cost(:create2, [_value, in_offset, in_length, _salt], machine_state) do
memory_expansion_cost(machine_state, in_offset, in_length) +
@g_sha3word * MathHelper.bits_to_words(in_length)
end
def memory_cost(:return, [offset, length], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(:revert, [offset, length], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(:log0, [offset, length | _], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(:log1, [offset, length | _], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(:log2, [offset, length | _], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(:log3, [offset, length | _], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(:log4, [offset, length | _], machine_state) do
memory_expansion_cost(machine_state, offset, length)
end
def memory_cost(_operation, _inputs, _machine_state), do: 0
@spec call_memory_cost(Operation.stack_args(), MachineState.t()) :: t
defp call_memory_cost(
params,
machine_state
) do
[in_offset, in_length, out_offset, out_length] = Enum.take(params, -4)
out_memory_cost = memory_expansion_cost(machine_state, out_offset, out_length)
in_memory_cost = memory_expansion_cost(machine_state, in_offset, in_length)
max(out_memory_cost, in_memory_cost)
end
# From Eq. (294): C_mem(μ′_i) − C_mem(μ_i)
def memory_expansion_cost(machine_state, offset, length) do
memory_expansion_value = memory_expansion_value(machine_state.active_words, offset, length)
if memory_expansion_value > machine_state.active_words do
quadratic_memory_cost(memory_expansion_value) -
quadratic_memory_cost(machine_state.active_words)
else
0
end
end
# Eq. (223)
def memory_expansion_value(
# s
active_words,
# f
offset,
# l
size
) do
if size == 0 do
active_words
else
max(active_words, round(:math.ceil((offset + size) / 32)))
end
end
# Eq. (296)
def quadratic_memory_cost(a) do
linear_cost = a * @g_memory
quadratic_cost = MathHelper.floor(:math.pow(a, 2) / @g_quad_coeff_div)
linear_cost + quadratic_cost
end
@doc """
Returns the operation cost for every possible operation.
This is defined in Appendix H of the Yellow Paper.
"""
@spec operation_cost(atom(), list(EVM.val()), MachineState.t(), ExecEnv.t()) :: t | nil
def operation_cost(operation, inputs, machine_state, exec_env)
def operation_cost(:exp, [_base, exponent], _machine_state, exec_env) do
@g_exp + exec_env.config.exp_byte_cost * MathHelper.integer_byte_size(exponent)
end
def operation_cost(:codecopy, [_memory_offset, _code_offset, length], _machine_state, _exec_env) do
@g_verylow + @g_copy * MathHelper.bits_to_words(length)
end
def operation_cost(
:calldatacopy,
[_memory_offset, _code_offset, length],
_machine_state,
_exec_env
) do
@g_verylow + @g_copy * MathHelper.bits_to_words(length)
end
def operation_cost(
:extcodecopy,
[_address, _code_offset, _mem_offset, length],
_machine_state,
exec_env
) do
exec_env.config.extcodecopy_cost + @g_copy * MathHelper.bits_to_words(length)
end
def operation_cost(
:returndatacopy,
[_memory_offset, _code_offset, length],
_machine_state,
_exec_env
) do
@g_verylow + @g_copy * MathHelper.bits_to_words(length)
end
  def operation_cost(:sha3, [_offset, length], _machine_state, _exec_env) do
    @g_sha3 + @g_sha3word * MathHelper.bits_to_words(length)
end
def operation_cost(:sstore, [key, new_value], _machine_state, exec_env) do
if exec_env.config.eip1283_sstore_gas_cost_changed do
eip1283_sstore_gas_cost([key, new_value], exec_env)
else
basic_sstore_gas_cost([key, new_value], exec_env)
end
end
def operation_cost(:selfdestruct, [address | _], _, exec_env) do
address = Address.new(address)
is_new_account =
cond do
!exec_env.config.empty_account_value_transfer &&
ExecEnv.non_existent_account?(exec_env, address) ->
true
exec_env.config.empty_account_value_transfer &&
ExecEnv.non_existent_or_empty_account?(exec_env, address) &&
ExecEnv.get_balance(exec_env) > 0 ->
true
true ->
false
end
Configuration.for(exec_env.config).selfdestruct_cost(exec_env.config,
new_account: is_new_account
)
end
def operation_cost(
:call,
[call_gas, to_address, value, _in_offset, _in_length, _out_offset, _out_length],
_machine_state,
exec_env
) do
to_address = Address.new(to_address)
exec_env.config.call_cost + call_value_cost(value) +
new_account_cost(exec_env, to_address, value) + call_gas
end
def operation_cost(
:staticcall,
[gas_limit, to_address, _in_offset, _in_length, _out_offset, _out_length],
_machine_state,
exec_env
) do
to_address = Address.new(to_address)
value = 0
exec_env.config.call_cost + new_account_cost(exec_env, to_address, value) + gas_limit
end
def operation_cost(
:delegatecall,
[gas_limit, _to_address, _in_offset, _in_length, _out_offset, _out_length],
_machine_state,
exec_env
) do
exec_env.config.call_cost + gas_limit
end
def operation_cost(
:callcode,
[gas_limit, _to_address, value, _in_offset, _in_length, _out_offset, _out_length],
_machine_state,
exec_env
) do
exec_env.config.call_cost + call_value_cost(value) + gas_limit
end
def operation_cost(:log0, [_offset, size | _], _machine_state, _exec_env) do
@g_log + @g_logdata * size
end
def operation_cost(:log1, [_offset, size | _], _machine_state, _exec_env) do
@g_log + @g_logdata * size + @g_logtopic
end
def operation_cost(:log2, [_offset, size | _], _machine_state, _exec_env) do
@g_log + @g_logdata * size + @g_logtopic * 2
end
def operation_cost(:log3, [_offset, size | _], _machine_state, _exec_env) do
@g_log + @g_logdata * size + @g_logtopic * 3
end
def operation_cost(:log4, [_offset, size | _], _machine_state, _exec_env) do
@g_log + @g_logdata * size + @g_logtopic * 4
end
# credo:disable-for-next-line
def operation_cost(operation, _inputs, _machine_state, exec_env) do
cond do
operation in @w_very_low_instr ->
@g_verylow
operation in @w_zero_instr ->
@g_zero
operation in @w_base_instr ->
@g_base
operation in @w_low_instr ->
@g_low
operation in @w_mid_instr ->
@g_mid
operation in @w_high_instr ->
@g_high
operation == :extcodesize ->
exec_env.config.extcodecopy_cost
operation == :create ->
@g_create
operation == :create2 ->
@g_create
operation == :blockhash ->
@g_blockhash
operation == :balance ->
exec_env.config.balance_cost
operation == :sload ->
exec_env.config.sload_cost
operation == :jumpdest ->
@g_jumpdest
operation == :extcodehash ->
@g_extcodehash
true ->
0
end
end
@spec callstipend() :: integer()
def callstipend do
@g_callstipend
end
@spec codedeposit_cost() :: integer()
def codedeposit_cost do
@g_codedeposit
end
defp call_value_cost(0), do: 0
defp call_value_cost(_), do: @g_callvalue
defp new_account_cost(exec_env, address, value) do
cond do
!exec_env.config.empty_account_value_transfer &&
ExecEnv.non_existent_account?(exec_env, address) ->
@g_newaccount
exec_env.config.empty_account_value_transfer && value > 0 &&
ExecEnv.non_existent_or_empty_account?(exec_env, address) ->
@g_newaccount
true ->
0
end
end
@doc """
Returns the gas cost for G_txdata{zero, nonzero} as defined in
Appendix G (Fee Schedule) of the Yellow Paper.
This implements `g_txdatazero` and `g_txdatanonzero`
## Examples
iex> EVM.Gas.g_txdata(<<1, 2, 3, 0, 4, 5>>)
5 * 68 + 4
iex> EVM.Gas.g_txdata(<<0>>)
4
iex> EVM.Gas.g_txdata(<<0, 0>>)
8
iex> EVM.Gas.g_txdata(<<>>)
0
"""
@spec g_txdata(binary()) :: t
def g_txdata(data) do
for <<byte <- data>> do
case byte do
0 -> @g_txdatazero
_ -> @g_txdatanonzero
end
end
|> Enum.sum()
end
@doc "Paid by all contract-creating transactions after the Homestead transition."
@spec g_txcreate() :: t
def g_txcreate, do: @g_create
@doc "Paid for every transaction."
@spec g_transaction() :: t
def g_transaction, do: @g_transaction
@spec g_sreset() :: t
def g_sreset, do: @g_sreset
@spec g_sset() :: t
def g_sset, do: @g_sset
@spec g_sload() :: t
def g_sload, do: @g_sload
# EIP150
@spec gas_cost_for_nested_operation(atom(), keyword()) ::
{:original, t()} | {:changed, t(), t()}
defp gas_cost_for_nested_operation(
operation,
inputs: inputs,
original_cost: original_cost,
machine_state: machine_state
) do
if operation in @call_operations do
stack_exec_gas = List.first(inputs)
call_cost_without_exec_gas = original_cost - stack_exec_gas
remaining_gas = machine_state.gas - call_cost_without_exec_gas
if remaining_gas >= 0 do
new_call_gas = Helpers.all_but_one_64th(remaining_gas)
new_gas_cost = new_call_gas + call_cost_without_exec_gas
if new_gas_cost < original_cost do
{:changed, new_gas_cost, new_call_gas}
else
{:original, original_cost}
end
else
# will fail in EVM.Functions.is_exception_halt?
{:original, original_cost}
end
else
{:original, original_cost}
end
end
defp eip1283_sstore_gas_cost([key, new_value], exec_env) do
initial_value = get_initial_value(exec_env, key)
current_value = get_current_value(exec_env, key)
cond do
current_value == new_value -> @g_sload
initial_value == current_value && initial_value == 0 -> @g_sset
initial_value == current_value && initial_value != 0 -> @g_sreset
true -> @g_sload
end
end
defp basic_sstore_gas_cost([key, new_value], exec_env) do
case ExecEnv.get_storage(exec_env, key) do
:account_not_found ->
@g_sset
:key_not_found ->
if new_value != 0 do
@g_sset
else
@g_sreset
end
{:ok, value} ->
if new_value != 0 && value == 0 do
@g_sset
else
@g_sreset
end
end
end
defp get_initial_value(exec_env, key) do
case ExecEnv.get_initial_storage(exec_env, key) do
:account_not_found -> 0
:key_not_found -> 0
{:ok, value} -> value
end
end
defp get_current_value(exec_env, key) do
case ExecEnv.get_storage(exec_env, key) do
:account_not_found -> 0
:key_not_found -> 0
{:ok, value} -> value
end
end
end
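# Usage sketch: the intrinsic cost of a simple transaction can be assembled
# from the public fee helpers above (the payload is illustrative).
#
#     data = <<1, 2, 3, 0>>
#     EVM.Gas.g_transaction() + EVM.Gas.g_txdata(data)
#     # => 21_000 + 3 * 68 + 4 = 21_208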
|
apps/evm/lib/evm/gas.ex
| 0.819605 | 0.483283 |
gas.ex
|
starcoder
|
defmodule Bitcoin.Base58Check do
@moduledoc """
Base58Check encoding.
Base58Check is used in Bitcoin addresses and WIF.
It's a Base58 where additional 4 checksum bytes are appended to the payload
before encoding (and stripped and checked when decoding).
Checksum is first 4 bytes from the double sha256 of the payload.
"""
# Base58 alphabet, without 0,O,l,I
@code '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
@code_0 @code |> List.first
# optimization to avoid browsing code list each time, with this we have O(1)
@num_to_code @code |> Enum.with_index |> Enum.map(fn {k,v} -> {v,k} end) |> Enum.into(%{})
@code_to_num @code |> Enum.with_index |> Enum.into(%{})
@doc """
Encode binary into Base58Check.
"""
@spec encode(binary) :: String.t
def encode(payload) do
payload
|> Binary.append(payload |> checksum)
|> base_encode
end
@doc """
Decode Base58Check string into binary.
  Returns an `{:ok, binary}` tuple in case of success, otherwise an `{:error, err}` tuple.
"""
@spec decode(String.t) :: {:ok, binary} | {:error, term}
def decode(string) do
with {:ok, bin} <- base_decode(string),
{:ok, payload} <- validate_checksum(bin),
do: {:ok, payload}
end
@doc """
  Just like `decode/1`, but raises an exception in case of an error.
"""
@spec decode!(String.t) :: binary
def decode!(string) do
{:ok, payload} = string |> decode
payload
end
@doc """
Returns true if the string is a valid Base58Check encoding.
"""
@spec valid?(String.t) :: boolean
def valid?(string) do
# we need to decode anyway to validate the checksum
{result, _payload} = string |> decode
result == :ok
end
@doc """
Encode binary payload in Base58.
"""
@spec base_encode(binary) :: String.t
def base_encode(payload)
# Convert leading zeros separately, because they would be lost in to_integer conversion
def base_encode(<<0>> <> payload) when byte_size(payload) > 0, do: base_encode(<<0>>) <> base_encode(payload)
# Handle special case because "" would be interpreted as 0, same as <<0>>
def base_encode(""), do: ""
# Actual Base58 encoding
def base_encode(payload) do
payload
|> Binary.to_integer
|> Integer.digits(58)
|> Enum.map(& @num_to_code[&1])
|> Binary.from_list
end
@doc """
Decode Base58 encoded string into binary.
Returns `{:ok, binary}` if decoding was successful or `{:error, :invalid_character}` if some
character outside the alphabet was found.
"""
@spec base_decode(String.t) :: {:ok, binary} | {:error, term}
def base_decode(string) do
case base_valid?(string) do
true -> {:ok, string |> base_decode!}
false -> {:error, :invalid_character}
end
end
@doc """
Same as `base_decode/1` but returns the binary without the tuple and raises an exception in case of an error.
"""
@spec base_decode!(String.t) :: binary
def base_decode!(string)
# Decode leading zeros separately, otherwise they would be lost in the from_integer conversion
def base_decode!(<<@code_0>> <> string) when byte_size(string) > 0, do: base_decode!(<<@code_0>>) <> base_decode!(string)
# Handle special case because Integer.undigits([]) == 0
def base_decode!(""), do: ""
# Actual Base58 decoding
def base_decode!(string) do
string
|> Binary.to_list
|> Enum.map(& @code_to_num[&1])
|> Integer.undigits(58)
|> Binary.from_integer
end
@doc """
Check if the string is a valid Base58 encoding.
"""
@spec base_valid?(String.t) :: boolean
def base_valid?(string)
def base_valid?(""), do: true
def base_valid?(<<char>> <> string), do: char in @code && base_valid?(string)
@spec validate_checksum(binary) :: {:ok, binary} | {:error, :invalid_checksum}
defp validate_checksum(bin) do
{payload, checksum} = Binary.split_at(bin, -4)
if checksum(payload) == checksum, do: {:ok, payload}, else: {:error, :invalid_checksum}
end
defp checksum(payload), do: payload |> Bitcoin.Util.double_sha256 |> Binary.take(4)
end
|
lib/bitcoin/base58_check.ex
| 0.859561 | 0.420838 |
base58_check.ex
|
starcoder
|
defmodule Parsey do
@moduledoc """
A library to setup basic parsing requirements for non-complex nested
inputs.
Parsing behaviours are defined using rulesets; these sets take the format
of <code class="inline">[<a href="#t:rule/0">rule</a>]</code>. Rulesets are
matched against in the order defined. The first rule in the set will have a
higher priority than the last rule in the set.
A <code class="inline"><a href="#t:rule/0">rule</a></code> is a named matching
expression. The name of a rule can be any atom, and multiple rules can share
the same name. The matching expression can be either a Regex or a function.
Rules may additionally be configured to specify the additional options that
will be returned in the <code class="inline"><a href="#t:ast/0">ast</a></code>,
or the ruleset modification behaviour (what rules to exclude, include or
re-define), and if the rule should be ignored (not added to the
<code class="inline"><a href="#t:ast/0">ast</a></code>).
The default behaviour of a matched rule is to remove all rules with the same
name from the ruleset, and then try to further match the matched input with the
new ruleset, returning the <code class="inline"><a href="#t:ast/0">ast</a></code>
on completion.
The behaviour of matchers (this applies to both regexes and functions) is to return a
list of indices `[{ index, length }]` where the first `List.first` tuple in
the list is used to indicate the portion of the input to be removed, while
the last `List.last` is used to indicate the portion of the input to be
focused on (parsed further).
"""
@type name :: atom
@type matcher :: Regex.t | (String.t -> (nil | [{ integer, integer }]))
@type formatter :: String.t | (String.t -> String.t)
@type option :: any
@type excluder :: name | { name, option }
@type rule :: { name, matcher } | { name, %{ :match => matcher, :capture => non_neg_integer, :format => formatter, :option => option, :ignore => boolean, :skip => boolean, :exclude => excluder | [excluder], :include => rule | [rule], :rules => rule | [rule] } }
@type ast :: String.t | { name, [ast] } | { name, [ast], option }
@doc """
Parse the given input using the specified ruleset.
Example
-------
iex> rules = [
...> whitespace: %{ match: ~r/\\A\\s/, ignore: true },
...> element_end: %{ match: ~r/\\A<\\/.*?>/, ignore: true },
...> element: %{ match: fn
...> input = <<"<", _ :: binary>> ->
...> elements = String.splitter(input, "<", trim: true)
...>
...> [first] = Enum.take(elements, 1)
...> [{ 0, tag_length }] = Regex.run(~r/\\A.*?>/, first, return: :index)
...> tag_length = tag_length + 1
...>
...> { 0, length } = Stream.drop(elements, 1) |> Enum.reduce_while({ 1, 0 }, fn
...> element = <<"/", _ :: binary>>, { 1, length } ->
...> [{ 0, tag_length }] = Regex.run(~r/\\A.*?>/, element, return: :index)
...> { :halt, { 0, length + tag_length + 1 } }
...> element = <<"/", _ :: binary>>, { count, length } -> { :cont, { count - 1, length + String.length(element) + 1 } }
...> element, { count, length } -> { :cont, { count + 1, length + String.length(element) + 1 } }
...> end)
...>
...> length = length + String.length(first) + 1
...> [{ 0, length }, {1, tag_length - 2}, { tag_length, length - tag_length }]
...> _ -> nil
...> end, exclude: nil, option: fn input, [_, { index, length }, _] -> String.slice(input, index, length) end },
...> value: %{ match: ~r/\\A\\d+/, rules: [] }
...> ]
iex> input = \"\"\"
...> <array>
...> <integer>1</integer>
...> <integer>2</integer>
...> </array>
...> <array>
...> <integer>3</integer>
...> <integer>4</integer>
...> </array>
...> \"\"\"
iex> Parsey.parse(input, rules)
[
{ :element, [
{ :element, [value: ["1"]], "integer" },
{ :element, [value: ["2"]], "integer" }
], "array" },
{ :element, [
{ :element, [value: ["3"]], "integer" },
{ :element, [value: ["4"]], "integer" }
], "array" },
]
"""
@spec parse(String.t, [rule]) :: [ast]
def parse(input, rules), do: parse(input, rules, [])
@doc false
@spec parse(String.t, [rule], [ast]) :: [ast]
defp parse("", _, nodes), do: flatten(nodes)
defp parse(input, rules, [string|nodes]) when is_binary(string) do
case get_node(input, rules) do
{ next, node } -> parse(next, rules, [node, string|nodes])
nil -> parse(String.slice(input, 1..-1), rules, [string <> String.first(input)|nodes])
end
end
defp parse(input, rules, [nil|nodes]) do
case get_node(input, rules) do
{ next, nil } -> parse(next, rules, [nil|nodes])
{ next, node } -> parse(next, rules, [node|nodes])
nil -> parse(String.slice(input, 1..-1), rules, [String.first(input)|nodes])
end
end
defp parse(input, rules, nodes) do
case get_node(input, rules) do
{ next, nil } -> parse(next, rules, nodes)
{ next, node } -> parse(next, rules, [node|nodes])
nil -> parse(String.slice(input, 1..-1), rules, [String.first(input)|nodes])
end
end
@doc false
@spec flatten([ast | nil], [ast]) :: [ast]
defp flatten(nodes, list \\ [])
defp flatten([], nodes), do: nodes
defp flatten([nil|nodes], list), do: flatten(nodes, list)
defp flatten([node|nodes], list) when is_list(node), do: flatten(nodes, node ++ list)
defp flatten([node|nodes], list), do: flatten(nodes, [node|list])
@doc false
@spec get_node(String.t, [rule]) :: { String.t, ast | nil } | nil
defp get_node(input, rules) do
Enum.find_value(rules, fn
rule = { _, regex = %Regex{} } -> make_node(input, rule, Regex.run(regex, input, return: :index), rules)
rule = { _, %{ match: regex = %Regex{} } } -> make_node(input, rule, Regex.run(regex, input, return: :index), rules)
rule = { _, %{ match: func } } -> make_node(input, rule, func.(input), rules)
rule = { _, func } -> make_node(input, rule, func.(input), rules)
end)
end
@doc false
@spec make_node(String.t, rule, nil | [{ integer, integer }], [rule]) :: { String.t, ast } | nil
defp make_node(_, _, nil, _), do: nil
defp make_node(input, rule = { _, %{ capture: capture } }, indexes, rules), do: make_node(input, rule, indexes, Enum.at(indexes, capture), rules)
defp make_node(input, rule, indexes, rules), do: make_node(input, rule, indexes, List.last(indexes), rules)
@doc false
@spec make_node(String.t, rule, [{ integer, integer }], { integer, integer }, [rule]) :: { String.t, ast }
defp make_node(input, rule, indexes = [{ entire_index, entire_length }|_], { index, length }, rules) do
match_total = entire_index + entire_length
<<_ :: unit(8)-size(match_total), next :: binary>> = input
{ next, node(format(binary_part(input, index, length), rule), rule, remove_rules(rules, rule) |> include_rules(rule) |> replace_rules(rule), input, indexes) }
end
@doc false
@spec node(String.t, rule, [rule], String.t, [{ integer, integer }]) :: ast | nil
defp node(_, { _, %{ ignore: true } }, _, _, _), do: nil
defp node(input, { _, %{ skip: true } }, rules, _, _), do: parse(input, rules)
defp node(input, { name, %{ option: option } }, rules, original, indexes) when is_function(option), do: { name, parse(input, rules), option.(original, indexes) }
defp node(input, { name, %{ option: option } }, rules, _, _), do: { name, parse(input, rules), option }
defp node(input, { name, _ }, rules, _, _), do: { name, parse(input, rules) }
@doc false
@spec remove_rules([rule], rule) :: [rule]
defp remove_rules(rules, { _, %{ exclude: { name, option } } }) do
Enum.filter(rules, fn
{ rule, %{ option: rule_option } } ->
rule != name or rule_option != option
_ -> true
end)
end
defp remove_rules(rules, { _, %{ exclude: name } }) when is_atom(name), do: Enum.filter(rules, fn { rule, _ } -> rule != name end)
defp remove_rules(rules, { _, %{ exclude: names } }) do
Enum.filter(rules, fn
{ rule, %{ option: rule_option } } -> !Enum.any?(names, fn
{ name, option } -> rule == name and rule_option == option
name -> rule == name
end)
{ rule, _ } -> !Enum.any?(names, fn
{ _name, _option } -> false
name -> rule == name
end)
end)
end
defp remove_rules(rules, { name, _ }), do: Enum.filter(rules, fn { rule, _ } -> rule != name end)
@doc false
@spec include_rules([rule], rule) :: [rule]
defp include_rules(rules, { _, %{ include: new_rules } }), do: new_rules ++ rules
defp include_rules(rules, _), do: rules
@doc false
@spec replace_rules([rule], rule) :: [rule]
defp replace_rules(_, { _, %{ rules: new_rules } }), do: new_rules
defp replace_rules(rules, _), do: rules
@doc false
@spec format(String.t, rule) :: String.t
defp format(_, { _, %{ format: string } }) when is_binary(string), do: string
defp format(input, { _, %{ format: func } }), do: func.(input)
defp format(input, _), do: input
end
|
lib/parsey.ex
| 0.845544 | 0.680174 |
parsey.ex
|
starcoder
|
defmodule Day9 do
def run(lines) do
with input = parse_input(lines),
[p1] = part1(input),
p2 = part2(p1, input) do
"part1: #{p1} part2: #{p2}"
end
end
def part1({preamble_size, input}) do
for index <- preamble_size..(Kernel.map_size(input) - 1),
low_index = index - preamble_size,
high_index = low_index + preamble_size - 1,
not is_sum_of_two(input[index], low_index, high_index, input) do
input[index]
end
end
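# Checks every pair within the preamble window; the comprehension reduces to
# true if any pair of distinct entries sums to the target value.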
def is_sum_of_two(value, low_index, high_index, data) do
for index_a <- low_index..(high_index - 1),
index_b <- (index_a + 1)..high_index,
value == data[index_a] + data[index_b],
reduce: false do
_ -> true
end
end
def part2(target, {_, data}) do
with subseq = find_subsequence_with_sum(target, data) do
Enum.min(subseq) + Enum.max(subseq)
end
end
@doc """
Finds a contiguous run of values in an index-keyed map that
sums to a given target, returning the list of matching values.
## Examples:
iex> Day9.find_subsequence_with_sum(6, %{0 => 1, 1 => 2, 2 => 4, 3 => 8})
[2, 4]
"""
def find_subsequence_with_sum(target, data) do
find_subsequence_with_sum(target, 0, 0, data[0], Kernel.map_size(data) - 1, data)
end
def find_subsequence_with_sum(target, low, high, sum, max, data) do
cond do
target == sum ->
for i <- low..high do
data[i]
end
sum < target ->
if high < max do
find_subsequence_with_sum(target, low, high + 1, sum + data[high + 1], max, data)
else
nil
end
sum > target ->
find_subsequence_with_sum(target, low + 1, high, sum - data[low], max, data)
end
end
@spec parse_input(nonempty_maybe_improper_list) :: {integer, any}
@doc """
Parse the input, producing two things: the size of the preamble,
and a map from index to input number.
## Examples
iex> Day9.parse_input(["1", "3", "400"])
{1, %{0 => 3, 1 => 400}}
"""
def parse_input([first | rest]) do
{String.to_integer(first), lines_to_map(rest)}
end
def lines_to_map(lines) do
lines
|> Enum.map(&String.to_integer/1)
|> Enum.with_index()
|> Enum.into(%{}, fn {value, index} -> {index, value} end)
end
end
|
elixir_advent/lib/day9.ex
| 0.555435 | 0.66741 |
day9.ex
|
starcoder
|
defmodule Legion.Identity.Information.PersonalData do
@moduledoc """
Represents personal information of the user.
## Schema fields
- `:given_name`: A name given to an individual to differentiate them from members of a group or family. May be referred to as a "first name" or "forename".
- `:middle_name`: A name given to an individual to differentiate them from members of a group or family whose members share the same given name.
- `:family_name`: A name given to an individual to represent their family or group.
- `:name_prefix`: Prefix used when addressing the individual, such as "Mr.".
- `:name_postfix`: Postfix used when addressing the individual, such as "-san", which may expand to something like "Suguro-san".
- `:nickname`: A nickname given to an individual, generally used informally.
- `:phonetic_representation`: Phonetic representation of the name of the user.
- `:nationality_abbreviation`: Nationality of a user, e.g. "sa" for *Saudi Arabian*.
- `:gender`: Gender of a user.
"""
use Legion.Stereotype, :model
alias Legion.Identity.Information.Registration, as: User
alias Legion.Identity.Information.{Gender, Nationality}
@env Application.get_env(:legion, Legion.Identity.Information.PersonalData)
@given_name_len Keyword.fetch!(@env, :given_name_length)
@middle_name_len Keyword.fetch!(@env, :middle_name_length)
@family_name_len Keyword.fetch!(@env, :family_name_length)
@name_prefix_len Keyword.fetch!(@env, :name_prefix_length)
@name_postfix_len Keyword.fetch!(@env, :name_postfix_length)
@nickname_len Keyword.fetch!(@env, :nickname_length)
@phonetic_representation_len Keyword.fetch!(@env, :phonetic_representation_length)
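# The *_len module attributes above are ranges read from the application
# environment at compile time, e.g. (illustrative values only):
#
#   config :legion, Legion.Identity.Information.PersonalData,
#     given_name_length: 1..80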
@primary_key {:user_id, :integer, autogenerate: false}
schema "user_personal_information" do
belongs_to :user, User, define_field: false
field :given_name, :string
field :middle_name, :string
field :family_name, :string
field :name_prefix, :string
field :name_postfix, :string
field :nickname, :string
field :phonetic_representation, :string
field :gender, Gender
belongs_to :nationality, Nationality,
foreign_key: :nationality_abbreviation,
references: :abbreviation,
type: :binary
timestamps inserted_at: false
end
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [
:user_id,
:given_name,
:middle_name,
:family_name,
:name_prefix,
:name_postfix,
:nickname,
:phonetic_representation,
:gender,
:nationality_abbreviation
])
|> validate_required([:user_id])
|> validate_range(:given_name, @given_name_len)
|> validate_range(:middle_name, @middle_name_len)
|> validate_range(:family_name, @family_name_len)
|> validate_range(:name_postfix, @name_postfix_len)
|> validate_range(:name_prefix, @name_prefix_len)
|> validate_range(:nickname, @nickname_len)
|> validate_range(:phonetic_representation, @phonetic_representation_len)
|> foreign_key_constraint(:user_id)
|> foreign_key_constraint(:nationality_abbreviation)
|> unique_constraint(:user_id)
end
def validate_range(changeset, field, range),
do: validate_length(changeset, field, min: Enum.min(range), max: Enum.max(range))
end
|
apps/legion/lib/identity/information/personal_data.ex
| 0.816918 | 0.475179 |
personal_data.ex
|
starcoder
|
defmodule Snek.Board.Point do
@moduledoc """
A struct for representing points on a board's grid.
"""
@moduledoc since: "0.1.0"
@typedoc """
A point on a board.
May be relative or absolute.
"""
@typedoc since: "0.1.0"
@type t :: {x, y}
@typedoc """
A point's X coordinate.
Smaller values are toward the west side of the board, larger are toward the
east.
For absolute coordinates on a board, use an integer between zero and the
board width minus one.
For relative points, you may use a negative integer.
"""
@typedoc since: "0.1.0"
@type x :: integer()
@typedoc """
A point's Y coordinate.
Smaller values are toward the north side of the board, larger are toward the
south.
For absolute coordinates on a board, use an integer between zero and the
board height minus one.
For relative points, you may use a negative integer.
"""
@typedoc since: "0.1.0"
@type y :: integer()
@typedoc """
A direction from a point toward its adjacent or diagonal neighbor.
"""
@typedoc since: "0.1.0"
@type direction :: :up | :down | :left | :right
@doc """
Returns a new point at the given X and Y coordinates.
## Examples
iex> Point.new(0, 0)
{0, 0}
iex> Point.new(3, 1)
{3, 1}
iex> Point.new(-2, 0)
{-2, 0}
"""
@doc since: "0.1.0"
@spec new(x, y) :: t
def new(x, y) when is_integer(x) and is_integer(y) do
{x, y}
end
@doc """
Returns the point that is one step toward a given direction from a point of
origin.
## Examples
iex> Point.new(5, 5) |> Point.step(:up)
{5, 4}
iex> Point.new(5, 5) |> Point.step(:down)
{5, 6}
iex> Point.new(5, 5) |> Point.step(:right)
{6, 5}
iex> Point.new(5, 5) |> Point.step(:left)
{4, 5}
iex> Point.new(5, 5) |> Point.step(:up) |> Point.step(:left)
{4, 4}
iex> Point.new(5, 5) |> Point.step(:up) |> Point.step(:right)
{6, 4}
iex> Point.new(5, 5) |> Point.step(:down) |> Point.step(:right)
{6, 6}
iex> Point.new(5, 5) |> Point.step(:down) |> Point.step(:left)
{4, 6}
"""
@doc since: "0.1.0"
@spec step(t, direction) :: t
def step(origin, direction)
def step({x, y}, :up) when is_integer(x) and is_integer(y) do
{x, y - 1}
end
def step({x, y}, :down) when is_integer(x) and is_integer(y) do
{x, y + 1}
end
def step({x, y}, :right) when is_integer(x) and is_integer(y) do
{x + 1, y}
end
def step({x, y}, :left) when is_integer(x) and is_integer(y) do
{x - 1, y}
end
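# Note that the Y axis grows southward: :up decrements Y and :down increments
# it, matching the coordinate conventions documented above.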
@doc """
Returns a list of neighboring points adjacent to a point of origin.
## Examples
iex> Point.adjascent_neighbors(Point.new(1, 1))
[
{1, 0},
{1, 2},
{2, 1},
{0, 1}
]
iex> Point.adjascent_neighbors(Point.new(0, 0))
[
{0, -1},
{0, 1},
{1, 0},
{-1, 0}
]
"""
@doc since: "0.1.0"
@spec adjascent_neighbors(t) :: list(t)
def adjascent_neighbors(origin) do
[
step(origin, :up),
step(origin, :down),
step(origin, :right),
step(origin, :left)
]
end
@doc """
Returns a list of neighboring points diagonal to a point of origin.
## Examples
iex> Point.diagonal_neighbors(Point.new(1, 1))
[
{0, 0},
{2, 0},
{2, 2},
{0, 2}
]
iex> Point.diagonal_neighbors(Point.new(0, 0))
[
{-1, -1},
{1, -1},
{1, 1},
{-1, 1}
]
"""
@doc since: "0.1.0"
@spec diagonal_neighbors(t) :: list(t)
def diagonal_neighbors(origin) do
up = step(origin, :up)
down = step(origin, :down)
[
step(up, :left),
step(up, :right),
step(down, :right),
step(down, :left)
]
end
@doc """
Returns the difference between two points, which could be used to find a
vector between points, such as when using the neck and head of a snake to
determine the point continuing in the last moved direction.
## Examples
iex> Point.difference(Point.new(1, 2), Point.new(1, 3))
{0, -1}
iex> Point.difference(Point.new(4, 4), Point.new(5, 4))
{-1, 0}
"""
@doc since: "0.1.0"
@spec difference(t, t) :: t
def difference({x1, y1}, {x2, y2}) do
{
x1 - x2,
y1 - y2
}
end
@doc """
Returns the sum of two points, which could be used to apply a vector point to
a fixed point, such as when using the neck and head of a snake to determine
the point continuing in the last moved direction.
## Examples
iex> Point.sum(Point.new(1, 2), Point.new(1, 0))
{2, 2}
iex> Point.sum(Point.new(4, 4), Point.new(-1, 1))
{3, 5}
"""
@doc since: "0.1.0"
@spec sum(t, t) :: t
def sum({x1, y1}, {x2, y2}) do
{
x1 + x2,
y1 + y2
}
end
@doc """
Returns true if and only if both X and Y are zero, which could be used to
determine if a point is a null vector.
## Examples
iex> Point.zero?(Point.new(0, 0))
true
iex> Point.zero?(Point.new(0, 1))
false
"""
@doc since: "0.1.0"
@spec zero?(t) :: boolean
def zero?({0, 0}), do: true
def zero?({x, y}) when is_integer(x) and is_integer(y), do: false
@doc """
Returns true if and only if this point falls on an even square of a board,
alternating like a checkerboard.
## Examples
iex> Point.even?(Point.new(0, 0))
true
iex> Point.even?(Point.new(0, 1))
false
iex> Point.even?(Point.new(0, 2))
true
iex> Point.even?(Point.new(1, 0))
false
iex> Point.even?(Point.new(1, 1))
true
iex> Point.even?(Point.new(1, 2))
false
"""
@doc since: "0.1.0"
@spec even?(t) :: boolean
def even?({x, y}) do
rem(x + y, 2) == 0
end
@doc """
Returns the Manhattan distance between two points.
## Examples
iex> Point.manhattan_distance(Point.new(0, 0), Point.new(1, 2))
3
"""
@doc since: "0.1.0"
@spec manhattan_distance(t, t) :: integer
def manhattan_distance(point_a, point_b)
def manhattan_distance({x1, y1}, {x2, y2}) do
abs(x1 - x2) + abs(y1 - y2)
end
@doc """
Rotates a point 90 degrees clockwise.
This is useful for rotating vectors, which can help find relative directions.
## Examples
iex> Point.rotate_clockwise(Point.new(0, 1))
{-1, 0}
"""
@doc since: "0.1.0"
@spec rotate_clockwise(t) :: t
def rotate_clockwise({x, y}), do: {-y, x}
@doc """
Rotates a point 90 degrees counter-clockwise.
This is useful for rotating vectors, which can help find relative directions.
## Examples
iex> Point.rotate_counterclockwise(Point.new(-1, 0))
{0, 1}
"""
@doc since: "0.1.0"
@spec rotate_counterclockwise(t) :: t
def rotate_counterclockwise({x, y}), do: {y, -x}
end
|
lib/snek/board/point.ex
| 0.939262 | 0.789964 |
point.ex
|
starcoder
|
defprotocol Eml.Encoder do
@moduledoc """
The Eml Encoder protocol.
This protocol is used by Eml's compiler to convert different Elixir
data types to its `Eml.Compiler.chunk` type.
Chunks can be of the type `String.t`, `{ :safe, String.t }`,
`Eml.Element.t`, or `Macro.t`, so any implementation of the
`Eml.Encoder` protocol needs to return one of these types.
Eml implements the following types by default:
`Integer`, `Float`, `Atom`, `Tuple`, `BitString` and `Eml.Element`
You can easily implement the protocol for a custom type by defining an
`encode` function that receives the custom type and returns an
`Eml.Compiler.chunk`.
### Example
iex> defmodule Customer do
...> defstruct [:name, :email, :phone]
...> end
iex> defimpl Eml.Encoder, for: Customer do
...> def encode(%Customer{name: name, email: email, phone: phone}) do
...> use Eml.HTML
...>
...> div [class: "customer"] do
...> div [span("name: "), span(name)]
...> div [span("email: "), span(email)]
...> div [span("phone: "), span(phone)]
...> end
...> end
...> end
iex> c = %Customer{name: "Fred", email: "<EMAIL>", phone: "+31 6 5678 1234"}
%Customer{email: "<EMAIL>", name: "Fred", phone: "+31 6 5678 1234"}
iex> Eml.Encoder.encode c
#div<%{class: "customer"}
[#div<[#span<"name: ">, #span<"Fred">]>,
#div<[#span<"email: ">, #span<"<EMAIL>">]>,
#div<[#span<"phone: ">, #span<"+31 6 5678 1234">]>]>
iex> Eml.render c
"<div class='customer'><div><span>name: </span><span>Fred</span></div><div><span>email: </span><span><EMAIL></span></div><div><span>phone: </span><span>+31 6 5678 1234</span></div></div>"
"""
@spec encode(Eml.Encoder.t) :: Eml.node_primitive
def encode(data)
end
defimpl Eml.Encoder, for: Integer do
def encode(data), do: Integer.to_string(data)
end
defimpl Eml.Encoder, for: Float do
def encode(data), do: Float.to_string(data)
end
defimpl Eml.Encoder, for: Atom do
def encode(nil), do: nil
def encode(data), do: Atom.to_string(data)
end
defimpl Eml.Encoder, for: Tuple do
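# Two shapes are accepted here: { :safe, binary } passes through unchanged,
# and any tuple that is a valid quoted expression (Macro.validate/1 returns
# :ok, which notably includes all two-element tuples) is kept as-is.
# Anything else raises Protocol.UndefinedError.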
def encode({ :safe, data }) do
if is_binary(data) do
{ :safe, data }
else
raise Protocol.UndefinedError, protocol: Eml.Encoder, value: { :safe, data }
end
end
def encode(data) do
if Macro.validate(data) == :ok do
data
else
raise Protocol.UndefinedError, protocol: Eml.Encoder, value: data
end
end
end
defimpl Eml.Encoder, for: [BitString, Eml.Element] do
def encode(data), do: data
end
|
lib/eml/encoder.ex
| 0.825449 | 0.495545 |
encoder.ex
|
starcoder
|
defmodule Affine.Transforms do
@moduledoc """
This module defines all the basic transforms. These include translate, scale, and rotation for 1, 2 or 3 dimensions.
The transform library can be accessed at its lowest level, giving the best
performance and full control to the developer. An example of using the API at
this level is:
    t_translate = Affine.Transforms.translate(3.0, 4.0, 5.0)
    point = Affine.transform(t_translate, [1.0, 2.0, 3.0])
    assert point == [4.0, 6.0, 8.0]
And to add a transform to the first one:
    t_scale = Affine.Transforms.scale(2.0, 2.0, 2.0)
    t_scale_then_translate = Affine.multiply(t_translate, t_scale)
    point = Affine.transform(t_scale_then_translate, [1.0, 2.0, 3.0])
    assert point == [5.0, 8.0, 11.0]
Keep in mind that the order in which individual transforms are provided to the
multiply function is important, since transforms are not commutative. With the
same example as above but with t_translate and t_scale reversed, the resulting
point is different:
    t_translate_then_scale = Affine.multiply(t_scale, t_translate)
    point = Affine.transform(t_translate_then_scale, [1.0, 2.0, 3.0])
    assert point == [8.0, 12.0, 16.0]
In this case the last transform, t_translate, is the first to be applied. Of
course, the beauty of affine transforms is that all multiplied transforms are
applied in a single matrix operation, but logically the last transform
multiplied is the first to be applied.
"""
# Define the matrix type used to return a transform.
@type matrix :: [[number]]
@doc """
Defines a 1D translation transform for variable x.
"""
@spec translate(number) :: matrix
def translate(x) do
[
[ 1.0, x ],
[ 0.0, 1.0 ]
]
end
@doc """
Defines a 2D translation transform for variables x and y.
"""
@spec translate(number,number) :: matrix
def translate(x, y) do
[
[ 1.0, 0.0, x ],
[ 0.0, 1.0, y ],
[ 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 3D translation transform for variables x, y and z.
"""
@spec translate(number,number,number) :: matrix
def translate(x, y, z) do
[
[ 1.0, 0.0, 0.0, x ],
[ 0.0, 1.0, 0.0, y ],
[ 0.0, 0.0, 1.0, z ],
[ 0.0, 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 1D scale transform for variable x.
"""
@spec scale(number) :: matrix
def scale(x) do
[
[ x, 0.0 ],
[ 0.0, 1.0 ]
]
end
@doc """
Defines a 2D scale transform for variables x and y.
"""
@spec scale(number,number) :: matrix
def scale(x, y) do
[
[ x, 0.0, 0.0 ],
[ 0.0, y, 0.0 ],
[ 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 3D scale transform for variables x, y and z.
"""
@spec scale(number,number,number) :: matrix
def scale(x, y, z) do
[
[ x, 0.0, 0.0, 0.0 ],
[ 0.0, y, 0.0, 0.0 ],
[ 0.0, 0.0, z, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 3D rotation around the x axis in the counterclockwise direction for
the specified angle in radians.
"""
@spec rotate_x(number) :: matrix
def rotate_x angle do
{ sin, cos } = sin_cos angle
[
[ 1.0, 0.0, 0.0, 0.0 ],
[ 0.0, cos, -sin, 0.0 ],
[ 0.0, sin, cos, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 3D rotation around the y axis in the counterclockwise direction for
the specified angle in radians.
"""
@spec rotate_y(number) :: matrix
def rotate_y angle do
{ sin, cos } = sin_cos angle
[
[ cos, 0.0, sin, 0.0 ],
[ 0.0, 1.0, 0.0, 0.0 ],
[ -sin, 0.0, cos, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 3D rotation around the z axis in the counterclockwise direction for
the specified angle in radians.
"""
@spec rotate_z(number) :: matrix
def rotate_z angle do
{ sin, cos } = sin_cos angle
[
[ cos, -sin, 0.0, 0.0 ],
[ sin, cos, 0.0, 0.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ]
]
end
@doc """
Defines a 2D rotation in the xy plane; it applies only to 2D transformations.
Rotation is in the counterclockwise direction for the specified angle in
radians.
"""
@spec rotate_xy(number) :: matrix
def rotate_xy angle do
{ sin, cos } = sin_cos angle
[
[ cos, -sin, 0.0 ],
[ sin, cos, 0.0 ],
[ 0.0, 0.0, 1.0 ]
]
end
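# A quick illustrative check, assuming Affine.transform/2 applies a 3x3
# transform to a 2D point just as it applies 4x4 transforms to 3D points:
#
#   Affine.transform(Affine.Transforms.rotate_xy(:math.pi() / 2), [1.0, 0.0])
#   # => approximately [0.0, 1.0], up to floating point error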
# Converts the angle based on units specified by user and returns
# the sin and cos for the angle. This is a helper function for the
# rotate transforms.
defp sin_cos angle do
{ :math.sin(angle), :math.cos(angle) }
end
end
|
lib/affine/transforms.ex
| 0.876317 | 0.859015 |
transforms.ex
|
starcoder
|
defmodule Furlong.Symbolics do
@moduledoc """
Functions for constructing constraints.
```
x = make_ref()
y = make_ref()
constraint = lte(add(x, 2), add(multiply(y, 5), 10)) # x + 2 <= 5 * y + 10
```
"""
@doc """
Multiplies a variable / term / expression with a constant.
"""
def multiply(var, coefficient) when is_reference(var) and is_number(coefficient),
do: {:term, var, coefficient}
def multiply(coefficient, var) when is_reference(var) and is_number(coefficient),
do: {:term, var, coefficient}
def multiply({:term, var, coeff}, coefficient) when is_number(coefficient),
do: {:term, var, coeff * coefficient}
def multiply(coefficient, {:term, var, coeff}) when is_number(coefficient),
do: {:term, var, coeff * coefficient}
def multiply({:expression, terms, constant}, coefficient) when is_number(coefficient) do
multiplied_terms = Enum.map(terms, fn term -> multiply(term, coefficient) end)
{:expression, multiplied_terms, constant * coefficient}
end
def multiply(coefficient, {:expression, _, _} = expression) when is_number(coefficient),
do: multiply(expression, coefficient)
def multiply({:expression, _, _} = expression, {:expression, [], constant}),
do: multiply(expression, constant)
def multiply({:expression, [], constant}, {:expression, _, _} = expression),
do: multiply(expression, constant)
@doc """
Divides a variable / term / expression by a constant.
"""
def divide(var, denominator) when is_reference(var) and is_number(denominator),
do: {:term, var, 1.0 / denominator}
def divide({:term, var, coefficient}, denominator) when is_number(denominator),
do: {:term, var, coefficient * (1.0 / denominator)}
def divide({:expression, _, _} = expression, denominator) when is_number(denominator),
do: multiply(expression, 1.0 / denominator)
def divide({:expression, _, _} = expression, {:expression, [], constant}),
do: divide(expression, constant)
@doc """
Negates a variable / term / expression.
"""
def negate(var) when is_reference(var), do: {:term, var, -1}
def negate({:term, var, coefficient}), do: {:term, var, -coefficient}
def negate({:expression, _, _} = expression), do: multiply(expression, -1)
@doc """
Adds two summands, which can be constants, variables, terms, expressions.
"""
def add(c1, c2) when is_number(c1) and is_number(c2), do: c1 + c2
def add({:term, _, _} = term, constant) when is_number(constant),
do: {:expression, [term], constant}
def add(constant, {:term, _, _} = term) when is_number(constant),
do: {:expression, [term], constant}
def add({:term, _, _} = term, var) when is_reference(var),
do: {:expression, [term, {:term, var, 1}], 0}
def add(var, {:term, _, _} = term) when is_reference(var),
do: {:expression, [term, {:term, var, 1}], 0}
def add({:term, _, _} = first, {:term, _, _} = second), do: {:expression, [first, second], 0}
def add({:expression, terms, constant}, const) when is_number(const),
do: {:expression, terms, constant + const}
def add(const, {:expression, terms, constant}) when is_number(const),
do: {:expression, terms, constant + const}
def add({:expression, terms, constant}, var) when is_reference(var),
do: {:expression, [{:term, var, 1} | terms], constant}
def add(var, {:expression, terms, constant}) when is_reference(var),
do: {:expression, [{:term, var, 1} | terms], constant}
def add({:expression, terms, constant}, {:term, _, _} = term),
do: {:expression, [term | terms], constant}
def add({:term, _, _} = term, {:expression, terms, constant}),
do: {:expression, [term | terms], constant}
def add({:expression, terms_1, constant_1}, {:expression, terms_2, constant_2}),
do: {:expression, terms_1 ++ terms_2, constant_1 + constant_2}
def add(var, constant) when is_number(constant) and is_reference(var),
do: {:expression, [{:term, var, 1}], constant}
def add(constant, var) when is_number(constant) and is_reference(var), do: add(var, constant)
def add(first, second) when is_reference(first) and is_reference(second),
do: add(first, {:expression, [{:term, second, 1}], 0})
@doc """
Subtraction. Minuend and subtrahend can be constants, variables, terms, expressions.
"""
def subtract({:expression, _, _} = expression, constant) when is_number(constant),
do: add(expression, -constant)
def subtract(constant, {:expression, _, _} = expression) when is_number(constant),
do: add(negate(expression), constant)
def subtract({:expression, _, _} = expression, var) when is_reference(var),
do: add(expression, negate(var))
def subtract(var, {:expression, _, _} = expression) when is_reference(var),
do: add(var, negate(expression))
def subtract({:expression, _, _} = expression, {:term, _, _} = term),
do: add(expression, negate(term))
def subtract({:term, _, _} = term, {:expression, _, _} = expression),
do: add(negate(expression), term)
def subtract({:expression, _, _} = first, {:expression, _, _} = second),
do: add(first, negate(second))
def subtract({:term, _, _} = term, constant) when is_number(constant), do: add(term, -constant)
def subtract(constant, {:term, _, _} = term) when is_number(constant),
do: add(negate(term), constant)
def subtract({:term, _, _} = term, var) when is_reference(var), do: add(term, negate(var))
def subtract(var, {:term, _, _} = term) when is_reference(var), do: add(var, negate(term))
def subtract({:term, _, _} = first, {:term, _, _} = second), do: add(first, negate(second))
def subtract(var, constant) when is_number(constant) and is_reference(var),
do: add(var, -constant)
def subtract(constant, var) when is_number(constant) and is_reference(var),
do: add(negate(var), constant)
def subtract(first, second) when is_reference(first) and is_reference(second),
do: add(first, negate(second))
@doc """
Creates an equality constraint.
"""
def eq(first, second), do: rel(:eq, first, second)
@doc """
Creates an inequality constraint, where the first argument is less than or equal to the second argument.
"""
def lte(first, second), do: rel(:lte, first, second)
@doc """
Creates an inequality constraint, where the first argument is greater than or equal to the second argument.
"""
def gte(first, second), do: rel(:gte, first, second)
defp rel(op, {:expression, _, _} = first, {:expression, _, _} = second),
do: {:constraint, reduce(subtract(first, second)), op}
defp rel(op, {:expression, _, _} = expression, {:term, _, _} = term),
do: {:constraint, reduce(subtract(expression, {:expression, [term], 0})), op}
defp rel(op, {:term, _, _} = term, {:expression, _, _} = expression),
do: rel(op, {:expression, [term], 0}, expression)
defp rel(op, {:expression, _, _} = expression, var) when is_reference(var),
do: rel(op, expression, {:term, var, 1})
defp rel(op, var, {:expression, _, _} = expression) when is_reference(var),
do: rel(op, {:term, var, 1}, expression)
defp rel(op, {:expression, _, _} = expression, constant) when is_number(constant),
do: rel(op, expression, {:expression, [], constant})
defp rel(op, constant, {:expression, _, _} = expression) when is_number(constant),
do: rel(op, {:expression, [], constant}, expression)
defp rel(op, {:term, _, _} = first, {:term, _, _} = second),
do: rel(op, {:expression, [first], 0}, second)
defp rel(op, {:term, _, _} = term, var) when is_reference(var),
do: rel(op, {:expression, [term], 0}, var)
defp rel(op, var, {:term, _, _} = term) when is_reference(var),
do: rel(op, var, {:expression, [term], 0})
defp rel(op, {:term, _, _} = term, constant) when is_number(constant),
do: rel(op, {:expression, [term], 0}, constant)
defp rel(op, constant, {:term, _, _} = term) when is_number(constant),
do: rel(op, constant, {:expression, [term], 0})
defp rel(op, first, second) when is_reference(first) and is_reference(second),
do: rel(op, {:term, first, 1}, second)
defp rel(op, var, const) when is_reference(var) and is_number(const),
do: rel(op, {:term, var, 1}, const)
defp rel(op, const, var) when is_reference(var) and is_number(const),
do: rel(op, const, {:term, var, 1})
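# Illustrative: reduce/1 below sums the coefficients of duplicate terms, e.g.
#
#   x = make_ref()
#   reduce({:expression, [{:term, x, 1}, {:term, x, 2}], 5})
#   # => {:expression, [{:term, x, 3}], 5}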
@doc """
Reduces common terms in an expression by summing their coefficients.
"""
def reduce({:expression, terms, constant}) do
reduced_terms =
terms
|> Enum.reduce(%{}, fn {:term, var, coefficient}, summed_coefficients ->
Map.update(summed_coefficients, var, coefficient, fn sum -> sum + coefficient end)
end)
|> Enum.map(fn {var, coefficient} -> {:term, var, coefficient} end)
{:expression, reduced_terms, constant}
end
end
|
lib/furlong/symbolics.ex
| 0.871639 | 0.840423 |
symbolics.ex
|
starcoder
|
defmodule AWS.ResourceGroupsTaggingAPI do
@moduledoc """
Resource Groups Tagging API
This guide describes the API operations for resource groups tagging.
A tag is a label that you assign to an AWS resource. A tag consists of a key and
a value, both of which you define. For example, if you have two Amazon EC2
instances, you might assign both a tag key of "Stack." But the value of "Stack"
might be "Testing" for one and "Production" for the other.
Do not store personally identifiable information (PII) or other confidential or
sensitive information in tags. We use tags to provide you with billing and
administration services. Tags are not intended to be used for private or
sensitive data.
Tagging can help you organize your resources and enables you to simplify
resource management, access management and cost allocation.
You can use the resource groups tagging API operations to complete the following
tasks:
* Tag and untag supported resources located in the specified Region
for the AWS account.
* Use tag-based filters to search for resources located in the
specified Region for the AWS account.
* List all existing tag keys in the specified Region for the AWS
account.
* List all existing values for the specified key in the specified
Region for the AWS account.
To use resource groups tagging API operations, you must add the following
permissions to your IAM policy:
* `tag:GetResources`
* `tag:TagResources`
* `tag:UntagResources`
* `tag:GetTagKeys`
* `tag:GetTagValues`
You'll also need permissions to access the resources of individual services so
that you can tag and untag those resources.
For more information on IAM policies, see [Managing IAM Policies](http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage.html)
in the *IAM User Guide*.
**Services that support the Resource Groups Tagging API**

You can use the Resource Groups Tagging API to tag resources for the following
AWS services.

* [Alexa for Business (a4b)](https://docs.aws.amazon.com/a4b)
* [API Gateway](https://docs.aws.amazon.com/apigateway)
* [Amazon AppStream](https://docs.aws.amazon.com/appstream2)
* [AWS AppSync](https://docs.aws.amazon.com/appsync)
* [AWS App Mesh](https://docs.aws.amazon.com/app-mesh)
* [Amazon Athena](https://docs.aws.amazon.com/athena)
* [Amazon Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide)
* [AWS Backup](https://docs.aws.amazon.com/aws-backup)
* [AWS Certificate Manager](https://docs.aws.amazon.com/acm)
* [AWS Certificate Manager Private CA](https://docs.aws.amazon.com/acm)
* [Amazon Cloud Directory](https://docs.aws.amazon.com/clouddirectory)
* [AWS Cloud Map](https://docs.aws.amazon.com/cloud-map)
* [AWS CloudFormation](https://docs.aws.amazon.com/cloudformation)
* [Amazon CloudFront](https://docs.aws.amazon.com/cloudfront)
* [AWS CloudHSM](https://docs.aws.amazon.com/cloudhsm)
* [AWS CloudTrail](https://docs.aws.amazon.com/cloudtrail)
* [Amazon CloudWatch (alarms only)](https://docs.aws.amazon.com/cloudwatch)
* [Amazon CloudWatch Events](https://docs.aws.amazon.com/cloudwatch/?id=docs_gateway#amazon-cloudwatch-events)
* [Amazon CloudWatch Logs](https://docs.aws.amazon.com/cloudwatch/?id=docs_gateway#amazon-cloudwatch-logs)
* [Amazon Cloudwatch Synthetics](https://docs.aws.amazon.com/cloudwatch)
* [AWS CodeBuild](https://docs.aws.amazon.com/codebuild)
* [AWS CodeCommit](https://docs.aws.amazon.com/codecommit)
* [AWS CodeGuru Profiler](https://docs.aws.amazon.com/codeguru/latest/profiler-ug/)
* [AWS CodePipeline](https://docs.aws.amazon.com/codepipeline)
* [AWS CodeStar](https://docs.aws.amazon.com/codestar)
* [AWS CodeStar Connections](https://docs.aws.amazon.com/codestar-connections/latest/APIReference/)
* [Amazon Cognito Identity](https://docs.aws.amazon.com/cognito)
* [Amazon Cognito User Pools](https://docs.aws.amazon.com/cognito)
* [Amazon Comprehend](https://docs.aws.amazon.com/comprehend)
* [AWS Config](https://docs.aws.amazon.com/config)
* [Amazon Connect](http://aws.amazon.com/connect/resources/?whats-new-cards#Documentation)
* [AWS Data Exchange](https://docs.aws.amazon.com/data-exchange)
* [AWS Data Pipeline](https://docs.aws.amazon.com/data-pipeline)
* [AWS Database Migration Service](https://docs.aws.amazon.com/dms)
* [AWS DataSync](https://docs.aws.amazon.com/datasync)
* [AWS Device Farm](https://docs.aws.amazon.com/devicefarm)
* [AWS Direct Connect](https://docs.aws.amazon.com/directconnect)
* [AWS Directory Service](https://docs.aws.amazon.com/directory-service)
* [Amazon DynamoDB](https://docs.aws.amazon.com/dynamodb)
* [Amazon EBS](https://docs.aws.amazon.com/ebs)
* [Amazon EC2](https://docs.aws.amazon.com/ec2)
* [EC2 Image Builder](https://docs.aws.amazon.com/imagebuilder)
* [Amazon ECR](https://docs.aws.amazon.com/ecr)
* [Amazon ECS](https://docs.aws.amazon.com/ecs)
* [Amazon EKS](https://docs.aws.amazon.com/eks)
* [AWS Elastic Beanstalk](https://docs.aws.amazon.com/elastic-beanstalk)
* [Amazon Elastic File System](https://docs.aws.amazon.com/efs)
* [Elastic Load Balancing](https://docs.aws.amazon.com/elasticloadbalancing)
* [Amazon Elastic Inference](https://docs.aws.amazon.com/elastic-inference)
* [Amazon ElastiCache](https://docs.aws.amazon.com/elasticache)
* [Amazon Elasticsearch Service](https://docs.aws.amazon.com/elasticsearch-service)
* [AWS Elemental MediaLive](https://docs.aws.amazon.com/medialive)
* [AWS Elemental MediaPackage](https://docs.aws.amazon.com/mediapackage)
* [AWS Elemental MediaPackage VoD](https://docs.aws.amazon.com/mediapackage)
* [AWS Elemental MediaTailor](https://docs.aws.amazon.com/mediatailor)
* [Amazon EMR](https://docs.aws.amazon.com/emr)
* [Amazon EventBridge Schema](https://docs.aws.amazon.com/eventbridge)
* [AWS Firewall Manager](https://docs.aws.amazon.com/firewall-manager)
* [Amazon Forecast](https://docs.aws.amazon.com/forecast)
* [Amazon Fraud Detector](https://docs.aws.amazon.com/frauddetector)
* [Amazon FSx](https://docs.aws.amazon.com/fsx)
* [Amazon S3 Glacier](https://docs.aws.amazon.com/s3/?id=docs_gateway#amazon-s3-glacier)
* [AWS Global Accelerator](https://docs.aws.amazon.com/global-accelerator)
* [AWS Ground Station](https://docs.aws.amazon.com/ground-station)
* [AWS Glue](https://docs.aws.amazon.com/glue)
* [Amazon GuardDuty](https://docs.aws.amazon.com/guardduty)
* [Amazon Inspector](https://docs.aws.amazon.com/inspector)
* [Amazon Interactive Video Service](https://docs.aws.amazon.com/ivs)
* [AWS IoT Analytics](https://docs.aws.amazon.com/iotanalytics)
* [AWS IoT Core](https://docs.aws.amazon.com/iot)
* [AWS IoT Device Defender](https://docs.aws.amazon.com/iot-device-defender)
* [AWS IoT Device Management](https://docs.aws.amazon.com/iot-device-management)
* [AWS IoT Events](https://docs.aws.amazon.com/iotevents)
* [AWS IoT Greengrass](https://docs.aws.amazon.com/greengrass)
* [AWS IoT 1-Click](https://docs.aws.amazon.com/iot-1-click)
* [AWS IoT Sitewise](https://docs.aws.amazon.com/iot-sitewise)
* [AWS IoT Things Graph](https://docs.aws.amazon.com/thingsgraph)
* [Amazon Kendra](https://docs.aws.amazon.com/kendra)
* [AWS Key Management Service](https://docs.aws.amazon.com/kms)
* [Amazon Kinesis](https://docs.aws.amazon.com/kinesis)
* [Amazon Kinesis Data Analytics](https://docs.aws.amazon.com/kinesis/?id=docs_gateway#amazon-kinesis-data-analytics)
* [Amazon Kinesis Data Firehose](https://docs.aws.amazon.com/kinesis/?id=docs_gateway#amazon-kinesis-data-firehose)
* [AWS Lambda](https://docs.aws.amazon.com/lambda)
* [Amazon Lex](https://docs.aws.amazon.com/lex)
* [AWS License Manager](https://docs.aws.amazon.com/license-manager)
* [Amazon Lightsail](https://docs.aws.amazon.com/lightsail)
* [Amazon Macie](https://docs.aws.amazon.com/macie)
* [Amazon Machine Learning](https://docs.aws.amazon.com/machine-learning)
* [Amazon MQ](https://docs.aws.amazon.com/amazon-mq)
* [Amazon MSK](https://docs.aws.amazon.com/msk)
* [Amazon Neptune](https://docs.aws.amazon.com/neptune)
* [AWS Network Manager](https://docs.aws.amazon.com/vpc/latest/tgw/what-is-network-manager.html)
* [AWS OpsWorks](https://docs.aws.amazon.com/opsworks)
* [AWS OpsWorks CM](https://docs.aws.amazon.com/opsworks)
* [AWS Organizations](https://docs.aws.amazon.com/organizations)
* [Amazon Pinpoint](https://docs.aws.amazon.com/pinpoint)
* [Amazon Quantum Ledger Database (QLDB)](https://docs.aws.amazon.com/qldb)
* [Amazon RDS](https://docs.aws.amazon.com/rds)
* [Amazon Redshift](https://docs.aws.amazon.com/redshift)
* [AWS Resource Access Manager](https://docs.aws.amazon.com/ram)
* [AWS Resource Groups](https://docs.aws.amazon.com/ARG)
* [AWS RoboMaker](https://docs.aws.amazon.com/robomaker)
* [Amazon Route 53](https://docs.aws.amazon.com/route53)
* [Amazon Route 53 Resolver](https://docs.aws.amazon.com/route53)
* [Amazon S3 (buckets only)](https://docs.aws.amazon.com/s3)
* [Amazon SageMaker](https://docs.aws.amazon.com/sagemaker)
* [Savings Plans](https://docs.aws.amazon.com/savingsplans)
* [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager)
* [AWS Security Hub](https://docs.aws.amazon.com/securityhub)
* [AWS Service Catalog](https://docs.aws.amazon.com/servicecatalog)
* [Amazon Simple Email Service (SES)](https://docs.aws.amazon.com/ses)
* [Amazon Simple Notification Service (SNS)](https://docs.aws.amazon.com/sns)
* [Amazon Simple Queue Service (SQS)](https://docs.aws.amazon.com/sqs)
* [Amazon Simple Workflow Service](https://docs.aws.amazon.com/swf)
* [AWS Step Functions](https://docs.aws.amazon.com/step-functions)
* [AWS Storage Gateway](https://docs.aws.amazon.com/storagegateway)
* [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager)
* [AWS Transfer for SFTP](https://docs.aws.amazon.com/transfer)
* [Amazon VPC](https://docs.aws.amazon.com/vpc)
* [AWS WAF](https://docs.aws.amazon.com/waf)
* [AWS WAF Regional](https://docs.aws.amazon.com/waf)
* [Amazon WorkLink](https://docs.aws.amazon.com/worklink)
* [Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces)
"""
@doc """
Describes the status of the `StartReportCreation` operation.
You can call this operation only from the organization's master account and from
the us-east-1 Region.
"""
def describe_report_creation(client, input, options \\ []) do
request(client, "DescribeReportCreation", input, options)
end
@doc """
Returns a table that shows counts of resources that are noncompliant with their
tag policies.
For more information on tag policies, see [Tag Policies](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
in the *AWS Organizations User Guide.*
You can call this operation only from the organization's master account and from
the us-east-1 Region.
"""
def get_compliance_summary(client, input, options \\ []) do
request(client, "GetComplianceSummary", input, options)
end
@doc """
Returns all the tagged or previously tagged resources that are located in the
specified Region for the AWS account.
Depending on what information you want returned, you can also specify the
following:
* *Filters* that specify what tags and resource types you want
returned. The response includes all tags that are associated with the requested
resources.
* Information about compliance with the account's effective tag
policy. For more information on tag policies, see [Tag Policies](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
in the *AWS Organizations User Guide.*
You can check the `PaginationToken` response parameter to determine if a query
is complete. Queries occasionally return fewer results on a page than allowed.
The `PaginationToken` response parameter value is `null` *only* when there are
no more results to display.
"""
def get_resources(client, input, options \\ []) do
request(client, "GetResources", input, options)
end
@doc """
Returns all tag keys in the specified Region for the AWS account.
"""
def get_tag_keys(client, input, options \\ []) do
request(client, "GetTagKeys", input, options)
end
@doc """
Returns all tag values for the specified key in the specified Region for the AWS
account.
"""
def get_tag_values(client, input, options \\ []) do
request(client, "GetTagValues", input, options)
end
@doc """
Generates a report that lists all tagged resources in accounts across your
organization and tells whether each resource is compliant with the effective tag
policy.
Compliance data is refreshed daily.
The generated report is saved to the following location:
`s3://example-bucket/AwsTagPolicies/o-exampleorgid/YYYY-MM-ddTHH:mm:ssZ/report.csv`
You can call this operation only from the organization's master account and from
the us-east-1 Region.
"""
def start_report_creation(client, input, options \\ []) do
request(client, "StartReportCreation", input, options)
end
@doc """
Applies one or more tags to the specified resources.
Note the following:
* Not all resources can have tags. For a list of services that
support tagging, see [this list](http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html).
* Each resource can have up to 50 tags. For other limits, see [Tag Naming and Usage
Conventions](http://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions)
in the *AWS General Reference.*
* You can only tag resources that are located in the specified
Region for the AWS account.
* To add tags to a resource, you need the necessary permissions for
the service that the resource belongs to as well as permissions for adding tags.
For more information, see [this list](http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html).
Do not store personally identifiable information (PII) or other confidential or
sensitive information in tags. We use tags to provide you with billing and
administration services. Tags are not intended to be used for private or
sensitive data.
"""
def tag_resources(client, input, options \\ []) do
request(client, "TagResources", input, options)
end
@doc """
Removes the specified tags from the specified resources.
When you specify a tag key, the action removes both that key and its associated
value. The operation succeeds even if you attempt to remove tags from a resource
that were already removed. Note the following:
* To remove tags from a resource, you need the necessary permissions
for the service that the resource belongs to as well as permissions for removing
tags. For more information, see [this list](http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html).
* You can only tag resources that are located in the specified
Region for the AWS account.
"""
def untag_resources(client, input, options \\ []) do
request(client, "UntagResources", input, options)
end
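# A minimal usage sketch (client construction is illustrative; see AWS.Client
# in the aws-elixir library for the actual options):
#
#   client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#   {:ok, result, _response} =
#     AWS.ResourceGroupsTaggingAPI.get_tag_keys(client, %{})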
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "tagging"}
host = build_host("tagging", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "ResourceGroupsTaggingAPI_20170126.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/resource_groups_tagging_api.ex
| 0.872768 | 0.621081 |
resource_groups_tagging_api.ex
|
starcoder
|
defmodule Day10 do
@moduledoc """
AoC 2019, Day 10 - Monitoring Station
"""
defmodule SpaceMap do
defstruct text: "", asteroids: %{}, rows: 1, cols: 1
def print(map), do: IO.puts "#{map.text}"
def print_neighbors(map) do
for c <- 0..map.cols-1 do
for r <- 0..map.rows-1 do
IO.write("#{Map.get(map.asteroids, {r, c}, '.')}")
end
IO.write("\n")
end
end
end
alias Day10.SpaceMap
@doc """
How many asteroids can be detected from the best position?
"""
def part1 do
Util.priv_file(:day10, "day10_input.txt")
|> File.read!()
|> parse()
|> find_neighbors()
|> best()
end
@doc """
Checksum of the 200th asteroid vaporized
"""
def part2 do
Util.priv_file(:day10, "day10_input.txt")
|> File.read!()
|> parse()
|> vaporize_checksum(200)
end
@doc """
Compute the checksum of the nth vaporized asteroid
"""
def vaporize_checksum(map, cnt) do
counts = find_neighbors(map)
{best, _cnt} = Map.to_list(counts.asteroids)
|> Enum.sort_by(fn {_pos, cnt} -> cnt end, &>=/2)
|> hd()
vaporize_checksum(map, best, cnt)
end
def vaporize_checksum(map, pos, cnt) do
asteroids = Map.keys(map.asteroids)
{px, py} = pos
{x, y} = neighbors(pos, asteroids, [])
|> Enum.map(&map_quad/1)
|> Enum.map(fn {quad, slope, loc={ax, ay}} ->
{quad, slope, abs(px - ax) + abs(py - ay), loc}
end)
|> Enum.sort_by(fn {q, s, d, _l} -> {q, s, d} end)
|> Enum.chunk_by(fn {q, s, _d, _l} -> {q, s} end)
|> vaporize(cnt)
(100*x)+y
end
def vaporize([], _cnt), do: IO.puts "Empty list..."
def vaporize(_lst, 0), do: IO.puts "Reached 0..."
def vaporize([{_q, _s, _d, loc} | _rest], 1), do: loc
def vaporize([[{_q, _s, _d, loc} | _rest] | _rest2], 1), do: loc
def vaporize([[_l] | rest], cnt), do: vaporize(rest, cnt-1)
def vaporize([[_l | rest] | rest2], cnt), do: vaporize(rest2 ++ rest, cnt-1)
def vaporize([_l | rest], cnt), do: vaporize(rest, cnt-1)
defp map_quad({true, false, slope, pos}), do: {0, slope, pos}
defp map_quad({true, true, slope, pos}), do: {3, slope, pos}
defp map_quad({false, true, slope, pos}), do: {2, slope, pos}
defp map_quad({false, false, slope, pos}), do: {1, slope, pos}
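# map_quad orders the four quadrants clockwise starting from straight up
# ({true, false} = above and to the right), so sorting by
# {quadrant, slope, distance} approximates the laser's clockwise sweep order
# (the +/-100 values produced by neighbors/3 are sentinel slopes for
# same-column asteroids).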
def neighbors(_k, [], acc), do: acc
def neighbors(k, [k | rest], acc), do: neighbors(k, rest, acc)
def neighbors(k = {c, kr}, [a = {c, hr} | rest], acc) when hr <= kr, do: neighbors(k, rest, [{true, false, -100, a} | acc])
def neighbors(k = {c, _kr}, [a = {c, _hr} | rest], acc), do: neighbors(k, rest, [{false, false, 100, a} | acc])
def neighbors(k = {kc, kr}, [a = {hc, hr} | rest], acc) do
slope = (hr - kr)/(hc - kc)
neighbors(k, rest, [{hr<=kr, hc<=kc, slope, a} | acc])
end
@doc """
Return the number visible at the best position
"""
def best(map) do
Enum.max(Map.values(map.asteroids))
end
@doc """
Compute the visible neighbors for all asteroids
"""
def find_neighbors(map) do
keys = Map.keys(map.asteroids)
new_neighbors = Enum.reduce(keys, %{}, fn (k, acc) -> Map.put(acc, k, neighbor_count(k, keys)) end)
%SpaceMap{map | asteroids: new_neighbors}
end
def neighbor_count(k, all), do: neighbor_count(k, all, [])
def neighbor_count(_k, [], acc), do: Enum.uniq(acc) |> Enum.count()
def neighbor_count(k, [k | rest], acc), do: neighbor_count(k, rest, acc)
def neighbor_count(k = {c, kr}, [{c, hr} | rest], acc) when hr < kr, do: neighbor_count(k, rest, ["N" | acc])
def neighbor_count(k = {c, _kr}, [{c, _hr} | rest], acc), do: neighbor_count(k, rest, ["S" | acc])
def neighbor_count(k = {kc, kr}, [{hc, hr} | rest], acc) do
slope = (hr - kr)/(hc - kc)
neighbor_count(k, rest, [{hr<kr, hc<kc, slope} | acc])
end
@doc """
Parse an asteroid map
"""
def parse(str) do
data =
String.split(str, "\n", trim: true)
|> Enum.map(&String.to_charlist/1)
|> Enum.map(&Enum.with_index/1)
asteroids = Enum.with_index(data)
|> Enum.map(&filter_row/1)
|> List.flatten()
|> Enum.into(%{}, fn x -> {x, 0} end)
%SpaceMap{text: str, asteroids: asteroids, rows: Enum.count(data), cols: hd(data) |> Enum.count()}
end
defp filter_row({l, row}) do
Enum.reverse(l)
|> Enum.reduce([],
fn ({val, col}, acc) ->
if val == ?# do
[{col, row} | acc]
else
acc
end
end)
end
end
|
apps/day10/lib/day10.ex
| 0.800341 | 0.536738 |
day10.ex
|
starcoder
|
defmodule ExPixBRCode.Payments.Models.DynamicImmediatePixPayment do
@moduledoc """
A dynamic immediate Pix payment.
This payment structure is the result of loading it from a Pix endpoint.
"""
use ExPixBRCode.ValueObject
alias ExPixBRCode.Changesets
@required [:revisao, :chave, :txid, :status]
@optional [:solicitacaoPagador]
@calendario_required [:criacao, :apresentacao]
@calendario_optional [:expiracao]
@valor_required [:original]
@valor_optional [:modalidadeAlteracao]
@saque_required [:valor, :prestadorDoServicoDeSaque, :modalidadeAgente]
@saque_optional [:modalidadeAlteracao]
@troco_required [:valor, :prestadorDoServicoDeSaque, :modalidadeAgente]
@troco_optional [:modalidadeAlteracao]
embedded_schema do
field :revisao, :integer
field :chave, :string
field :txid, :string
field :status, Ecto.Enum,
values: ~w(ATIVA CONCLUIDA REMOVIDA_PELO_USUARIO_RECEBEDOR REMOVIDA_PELO_PSP)a
field :solicitacaoPagador, :string
embeds_one :calendario, Calendario, primary_key: false do
field :criacao, :utc_datetime
field :apresentacao, :utc_datetime
field :expiracao, :integer, default: 86_400
end
embeds_one :devedor, Devedor, primary_key: false do
field :cpf, :string
field :cnpj, :string
field :nome, :string
end
embeds_one :valor, Valor, primary_key: false do
field :original, :decimal
field :modalidadeAlteracao, :integer, default: 0
embeds_one :retirada, Retirada, primary_key: false do
embeds_one :saque, Saque, primary_key: false do
field :valor, :decimal
field :modalidadeAlteracao, :integer, default: 0
field :prestadorDoServicoDeSaque, :string
field :modalidadeAgente, :string
end
embeds_one :troco, Troco, primary_key: false do
field :valor, :decimal
field :modalidadeAlteracao, :integer, default: 0
field :prestadorDoServicoDeSaque, :string
field :modalidadeAgente, :string
end
end
end
embeds_many :infoAdicionais, InfoAdicionais, primary_key: false do
field :nome, :string
field :valor, :string
end
end
@doc false
def changeset(model \\ %__MODULE__{}, params) do
model
|> cast(coalesce_params(params), @required ++ @optional)
|> validate_required(@required)
|> cast_embed(:calendario, with: &calendario_changeset/2, required: true)
|> cast_embed(:devedor, with: &devedor_changeset/2)
|> cast_embed(:valor, with: &valor_changeset/2, required: true)
|> cast_embed(:infoAdicionais, with: &info_adicionais_changeset/2)
|> validate_number(:revisao, greater_than_or_equal_to: 0)
|> validate_length(:txid, max: 35)
|> validate_length(:solicitacaoPagador, max: 140)
end
defp coalesce_params(%{"infoAdicionais" => nil} = params),
do: Map.put(params, "infoAdicionais", [])
defp coalesce_params(%{infoAdicionais: nil} = params), do: Map.put(params, :infoAdicionais, [])
defp coalesce_params(params), do: params
defp calendario_changeset(model, params) do
model
|> cast(params, @calendario_required ++ @calendario_optional)
|> validate_required(@calendario_required)
end
defp devedor_changeset(model, params) do
model
|> cast(params, [:nome, :cpf, :cnpj])
|> validate_either_cpf_or_cnpj()
end
defp validate_either_cpf_or_cnpj(%{valid?: false} = c), do: c
defp validate_either_cpf_or_cnpj(changeset) do
cpf = get_field(changeset, :cpf)
cnpj = get_field(changeset, :cnpj)
name = get_field(changeset, :nome)
cond do
not is_nil(cpf) and not is_nil(cnpj) ->
add_error(changeset, :devedor, "only one of cpf or cnpj must be present")
(not is_nil(cpf) or not is_nil(cnpj)) and is_nil(name) ->
add_error(changeset, :devedor, "when either cpf or cnpj is present so must be 'nome'")
not is_nil(cpf) ->
Changesets.validate_document(changeset, :cpf)
true ->
Changesets.validate_document(changeset, :cnpj)
end
end
defp valor_changeset(model, params) do
model
|> cast(params, @valor_required ++ @valor_optional)
|> validate_required(@valor_required)
|> validate_inclusion(:modalidadeAlteracao, [0, 1])
|> cast_embed(:retirada, with: &retirada_changeset/2)
|> validate_valor_original()
|> validate_either_saque_or_troco()
end
defp retirada_changeset(model, params) do
model
|> cast(params, [])
|> cast_embed(:saque, with: &saque_changeset/2)
|> cast_embed(:troco, with: &troco_changeset/2)
end
defp saque_changeset(model, params) do
model
|> cast(params, @saque_required ++ @saque_optional)
|> validate_required(@saque_required)
|> validate_inclusion(:modalidadeAlteracao, [0, 1])
|> validate_valor()
|> validate_length(:prestadorDoServicoDeSaque, is: 8)
|> validate_format(:prestadorDoServicoDeSaque, ~r/^[[:digit:]]+$/)
|> validate_inclusion(:modalidadeAgente, ["AGTEC", "AGTOT", "AGPSS"])
end
defp troco_changeset(model, params) do
model
|> cast(params, @troco_required ++ @troco_optional)
|> validate_required(@troco_required)
|> validate_inclusion(:modalidadeAlteracao, [0, 1])
|> validate_valor()
|> validate_length(:prestadorDoServicoDeSaque, is: 8)
|> validate_format(:prestadorDoServicoDeSaque, ~r/^[[:digit:]]+$/)
|> validate_inclusion(:modalidadeAgente, ["AGTEC"])
end
defp validate_valor(changeset) do
modalidade_alteracao = get_field(changeset, :modalidadeAlteracao)
cond do
modalidade_alteracao == 0 ->
validate_number(changeset, :valor, greater_than: 0)
modalidade_alteracao == 1 ->
validate_number(changeset, :valor, greater_than_or_equal_to: 0)
end
end
defp validate_valor_original(%{changes: %{retirada: _saque_or_troco}} = changeset) do
modalidade_alteracao = get_field(changeset, :modalidadeAlteracao)
retirada = get_field(changeset, :retirada)
saque = retirada.saque
troco = retirada.troco
cond do
is_nil(saque) and is_nil(troco) and modalidade_alteracao == 0 ->
validate_number(changeset, :original, greater_than: 0)
is_nil(saque) and is_nil(troco) and modalidade_alteracao == 1 ->
validate_number(changeset, :original, greater_than_or_equal_to: 0)
not is_nil(saque) and is_nil(troco) ->
validate_number(changeset, :original, equal_to: 0)
is_nil(saque) and not is_nil(troco) ->
validate_number(changeset, :original, greater_than: 0)
true ->
changeset
end
end
defp validate_valor_original(changeset) do
modalidade_alteracao = get_field(changeset, :modalidadeAlteracao)
cond do
modalidade_alteracao == 0 ->
validate_number(changeset, :original, greater_than: 0)
modalidade_alteracao == 1 ->
validate_number(changeset, :original, greater_than_or_equal_to: 0)
end
end
defp validate_either_saque_or_troco(%{changes: %{retirada: _saque_or_troco}} = changeset) do
modalidade_alteracao = get_field(changeset, :modalidadeAlteracao)
retirada = get_field(changeset, :retirada)
saque = retirada.saque
troco = retirada.troco
cond do
not is_nil(saque) and not is_nil(troco) ->
add_error(
changeset,
:retirada,
"only one of withdrawal or payment with change must be present"
)
modalidade_alteracao == 1 ->
add_error(
changeset,
:modalidadeAlteracao,
"must be 0 when it is withdrawal or payment with change"
)
true ->
changeset
end
end
defp validate_either_saque_or_troco(changeset), do: changeset
defp info_adicionais_changeset(model, params) do
model
|> cast(params, [:nome, :valor])
|> validate_required([:nome, :valor])
end
end
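# A minimal sketch of validating a decoded payload (all field values below are
# hypothetical; key and txid formats are not checked beyond length here):
#
#   params = %{
#     "revisao" => 0,
#     "chave" => "11111111111",
#     "txid" => "TX123",
#     "status" => "ATIVA",
#     "calendario" => %{
#       "criacao" => "2021-01-01T00:00:00Z",
#       "apresentacao" => "2021-01-01T00:00:00Z"
#     },
#     "valor" => %{"original" => "10.00"}
#   }
#
#   DynamicImmediatePixPayment.changeset(params)
#   #=> an Ecto.Changeset; Ecto.Changeset.apply_action(changeset, :insert)
#   #   yields the validated struct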
|
lib/ex_pix_brcode/payments/models/dynamic_immediate_pix_payment.ex
| 0.708011 | 0.424949 |
dynamic_immediate_pix_payment.ex
|
starcoder
|
defmodule AzureFunctionsBase.Logger do
@moduledoc """
A logger for Azure Functions.
## Levels
The supported levels, ordered by precedence, are:
- `:debug` - for debug-related messages
- `:info` - for information of any kind
- `:warn` - for warnings
- `:error` - for errors
For example, `:info` takes precedence over `:debug`. If your log level is set to `:info`, then `:info`, `:warn`, and `:error` messages will be printed to the console. If your log level is set to `:warn`, only `:warn` and `:error` messages will be printed.
## Setting
Set the log level through the `environment` -> `LOG_LEVEL` setting.
"""
use Agent
@type level() :: :error | :info | :warn | :debug
@type on_start() :: {:ok, pid()} | {:error, {:already_started, pid()} | term()}
@doc """
Start Logger.
`log_level` must be in `[:debug, :info, :warn, :error]`
"""
@spec start_link(level()) :: on_start()
def start_link(log_level \\ :info) do
Agent.start_link(fn -> %{log_level: log_level, logs: []} end, name: __MODULE__)
end
@doc """
Log Debug.
"""
def debug(message) do
if log?(:debug) do
log("#{timestamp()} [DEBUG] #{message |> log_message}")
end
message
end
@doc """
Log Information.
"""
def info(message) do
if log?(:info) do
log("#{timestamp()} [INFO] #{message |> log_message}")
end
message
end
@doc """
Log Warning.
"""
def warn(message) do
if log?(:warn) do
log("#{timestamp()} [WARN] #{message |> log_message}")
end
message
end
@doc """
Log Error.
"""
def error(message) do
if log?(:error) do
log("#{timestamp()} [ERROR] #{message |> log_message}")
end
message
end
@doc """
Get logs.
"""
def logs() do
Agent.get_and_update(__MODULE__, fn state -> {state, %{state | logs: []}} end).logs
end
defp timestamp do
DateTime.utc_now |> to_string
end
defp log_message(message) when is_binary(message), do: message
defp log_message(message), do: inspect(message)
defp log(message) do
Agent.update(__MODULE__, fn %{logs: logs} = state -> %{state | logs: logs ++ [message]} end)
message
end
defp log_level do
Agent.get(__MODULE__, & &1).log_level
end
defp log?(level) do
level |> log?(log_level())
end
defp log?(:debug, log_level) do
case log_level do
:debug -> true
_ -> false
end
end
defp log?(:info, log_level) do
case log_level do
:debug -> true
:info -> true
_ -> false
end
end
defp log?(:warn, log_level) do
case log_level do
:debug -> true
:info -> true
:warn -> true
_ -> false
end
end
defp log?(:error, log_level) do
case log_level do
:debug -> true
:info -> true
:warn -> true
:error -> true
_ -> false
end
end
end
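# A minimal usage sketch, assuming the agent is started directly as below
# (in practice it would usually sit under a supervisor):
#
#   {:ok, _pid} = AzureFunctionsBase.Logger.start_link(:info)
#   AzureFunctionsBase.Logger.debug("ignored at :info level")
#   AzureFunctionsBase.Logger.info("handling request")
#   AzureFunctionsBase.Logger.logs()
#   #=> e.g. ["2021-01-01 00:00:00.000000Z [INFO] handling request"]
#
# Note that logs/0 also clears the buffered entries as it returns them.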
|
lib/azure_functions_base/logger.ex
| 0.832169 | 0.455744 |
logger.ex
|
starcoder
|
defmodule TextDelta.Delta do
# Deprecated and to be removed in 2.0
@moduledoc false
alias TextDelta.Delta.{Transformation, Composition}
@doc false
def new(ops \\ []) do
ops
|> TextDelta.new()
|> unwrap()
end
@doc false
def insert(delta, el, attrs \\ %{}) do
delta
|> wrap()
|> TextDelta.insert(el, attrs)
|> unwrap()
end
@doc false
def retain(delta, len, attrs \\ %{}) do
delta
|> wrap()
|> TextDelta.retain(len, attrs)
|> unwrap()
end
@doc false
def delete(delta, len) do
delta
|> wrap()
|> TextDelta.delete(len)
|> unwrap()
end
@doc false
def append(nil, op), do: append(new(), op)
def append(delta, op) do
delta
|> wrap()
|> TextDelta.append(op)
|> unwrap()
end
defdelegate compose(delta_a, delta_b), to: Composition
defdelegate transform(delta_a, delta_b, priority), to: Transformation
@doc false
def trim(delta) do
delta
|> wrap()
|> TextDelta.trim()
|> unwrap()
end
@doc false
def length(delta, included_ops \\ [:insert, :retain, :delete]) do
delta
|> wrap()
|> TextDelta.length(included_ops)
end
@doc false
def wrap(ops), do: TextDelta.new(ops)
@doc false
def unwrap(delta), do: TextDelta.operations(delta)
end
defmodule TextDelta.Delta.Composition do
# Deprecated and to be removed in 2.0
@moduledoc false
alias TextDelta.Delta
@doc false
def compose(delta_a, delta_b) do
delta_a
|> Delta.wrap()
|> TextDelta.compose(Delta.wrap(delta_b))
|> Delta.unwrap()
end
end
defmodule TextDelta.Delta.Transformation do
# Deprecated and to be removed in 2.0
@moduledoc false
alias TextDelta.Delta
@doc false
def transform(delta_a, delta_b, priority) do
delta_a
|> Delta.wrap()
|> TextDelta.transform(Delta.wrap(delta_b), priority)
|> Delta.unwrap()
end
end
defmodule TextDelta.Delta.Iterator do
# Deprecated and to be removed in 2.0
@moduledoc false
defdelegate next(deltas, skip_type \\ nil), to: TextDelta.Iterator
end
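# A minimal sketch of the wrap/unwrap round trip these shims perform; the
# operation shape shown assumes the TextDelta 1.x map-based format:
#
#   TextDelta.Delta.new()
#   |> TextDelta.Delta.insert("hello")
#   #=> [%{insert: "hello"}]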
|
lib/text_delta/backwards_compatibility_with_1.0.ex
| 0.587825 | 0.486819 |
backwards_compatibility_with_1.0.ex
|
starcoder
|
defmodule ReadDoc.StateMachine do
use ReadDoc.Types
alias ReadDoc.Options
alias ReadDoc.Message
alias ReadDoc.StateMachine.Result
alias ReadDoc.StateMachine.State
import ReadDoc.DocExtractor, only: [extract_doc: 1]
@type result_t() :: Result.result_tuple()
@spec run!(list(String.t()), Options.t(), String.t()) :: list(String.t())
def run!(lines, options, file) do
with {output, messages} <- run(lines, options) do
if !options.silent do
Message.emit_messages(messages, file)
end
output
end
end
@spec run(list(String.t()), Options.t()) :: result_t()
def run(lines, options) do
lines
|> Stream.zip(Stream.iterate(1, &(&1 + 1)))
|> Enum.to_list()
|> state_machine(%Result{}, %State{options: options})
end
@spec state_machine(numbered_lines(), Result.t(), State.t()) :: result_t()
defp state_machine([], result, state), do: _state_machine([], result, state)
defp state_machine(lines = [_l | _], result, state) do
# IO.inspect({state.opendoc[:for], l})
_state_machine(lines, result, state)
end
@copy_state %{state: :copy}
@spec _state_machine(numbered_lines(), Result.t(), State.t()) :: result_t()
defp _state_machine(lines, result, state)
defp _state_machine([], result, @copy_state), do: Result.finalize(result)
defp _state_machine([], result, state) do
result_prime =
Result.add_warning(
result,
"end @doc for #{State.format_opendoc(state)} missing",
State.opened_at(state)
)
case extract_doc(State.current_open(state)) do
nil ->
Result.add_warning(result_prime, "doc not found for #{State.format_opendoc(state)}")
doc ->
Result.add_lines(result_prime, doc)
end
|> Result.finalize()
end
defp _state_machine([line | rest], result, state = @copy_state) do
case begin_doc_match(line, state) do
nil ->
substate_inside_copy(line, rest, result, state)
[_, opendoc] ->
state_machine(
rest,
Result.add_numbered_line(result, line),
State.open(state, opendoc, result)
)
end
end
defp _state_machine(
[line | rest],
result,
state = %{state: :remove_old, opendoc: %{for: opendoc}}
) do
case end_doc_match(line, state) do
nil -> substate_inside_remove(line, rest, result, state)
[_, ^opendoc] -> substate_replace_doc(line, rest, result, state)
[_, opendoc_prime] -> substate_ignore_illegal_close(rest, opendoc_prime, result, state)
end
end
@spec substate_ignore_illegal_close(numbered_lines(), String.t(), Result.t(), State.t()) ::
result_t
defp substate_ignore_illegal_close(rest, opendoc_prime, result, state) do
result_prime =
Result.add_warning(
result,
"ignoring end @doc of #{State.format_opendoc(opendoc_prime, Result.next_lnb(result))} as we are inside a @doc block for #{
State.format_opendoc(state)
}"
)
state_machine(rest, result_prime, state)
end
@spec substate_ignore_illegal_open(
numbered_line(),
numbered_lines(),
String.t(),
Result.t(),
State.t()
) :: result_t()
defp substate_ignore_illegal_open(line, rest, opendoc_prime, result, state) do
result_prime =
Result.add_warning(
result,
"ignoring begin @doc of #{State.format_opendoc(opendoc_prime, Result.next_lnb(result))} as we are inside a @doc block for #{
State.format_opendoc(state)
}"
)
state_machine(rest, Result.add_numbered_line(result_prime, line), state)
end
# CHECK
@spec substate_illegal_close_in_copy(
numbered_line(),
numbered_lines(),
String.t(),
Result.t(),
State.t()
) :: result_t()
defp substate_illegal_close_in_copy(
line,
rest,
closedoc,
result,
state = %{options: %{fix_errors: fix_errors}}
) do
result_prime =
Result.add_warning(
result,
"ignoring end @doc of #{State.format_opendoc(closedoc, Result.next_lnb(result))} as we are not inside a @doc block"
)
state_machine(rest, Result.add_numbered_line_unless(result_prime, line, fix_errors), state)
end
# CHECK
@spec substate_inside_copy(numbered_line(), numbered_lines(), Result.t(), State.t()) ::
result_t()
defp substate_inside_copy(line, rest, result, state) do
case end_doc_match(line, state) do
nil -> state_machine(rest, Result.add_numbered_line(result, line), state)
[_, closedoc] -> substate_illegal_close_in_copy(line, rest, closedoc, result, state)
end
end
@spec substate_inside_remove(numbered_line(), numbered_lines(), Result.t(), State.t()) ::
result_t()
defp substate_inside_remove(line, rest, result, state) do
case begin_doc_match(line, state) do
nil -> state_machine(rest, result, state)
[_, opendoc_prime] -> substate_ignore_illegal_open(line, rest, opendoc_prime, result, state)
end
end
@spec substate_replace_doc(numbered_line(), numbered_lines(), Result.t(), State.t()) ::
result_t()
defp substate_replace_doc(line, rest, result, state = %{opendoc: %{for: for}}) do
copy_state = %{state | state: :copy}
case extract_doc(for) do
nil ->
state_machine(
rest,
Result.add_warning(result, "doc not found for #{State.format_opendoc(state)}")
|> Result.add_numbered_line(line),
copy_state
)
doc ->
state_machine(rest, add_docs_with_line(result, doc, line), copy_state)
end
end
# ------------------------------------
# Helpers
# ------------------------------------
@spec add_docs_with_line(Result.t(), String.t(), numbered_line()) :: Result.t()
defp add_docs_with_line(result, docs, line) do
result
|> Result.add_lines(docs)
|> Result.add_numbered_line(line)
end
# CHECK
@spec begin_doc_match(numbered_line(), State.t()) :: rgx_run_result
defp begin_doc_match({line, _}, state) do
Regex.run(state.options.begin_rgx, line)
end
# CHECK
@spec end_doc_match(numbered_line(), State.t()) :: rgx_run_result
defp end_doc_match({line, _}, state) do
Regex.run(state.options.end_rgx, line)
end
end
|
lib/read_doc/state_machine.ex
| 0.705278 | 0.532547 |
state_machine.ex
|
starcoder
|
defmodule Base62 do
use CustomBase, '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
@moduledoc """
This module provides data encoding and decoding functions for base62 alphabet:
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| 0| 16| G| 32| W| 48| m|
| 1| 1| 17| H| 33| X| 49| n|
| 2| 2| 18| I| 34| Y| 50| o|
| 3| 3| 19| J| 35| Z| 51| p|
| 4| 4| 20| K| 36| a| 52| q|
| 5| 5| 21| L| 37| b| 53| r|
| 6| 6| 22| M| 38| c| 54| s|
| 7| 7| 23| N| 39| d| 55| t|
| 8| 8| 24| O| 40| e| 56| u|
| 9| 9| 25| P| 41| f| 57| v|
| 10| A| 26| Q| 42| g| 58| w|
| 11| B| 27| R| 43| h| 59| x|
| 12| C| 28| S| 44| i| 60| y|
| 13| D| 29| T| 45| j| 61| z|
| 14| E| 30| U| 46| k| | |
| 15| F| 31| V| 47| l| | |
"""
@doc """
Encode a base 10 integer into a base 62 string using the alphabet above.
"""
def encode(integer)
@doc """
Decode a base 62 string built from the alphabet above into a base 10 integer.
Returns the tuple `{:ok, number}` if the binary can be converted using the
alphabet, `:error` otherwise.
def decode(binary)
@doc """
Same as `decode/1`, except this function returns the number directly and
raises an error if the binary cannot be converted.
"""
def decode!(binary)
end
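# A minimal usage sketch (return values assume the usual CustomBase contract):
#
#   Base62.encode(61)    #=> "z"
#   Base62.encode(62)    #=> "10"
#   Base62.decode("10")  #=> {:ok, 62}
#   Base62.decode("!")   #=> :error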
|
lib/base62.ex
| 0.819857 | 0.510802 |
base62.ex
|
starcoder
|
defmodule CCSP.Chapter4.Dijkstra do
alias __MODULE__, as: T
alias CCSP.Chapter4.DijkstraNode
alias CCSP.Chapter4.WeightedGraph
alias CCSP.Chapter4.WeightedEdge
alias CCSP.Chapter2.PriorityQueue
@moduledoc """
Corresponds to CCSP in Python, Chapter 4, titled "Graph Problems"
"""
@type a :: any
@type weighted_path :: list(WeightedEdge.t())
@type t :: __MODULE__.t()
@spec dijkstra(WeightedGraph.t(), a) :: {list(integer | nil), %{integer => WeightedEdge.t()}}
def dijkstra(wg, root) do
first = WeightedGraph.index_of(wg, root)
distances = List.duplicate(nil, WeightedGraph.vertex_count(wg))
distances = List.update_at(distances, first, fn _ -> 0 end)
path = %{}
pq = PriorityQueue.new()
pq = PriorityQueue.push(pq, DijkstraNode.new(first, 0))
{distances, path} = dijkstra_helper(wg, pq, distances, path)
{distances, path}
end
@spec dijkstra_helper(
WeightedGraph.t(),
PriorityQueue.t(WeightedEdge.t()),
list(integer | nil),
%{
integer => WeightedEdge.t()
}
) :: {list(integer | nil), %{integer => WeightedEdge.t()}}
def dijkstra_helper(wg, pq, distances, path) do
if PriorityQueue.empty?(pq) do
{distances, path}
else
{node, pq} = PriorityQueue.pop(pq)
u = node.vertex
dist_u = Enum.at(distances, u)
{pq, distances, path} =
Enum.reduce(
WeightedGraph.edges_for_index(wg, u),
{pq, distances, path},
fn we, {pq, distances, path} ->
dist_v = Enum.at(distances, we.v)
if dist_v == nil or dist_v > we.weight + dist_u do
distances = List.update_at(distances, we.v, fn _ -> we.weight + dist_u end)
path = Map.put(path, we.v, we)
pq = PriorityQueue.push(pq, DijkstraNode.new(we.v, we.weight + dist_u))
{pq, distances, path}
else
{pq, distances, path}
end
end
)
dijkstra_helper(wg, pq, distances, path)
end
end
@spec distance_array_to_vertex_dict(
WeightedGraph.t(),
list(integer | nil)
) :: %{
a => integer | nil
}
def distance_array_to_vertex_dict(wg, distances) do
Enum.reduce(0..(length(distances) - 1), %{}, fn i, distance_dict ->
vertex = WeightedGraph.vertex_at(wg, i)
Map.put(distance_dict, vertex, Enum.at(distances, i))
end)
end
@spec path_dict_to_path(integer, integer, %{integer => WeightedEdge.t()}) :: weighted_path
def path_dict_to_path(_, _, path_dict) when map_size(path_dict) == 0 do
[]
end
def path_dict_to_path(start, goal, path_dict) do
edge_path = []
e = Map.get(path_dict, goal)
edge_path = [e | edge_path]
path_dict_to_path_helper(path_dict, edge_path, e, start)
end
@spec path_dict_to_path_helper(
%{integer => WeightedEdge.t()},
list(WeightedEdge.t()),
WeightedEdge.t(),
integer
) :: list(WeightedEdge.t())
defp path_dict_to_path_helper(path_dict, edge_path, e, start) do
if e.u == start do
edge_path
else
e = Map.get(path_dict, e.u)
edge_path = [e | edge_path]
path_dict_to_path_helper(path_dict, edge_path, e, start)
end
end
end
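# A minimal usage sketch, assuming `alias CCSP.Chapter4.{Dijkstra, WeightedGraph}`
# and a weighted graph `wg` built elsewhere (the builder API lives in
# WeightedGraph and is not shown here; the vertex names are hypothetical):
#
#   {distances, path_dict} = Dijkstra.dijkstra(wg, "Los Angeles")
#   Dijkstra.distance_array_to_vertex_dict(wg, distances)
#   #=> %{"Los Angeles" => 0, "Boston" => ..., ...}
#
#   start = WeightedGraph.index_of(wg, "Los Angeles")
#   goal = WeightedGraph.index_of(wg, "Boston")
#   Dijkstra.path_dict_to_path(start, goal, path_dict)
#   #=> list of WeightedEdge structs along the cheapest route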
|
lib/ccsp/chapter4/dijkstra.ex
| 0.75037 | 0.602237 |
dijkstra.ex
|
starcoder
|
defmodule Wallaby.Element do
@moduledoc """
Defines an Element Struct and interactions with Elements.
Typically these functions are used in conjunction with a `find`:
```
page
|> find(Query.css(".some-element"), fn(element) -> Element.click(element) end)
```
These functions can be used to create new actions specific to your application:
```
def create_todo(todo_field, todo_text) do
todo_field
|> Element.click()
|> Element.fill_in(with: todo_text)
|> Element.send_keys([:enter])
end
```
## Retrying
Unlike `Browser` the actions in `Element` do not retry if the element becomes stale. Instead an exception will be raised.
"""
alias Wallaby.Phantom.Driver
defstruct [:url, :session_url, :parent, :id, screenshots: []]
@opaque value :: String.t | number()
@type attr :: String.t
@type t :: %__MODULE__{
session_url: String.t,
url: String.t,
id: String.t,
screenshots: list,
}
@doc """
Clears any value set in the element.
"""
@spec clear(t) :: t
def clear(element) do
case Driver.clear(element) do
{:ok, _} ->
element
{:error, _} ->
raise Wallaby.StaleReferenceException
end
end
@doc """
Fills in the element with the specified value.
"""
@spec fill_in(t, with: String.t | number()) :: t
def fill_in(element, with: value) when is_number(value) do
fill_in(element, with: to_string(value))
end
def fill_in(element, with: value) when is_binary(value) do
element
|> clear
|> set_value(value)
end
@doc """
Clicks the element.
"""
@spec click(t) :: t
def click(element) do
case Driver.click(element) do
{:ok, _} ->
element
{:error, _} ->
raise Wallaby.StaleReferenceException
end
end
@doc """
Returns the text from the element.
"""
@spec text(t) :: String.t
def text(element) do
case Driver.text(element) do
{:ok, text} ->
text
{:error, :stale_reference_error} ->
raise Wallaby.StaleReferenceException
end
end
@doc """
Gets the value of the element's attribute.
"""
@spec attr(t, attr()) :: String.t | nil
def attr(element, name) do
case Driver.attribute(element, name) do
{:ok, attribute} ->
attribute
{:error, _} ->
raise Wallaby.StaleReferenceException
end
end
@doc """
Returns a boolean based on whether or not the element is selected.
## Note
This only really makes sense for options, checkboxes, and radio buttons.
Everything else will simply return false because they have no notion of
"selected".
"""
@spec selected?(t) :: boolean()
def selected?(element) do
case Driver.selected(element) do
{:ok, value} ->
value
{:error, _} ->
false
end
end
@doc """
Returns a boolean based on whether or not the element is visible.
"""
@spec visible?(t) :: boolean()
def visible?(element) do
case Driver.displayed(element) do
{:ok, value} ->
value
{:error, _} ->
false
end
end
@doc """
Sets the value of the element.
"""
@spec set_value(t, value()) :: t
def set_value(element, value) do
case Driver.set_value(element, value) do
{:ok, _} ->
element
{:error, :stale_reference_error} ->
raise Wallaby.StaleReferenceException
end
end
@doc """
Sends keys to the element.
"""
@spec send_keys(t, String.t | list(atom | String.t)) :: t
def send_keys(element, text) when is_binary(text) do
send_keys(element, [text])
end
def send_keys(element, keys) when is_list(keys) do
case Driver.send_keys(element, keys) do
{:ok, _} ->
element
{:error, :stale_reference_error} ->
raise Wallaby.StaleReferenceException
end
end
@doc """
Gets the value of the element's `value` attribute.
"""
@spec value(t) :: String.t
def value(element) do
attr(element, "value")
end
end
|
lib/wallaby/element.ex
| 0.807157 | 0.93784 |
element.ex
|
starcoder
|
defmodule Meeseeks.Context do
@moduledoc """
Context is available to both Meeseek's selection process and each
individual selector, and allows for selectors to build state (or receive
state from the selection mechanism).
The selection process expects an `accumulator`, `return?` boolean, and
`matches` map to exist in the context, and stores selected nodes in the
`accumulator`, stores matching nodes than need to be filtered in the
`matches` map, and halts selection if the `return?` boolean becomes true.
"""
alias Meeseeks.{Accumulator, Document, Error, Selector}
@accumulator :__accumulator__
@return? :"__return?__"
@matches :__matches__
@nodes :__nodes__
@type t :: %{optional(any) => any}
@doc """
Adds keys required by selection process to the context.
Used internally by Meeseeks.Select- users should have no reason to call.
"""
@spec prepare_for_selection(t) :: t
def prepare_for_selection(context) do
context
|> Map.put(@return?, false)
|> Map.put(@matches, %{})
end
@doc """
Adds an accumulator to context, overriding any existing accumulator in
context.
"""
@spec add_accumulator(t, Accumulator.t()) :: t
def add_accumulator(context, acc) do
Map.put(context, @accumulator, acc)
end
@doc """
Ensures that context contains an accumulator, returning context if it does,
or raising an error if it does not.
"""
@spec ensure_accumulator!(t) :: t
def ensure_accumulator!(context) do
case Map.fetch(context, @accumulator) do
{:ok, _} ->
context
:error ->
raise Error.new(:context, :accumulator_required, %{
message: "Context does not contain required accumulator",
context: context
})
end
end
@doc """
Updates the context's accumulator with the result of calling
Accumulator.add on the current accumulator with the provided document and
id, and sets return? to the result of calling Accumulator.complete? on the
updated accumulator if return? was not already true.
"""
@spec add_to_accumulator(t, Document.t(), Document.node_id()) :: t
def add_to_accumulator(%{@accumulator => acc, @return? => ret} = context, document, id) do
acc = Accumulator.add(acc, document, id)
ret = ret or Accumulator.complete?(acc)
%{context | @accumulator => acc, @return? => ret}
end
@doc """
Returns the result of calling Accumulator.return on the context's
accumulator.
"""
@spec return_accumulator(t) :: any
def return_accumulator(%{@accumulator => acc}) do
Accumulator.return(acc)
end
@doc """
Adds a node to a list in the context's matches map corresponding to the
selector that the node matched.
"""
@spec add_to_matches(t, Selector.t(), Document.node_t()) :: t
def add_to_matches(%{@matches => matches} = context, selector, node) do
case Map.fetch(matches, selector) do
{:ok, nodes} -> put_in(context[@matches][selector], [node | nodes])
:error -> put_in(context[@matches][selector], [node])
end
end
@doc """
Clears the context's matches map.
"""
@spec clear_matches(t) :: t
def clear_matches(context) do
Map.put(context, @matches, %{})
end
@doc """
Returns the key under which the accumulator is stored in the context.
"""
@spec accumulator_key() :: atom
def accumulator_key() do
@accumulator
end
@doc """
Returns the key under which return? is stored in the context.
"""
@spec return_key() :: atom
def return_key() do
@return?
end
@doc """
Returns the key under which matching nodes that need to be filtered are
stored in the context.
"""
@spec matches_key() :: atom
def matches_key() do
@matches
end
@doc """
Returns the key under which the nodes currently being walked are stored in
the context.
"""
@spec nodes_key() :: atom
def nodes_key() do
@nodes
end
end
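# A minimal sketch of how the selection machinery threads context through a
# walk (the accumulator struct below is hypothetical and depends on your
# Meeseeks version; `document` and `node_id` come from the selection walk):
#
#   context =
#     %{}
#     |> Context.prepare_for_selection()
#     |> Context.add_accumulator(%Meeseeks.Accumulator.All{})
#
#   context = Context.add_to_accumulator(context, document, node_id)
#   Context.return_accumulator(context)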
|
lib/meeseeks/context.ex
| 0.86034 | 0.70458 |
context.ex
|
starcoder
|
defmodule RedixPool.Config do
@moduledoc """
## Example Pool Configurations
```
# All pools listed in start_pools will be automatically
# started upon application start. Pools not started here
# can be started by adding RedixPool.redix_pool_spec(pool: pool_name)
# into a supervision tree.
config :redix_pool,
start_pools: [:redix_default]
config :redix_pool, :redix_default,
redis_url: {:system, "DEFAULT_REDIS_URL"},
# https://hexdocs.pm/redix/0.10.2/Redix.html#start_link/1-options
redix_opts: [
sync_connect: true,
sock_opts: [:verify, :verify_none],
],
pool_size: {:system, "DEFAULT_POOL_SIZE", 4}
pool_max_overflow: {:system, "DEFAULT_MAX_OVERFLOW", 8},
timeout: 5000
# A pool named "read". This is also used to compute the process name
config :redix_pool, :sessions_ro,
redis_url: {:system, "SESSION_READ_REDIS_URL"}, # Defaults to redis://localhost:6379/0
redix_opts: [
timeout: 3000,
backoff_initial: 1000,
backoff_max: 10000,
sock_opts: [:verify, :verify_none]
],
pool_size: {:system, "SESSION_READ_POOL_SIZE", 8}
pool_max_overflow: {:system, "SESSION_READ_MAX_OVERFLOW", 16}
"""
@default_redis_url "redis://localhost:6379/0"
@default_pool_size 4
@default_pool_max_overflow 8
@doc "Compute and parse config map by pool name"
def config_map(args) do
pool_name = args[:pool] || raise "Must pass [pool: pool_name]"
config_loc = args[:config_loc] || pool_name
# TODO: Possibly filter this through resolve_config {:system, _}
redis_url = args[:redis_url] || get({config_loc, :redis_url})
redix_opts_from_config = args[:redix_opts] || get({config_loc, :redix_opts}, [])
# TODO: Use separate SSL socket opts when SSL is requested
redix_opts =
@default_redis_url
# Defaults
|> opts_from_uri
# Override from config
|> Keyword.merge(redix_opts_from_config)
# Override from supplied redis uri
|> Keyword.merge(opts_from_uri(redis_url))
# Filter out ssl socket_opts' if not using ssl
|> normalize_redix_opts
pool_size = args[:pool_size] || get({config_loc, :pool_size, :integer}, @default_pool_size)
pool_max_overflow =
args[:pool_max_overflow] ||
get({config_loc, :pool_max_overflow, :integer}, @default_pool_max_overflow)
%{
pool_name: pool_name,
redix_opts: redix_opts,
pool_size: pool_size,
pool_max_overflow: pool_max_overflow
}
end
@doc "Gets the list of pools to start when RedixPool application starts"
def starting_pools, do: Application.get_env(:redix_pool, :start_pools, [])
@doc false
def normalize_redix_opts(opts) do
cond do
opts[:ssl] == true ->
opts
!is_nil(opts[:socket_opts][:verify]) ->
# If we are not using SSL, then drop the verify option, otherwise
# Erlang tcp will fail
Keyword.put(opts, :socket_opts, Keyword.drop(opts[:socket_opts], [:verify]))
true ->
opts
end
end
@doc false
def get({pool_name, key, :integer}, default) do
{pool_name, key}
|> get(default)
|> maybe_to_integer
end
@doc false
def get({pool_name, key}, default) do
:redix_pool
|> Application.get_env(pool_name, %{})
|> Access.get(key)
|> resolve_config(default)
end
@doc false
def get(key, default) when is_atom(key) do
get({:default, key}, default)
end
def get({_pool_name, _key, :integer} = spec), do: get(spec, nil)
def get({_pool_name, _key} = spec), do: get(spec, nil)
@doc false
def get(key) when is_atom(key), do: get(key, nil)
@doc "Helper function useful for parsing ENV variables"
def maybe_to_integer(x) when is_binary(x), do: String.to_integer(x)
def maybe_to_integer(x) when is_integer(x), do: x
def maybe_to_integer(x) when is_nil(x), do: nil
@doc false
def resolve_config({:system, var_name, user_default}, _lib_default),
do: System.get_env(var_name) |> present_or_default(user_default)
def resolve_config({:system, var_name}, default),
do: System.get_env(var_name) |> present_or_default(default)
def resolve_config(value, default) when is_nil(value), do: default
def resolve_config(value, _default), do: value
@doc false
defp present_or_default(x, default) when is_nil(x), do: default
defp present_or_default("", default), do: default
defp present_or_default(x, _default), do: x
@doc false
# Add identity clauses
defp opts_from_uri(nil), do: []
defp opts_from_uri(""), do: []
defp opts_from_uri(uri) when is_binary(uri), do: Redix.URI.opts_from_uri(uri)
end
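# A minimal usage sketch, assuming the :redix_default pool from the moduledoc
# above is configured:
#
#   RedixPool.Config.config_map(pool: :redix_default)
#   #=> %{
#   #     pool_name: :redix_default,
#   #     redix_opts: [host: "localhost", port: 6379, ...],
#   #     pool_size: 4,
#   #     pool_max_overflow: 8
#   #   }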
|
lib/redix_pool/config.ex
| 0.575588 | 0.491883 |
config.ex
|
starcoder
|
defmodule Dictionary.Type.Timestamp do
@moduledoc """
Timestamp type in ISO8601 format.
Timestamp format must be supplied for conversion to ISO8601. `nil` values will
be converted to empty strings regardless of specified string format. Empty
string values are supported as well.
See [Timex](https://hexdocs.pm/timex/Timex.Format.DateTime.Formatters.Strftime.html)
for possible format field values.
Timestamps will be converted to UTC timezone if `timezone` is supplied. If no
`timezone` value is supplied, UTC is assumed.
## Init options
* `format` - Format to parse string into `DateTime`.
* `timezone` - Value's timezone. Defaults to UTC.
"""
use Definition, schema: Dictionary.Type.Timestamp.V1
use JsonSerde, alias: "dictionary_timestamp"
@type t :: %__MODULE__{
version: integer,
name: String.t(),
description: String.t(),
format: String.t(),
timezone: String.t()
}
defstruct version: 1,
name: nil,
description: "",
format: "%FT%T.%f",
timezone: "Etc/UTC"
defimpl Dictionary.Type.Normalizer, for: __MODULE__ do
@tokenizer Timex.Parse.DateTime.Tokenizers.Strftime
@utc "Etc/UTC"
def normalize(_, value) when value in [nil, ""] do
Ok.ok("")
end
def normalize(%{format: format, timezone: timezone}, value) do
with {:ok, date} <- Timex.parse(value, format, @tokenizer) do
date
|> attach_timezone(timezone)
|> Ok.map(&to_utc/1)
|> Ok.map(&NaiveDateTime.to_iso8601/1)
end
end
defp attach_timezone(%NaiveDateTime{} = datetime, timezone) do
DateTime.from_naive(datetime, timezone)
end
defp attach_timezone(datetime, _), do: Ok.ok(datetime)
defp to_utc(%DateTime{} = datetime) do
DateTime.shift_zone(datetime, @utc)
end
defp to_utc(datetime), do: Ok.ok(datetime)
end
end
defmodule Dictionary.Type.Timestamp.V1 do
@moduledoc false
use Definition.Schema
@impl true
def s do
schema(%Dictionary.Type.Timestamp{
version: version(1),
name: lowercase_string(),
description: string(),
format: required_string(),
timezone: required_string()
})
end
end
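# A minimal sketch of normalization through the protocol (the exact ISO8601
# output precision depends on how Timex parses the configured format):
#
#   alias Dictionary.Type.{Normalizer, Timestamp}
#
#   field = %Timestamp{name: "ts", format: "%FT%T.%f", timezone: "Etc/UTC"}
#   Normalizer.normalize(field, "2021-01-01T00:00:00.000")
#   #=> {:ok, utc_iso8601_string}
#   Normalizer.normalize(field, nil)
#   #=> {:ok, ""}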
|
apps/definition_dictionary/lib/dictionary/type/timestamp.ex
| 0.892723 | 0.599485 |
timestamp.ex
|
starcoder
|
defmodule Blockchain do
@moduledoc """
The blockchain
"""
alias Blockchain.Block
alias Blockchain.Extensions.SmartContracts
alias Blockchain.Hash
alias Blockchain.Transaction
alias Blockchain.TransactionIO
alias Blockchain.Wallet
@enforce_keys [:blocks, :utxo]
defstruct @enforce_keys
@typedoc """
Represents a blockchain
"""
@type t :: %__MODULE__{
blocks: [Block.t()],
utxo: MapSet.t(TransactionIO)
}
@doc """
Initialize a blockchain given a genesis transaction and genesis hash
"""
@spec initialize(Transaction.t(), Hash.t()) :: __MODULE__.t()
def initialize(%Transaction{value: value, to: to} = transaction, seed_hash) do
seed_block =
transaction
|> Transaction.process()
|> Block.mine_block(seed_hash)
%__MODULE__{
blocks: [seed_block],
utxo: MapSet.new([TransactionIO.new(value, to)])
}
end
@doc """
Calculates the rewards for the number of blocks
"""
@spec mining_reward_factor([Block.t()]) :: number()
def mining_reward_factor(blocks) do
blocks
|> Enum.count()
|> calculate_reward_factor()
end
@doc """
Helper function to calculate the amount of rewards for mining a given number of blocks
"""
@spec calculate_reward_factor(non_neg_integer()) :: number()
def calculate_reward_factor(number_of_blocks)
when is_integer(number_of_blocks) and number_of_blocks >= 0 do
50 / Integer.pow(2, Integer.floor_div(number_of_blocks, 210_000))
end
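# For example, the reward starts at 50 and halves every 210,000 blocks:
#
#   calculate_reward_factor(0)        #=> 50.0
#   calculate_reward_factor(209_999)  #=> 50.0
#   calculate_reward_factor(210_000)  #=> 25.0
#   calculate_reward_factor(420_000)  #=> 12.5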
@doc """
Add a transaction to the blockchain. This mines a new block, creates a new UTXO based on
the processed transaction outputs, inputs, and the current UTXO on the blockchain. Then
the newly mined block is added to the blockchain's list of blocks, and the rewards are
calculated based on the current UTXO.
"""
@spec add_transaction(__MODULE__.t(), Transaction.t()) :: __MODULE__.t()
def add_transaction(
%__MODULE__{blocks: blocks, utxo: utxo} = _blockchain,
%Transaction{from: from, inputs: processed_inputs, outputs: processed_outputs} =
transaction
) do
new_block =
Block.mine_block(transaction, blocks |> List.first() |> Map.fetch!(:current_hash))
utxo =
MapSet.union(
MapSet.new(processed_outputs),
MapSet.difference(utxo, MapSet.new(processed_inputs))
)
new_blocks = [new_block | blocks]
utxo_rewarded = MapSet.put(utxo, TransactionIO.new(mining_reward_factor(new_blocks), from))
%__MODULE__{
blocks: new_blocks,
utxo: utxo_rewarded
}
end
@doc """
Calculate the balance of a wallet, which is the sum of all unspent transactions for the
wallet's owner on the blockchain
"""
@spec balance_wallet_blockchain(__MODULE__.t(), Wallet.t()) :: number()
def balance_wallet_blockchain(%__MODULE__{utxo: utxo} = _blockchain, %Wallet{} = wallet) do
utxo
|> Enum.filter(fn transaction_io -> wallet == transaction_io.owner end)
|> Enum.map(fn transaction_io -> transaction_io.value end)
|> Enum.sum()
end
@doc """
Send money from one wallet to another on the blockchain by initiating a transaction and processing
it. The transaction is added to the blockchain only if it is valid.
"""
@spec send_money(__MODULE__.t(), Wallet.t(), Wallet.t(), number(), SmartContracts.contract()) ::
__MODULE__.t()
def send_money(
%__MODULE__{utxo: utxo} = blockchain,
%Wallet{} = from,
%Wallet{} = to,
value,
contract \\ []
) do
receiver_transaction_ios =
Enum.filter(utxo, fn transaction_io -> from == transaction_io.owner end)
transaction = Transaction.new(from, to, value, receiver_transaction_ios)
processed_transaction = Transaction.process(transaction)
# If the balance of the sending wallet is greater than or equal to the value being sent
# and the processed transaction is valid, add the transaction to the blockchain.
# Otherwise, return the blockchain unchanged.
if balance_wallet_blockchain(blockchain, from) >= value and
SmartContracts.valid_transaction_contract?(processed_transaction, contract) do
add_transaction(blockchain, processed_transaction)
else
blockchain
end
end
@doc """
Validates a blockchain
"""
@spec valid?(__MODULE__.t()) :: boolean()
def valid?(%__MODULE__{blocks: blocks} = _blockchain) do
all_previous_hashes_except_last =
blocks |> Enum.map(fn block -> block.previous_hash end) |> Enum.drop(-1)
all_current_hashes_except_first =
blocks |> Enum.map(fn block -> block.current_hash end) |> Enum.drop(1)
all_blocks_valid? = Enum.all?(blocks, &Block.valid?/1)
all_transactions_valid? =
Enum.all?(Enum.map(blocks, fn block -> block.data end), &Transaction.valid?/1)
all_blocks_mined? =
Enum.all?(Enum.map(blocks, fn block -> block.current_hash end), &Block.mined?/1)
all_previous_hashes_except_last == all_current_hashes_except_first and
all_blocks_valid? and
all_transactions_valid? and
all_blocks_mined?
end
end
|
lib/blockchain.ex
| 0.839652 | 0.527621 |
blockchain.ex
|
starcoder
|
defmodule Litmus.Type.Boolean do
@moduledoc """
This type validates and converts values to booleans. It converts truthy and
falsy values to `true` or `false`.
## Options
* `:default` - Setting `:default` will populate a field with the provided
value, assuming that it is not present already. If a field already has a
value present, it will not be altered.
* `:required` - Setting `:required` to `true` will cause a validation error
when a field is not present or the value is `nil`. Allowed values for
required are `true` and `false`. The default is `false`.
* `:truthy` - Allows additional values, i.e. truthy values to be considered
valid booleans by converting them to `true` during validation. Allowed value
is an array of strings, numbers, or booleans. The default is `[true, "true"]`
* `:falsy` - Allows additional values, i.e. falsy values to be considered
valid booleans by converting them to `false` during validation. Allowed value
is an array of strings, number or boolean values. The default is `[false,
"false"]`
## Examples
iex> schema = %{
...> "new_user" => %Litmus.Type.Boolean{
...> truthy: ["1"],
...> falsy: ["0"]
...> }
...> }
iex> params = %{"new_user" => "1"}
iex> Litmus.validate(params, schema)
{:ok, %{"new_user" => true}}
iex> schema = %{"new_user" => %Litmus.Type.Boolean{}}
iex> params = %{"new_user" => 0}
iex> Litmus.validate(params, schema)
{:error, "new_user must be a boolean"}
"""
alias Litmus.{Default, Required}
@truthy_default [true, "true"]
@falsy_default [false, "false"]
defstruct default: Litmus.Type.Any.NoDefault,
truthy: @truthy_default,
falsy: @falsy_default,
required: false
@type t :: %__MODULE__{
default: any,
truthy: [term],
falsy: [term],
required: boolean
}
@spec validate_field(t, String.t(), map) :: {:ok, map} | {:error, String.t()}
def validate_field(type, field, data) do
with {:ok, data} <- Required.validate(type, field, data),
{:ok, data} <- Default.validate(type, field, data),
{:ok, data} <- truthy_falsy_validate(type, field, data) do
{:ok, data}
else
{:error, msg} -> {:error, msg}
end
end
@spec check_boolean_values(term, [term], [term]) :: boolean
defp check_boolean_values(initial_value, additional_values, default_values)
when is_binary(initial_value) do
allowed_values =
(additional_values ++ default_values)
|> Enum.uniq()
|> Enum.map(fn item ->
if is_binary(item), do: String.downcase(item), else: item
end)
String.downcase(initial_value) in allowed_values
end
defp check_boolean_values(initial_value, additional_values, default_values) do
initial_value in Enum.uniq(additional_values ++ default_values)
end
@spec truthy_falsy_validate(t, String.t(), map) :: {:ok, map} | {:error, String.t()}
defp truthy_falsy_validate(%__MODULE__{falsy: falsy, truthy: truthy}, field, params) do
cond do
!Map.has_key?(params, field) ->
{:ok, params}
params[field] == nil ->
{:ok, params}
check_boolean_values(params[field], truthy, @truthy_default) ->
{:ok, Map.replace!(params, field, true)}
check_boolean_values(params[field], falsy, @falsy_default) ->
{:ok, Map.replace!(params, field, false)}
true ->
{:error, "#{field} must be a boolean"}
end
end
defimpl Litmus.Type do
alias Litmus.Type
@spec validate(Type.t(), String.t(), map) :: {:ok, map} | {:error, String.t()}
def validate(type, field, data), do: Type.Boolean.validate_field(type, field, data)
end
end
|
lib/litmus/type/boolean.ex
| 0.913982 | 0.675052 |
boolean.ex
|
starcoder
|
defmodule AWS.STS do
@moduledoc """
AWS Security Token Service
The AWS Security Token Service (STS) is a web service that enables you to
request temporary, limited-privilege credentials for AWS Identity and
Access Management (IAM) users or for users that you authenticate (federated
users). This guide provides descriptions of the STS API. For more detailed
information about using this service, go to [Temporary Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
<note> As an alternative to using the API, you can use one of the AWS SDKs,
which consist of libraries and sample code for various programming
languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs
provide a convenient way to create programmatic access to STS. For example,
the SDKs take care of cryptographically signing requests, managing errors,
and retrying requests automatically. For information about the AWS SDKs,
including how to download and install them, see the [Tools for Amazon Web
Services page](http://aws.amazon.com/tools/).
</note> For information about setting up signatures and authorization
through the API, go to [Signing AWS API
Requests](http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
in the *AWS General Reference*. For general information about the Query
API, go to [Making Query
Requests](http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
in *Using IAM*. For information about using security tokens with other AWS
products, go to [AWS Services That Work with
IAM](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
in the *IAM User Guide*.
If you're new to AWS and need additional technical information about a
specific AWS product, you can find the product's technical documentation at
[http://aws.amazon.com/documentation/](http://aws.amazon.com/documentation/).
**Endpoints**
The AWS Security Token Service (STS) has a default endpoint of
https://sts.amazonaws.com that maps to the US East (N. Virginia) region.
Additional regions are available and are activated by default. For more
information, see [Activating and Deactivating AWS STS in an AWS
Region](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
in the *IAM User Guide*.
For information about STS endpoints, see [Regions and
Endpoints](http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
in the *AWS General Reference*.
**Recording API requests**
STS supports AWS CloudTrail, which is a service that records AWS calls for
your AWS account and delivers log files to an Amazon S3 bucket. By using
information collected by CloudTrail, you can determine what requests were
successfully made to STS, who made the request, when it was made, and so
on. To learn more about CloudTrail, including how to turn it on and find
your log files, see the [AWS CloudTrail User
Guide](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
"""
@doc """
Returns a set of temporary security credentials (consisting of an access
key ID, a secret access key, and a security token) that you can use to
access AWS resources that you might not normally have access to. Typically,
you use `AssumeRole` for cross-account access or federation. For a
comparison of `AssumeRole` with the other APIs that produce temporary
credentials, see [Requesting Temporary Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS
APIs](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
**Important:** You cannot call `AssumeRole` by using AWS root account
credentials; access is denied. You must use credentials for an IAM user or
an IAM role to call `AssumeRole`.
For cross-account access, imagine that you own multiple accounts and need
to access resources in each account. You could create long-term credentials
in each account to access those resources. However, managing all those
credentials and remembering which one can access which account can be time
consuming. Instead, you can create one set of long-term credentials in one
account and then use temporary security credentials to access all the other
accounts by assuming roles in those accounts. For more information about
roles, see [IAM Roles (Delegation and
Federation)](http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
in the *IAM User Guide*.
For federation, you can, for example, grant single sign-on access to the
AWS Management Console. If you already have an identity and authentication
system in your corporate network, you don't have to recreate user
identities in AWS in order to grant those user identities access to AWS.
Instead, after a user has been authenticated, you call `AssumeRole` (and
specify the role with the appropriate permissions) to get temporary
security credentials for that user. With those temporary security
credentials, you construct a sign-in URL that users can use to access the
console. For more information, see [Common Scenarios for Temporary
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
in the *IAM User Guide*.
By default, the temporary security credentials created by `AssumeRole` last
for one hour. However, you can use the optional `DurationSeconds` parameter
to specify the duration of your session. You can provide a value from 900
seconds (15 minutes) up to the maximum session duration setting for the
role. This setting can have a value from 1 hour to 12 hours. To learn how
to view the maximum value for your role, see [View the Maximum Session
Duration Setting for a
Role](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
in the *IAM User Guide*. The maximum session duration limit applies when
you use the `AssumeRole*` API operations or the `assume-role*` CLI
operations but does not apply when you use those operations to create a
console URL. For more information, see [Using IAM
Roles](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
in the *IAM User Guide*.
The temporary security credentials created by `AssumeRole` can be used to
make API calls to any AWS service with the following exception: you cannot
call the STS service's `GetFederationToken` or `GetSessionToken` APIs.
Optionally, you can pass an IAM access policy to this operation. If you
choose not to pass a policy, the temporary security credentials that are
returned by the operation have the permissions that are defined in the
access policy of the role that is being assumed. If you pass a policy to
this operation, the temporary security credentials that are returned by the
operation have the permissions that are allowed by both the access policy
of the role that is being assumed, * **and** * the policy that you pass.
This gives you a way to further restrict the permissions for the resulting
temporary security credentials. You cannot use the passed policy to grant
permissions that are in excess of those allowed by the access policy of the
role that is being assumed. For more information, see [Permissions for
AssumeRole, AssumeRoleWithSAML, and
AssumeRoleWithWebIdentity](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
in the *IAM User Guide*.
To assume a role, your AWS account must be trusted by the role. The trust
relationship is defined in the role's trust policy when the role is
created. That trust policy states which accounts are allowed to delegate
access to this account's role.
The user who wants to access the role must also have permissions delegated
from the role's administrator. If the user is in a different account than
the role, then the user's administrator must attach a policy that allows
the user to call AssumeRole on the ARN of the role in the other account. If
the user is in the same account as the role, then you can either attach a
policy to the user (identical to the previous different account user), or
you can add the user as a principal directly in the role's trust policy. In
this case, the trust policy acts as the only resource-based policy in IAM,
and users in the same account as the role do not need explicit permission
to assume the role. For more information about trust policies and
resource-based policies, see [IAM
Policies](http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
in the *IAM User Guide*.
**Using MFA with AssumeRole**
You can optionally include multi-factor authentication (MFA) information
when you call `AssumeRole`. This is useful for cross-account scenarios in
which you want to make sure that the user who is assuming the role has been
authenticated using an AWS MFA device. In that scenario, the trust policy
of the role being assumed includes a condition that tests for MFA
authentication; if the caller does not include valid MFA information, the
request to assume the role is denied. The condition in a trust policy that
tests for MFA authentication might look like the following example.
`"Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}`
For more information, see [Configuring MFA-Protected API
Access](http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
in the *IAM User Guide* guide.
To use MFA with `AssumeRole`, you pass values for the `SerialNumber` and
`TokenCode` parameters. The `SerialNumber` value identifies the user's
hardware or virtual MFA device. The `TokenCode` is the time-based one-time
password (TOTP) that the MFA devices produces.
"""
def assume_role(client, input, options \\ []) do
request(client, "AssumeRole", input, options)
end
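# A minimal usage sketch. The client struct shape follows older aws-elixir
# releases and the credentials/role ARN are hypothetical; the input keys
# follow the STS AssumeRole API:
#
#   client = %AWS.Client{
#     access_key_id: "AKIA...",
#     secret_access_key: "...",
#     region: "us-east-1",
#     endpoint: "amazonaws.com"
#   }
#
#   AWS.STS.assume_role(client, %{
#     "RoleArn" => "arn:aws:iam::123456789012:role/demo",
#     "RoleSessionName" => "demo-session",
#     "DurationSeconds" => 3600
#   })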
@doc """
Returns a set of temporary security credentials for users who have been
authenticated via a SAML authentication response. This operation provides a
mechanism for tying an enterprise identity store or directory to role-based
AWS access without user-specific credentials or configuration. For a
comparison of `AssumeRoleWithSAML` with the other APIs that produce
temporary credentials, see [Requesting Temporary Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS
APIs](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
The temporary security credentials returned by this operation consist of an
access key ID, a secret access key, and a security token. Applications can
use these temporary security credentials to sign calls to AWS services.
By default, the temporary security credentials created by
`AssumeRoleWithSAML` last for one hour. However, you can use the optional
`DurationSeconds` parameter to specify the duration of your session. Your
role session lasts for the duration that you specify, or until the time
specified in the SAML authentication response's `SessionNotOnOrAfter`
value, whichever is shorter. You can provide a `DurationSeconds` value from
900 seconds (15 minutes) up to the maximum session duration setting for the
role. This setting can have a value from 1 hour to 12 hours. To learn how
to view the maximum value for your role, see [View the Maximum Session
Duration Setting for a
Role](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
in the *IAM User Guide*. The maximum session duration limit applies when
you use the `AssumeRole*` API operations or the `assume-role*` CLI
operations but does not apply when you use those operations to create a
console URL. For more information, see [Using IAM
Roles](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
in the *IAM User Guide*.
The temporary security credentials created by `AssumeRoleWithSAML` can be
used to make API calls to any AWS service with the following exception: you
cannot call the STS service's `GetFederationToken` or `GetSessionToken`
APIs.
Optionally, you can pass an IAM access policy to this operation. If you
choose not to pass a policy, the temporary security credentials that are
returned by the operation have the permissions that are defined in the
access policy of the role that is being assumed. If you pass a policy to
this operation, the temporary security credentials that are returned by the
operation have the permissions that are allowed by the intersection of both
the access policy of the role that is being assumed, * **and** * the policy
that you pass. This means that both policies must grant the permission for
the action to be allowed. This gives you a way to further restrict the
permissions for the resulting temporary security credentials. You cannot
use the passed policy to grant permissions that are in excess of those
allowed by the access policy of the role that is being assumed. For more
information, see [Permissions for AssumeRole, AssumeRoleWithSAML, and
AssumeRoleWithWebIdentity](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
in the *IAM User Guide*.
Before your application can call `AssumeRoleWithSAML`, you must configure
your SAML identity provider (IdP) to issue the claims required by AWS.
Additionally, you must use AWS Identity and Access Management (IAM) to
create a SAML provider entity in your AWS account that represents your
identity provider, and create an IAM role that specifies this SAML provider
in its trust policy.
Calling `AssumeRoleWithSAML` does not require the use of AWS security
credentials. The identity of the caller is validated by using keys in the
metadata document that is uploaded for the SAML provider entity for your
identity provider.
<important> Calling `AssumeRoleWithSAML` can result in an entry in your AWS
CloudTrail logs. The entry includes the value in the `NameID` element of
the SAML assertion. We recommend that you use a NameIDType that is not
associated with any personally identifiable information (PII). For example,
you could instead use the Persistent Identifier
(`urn:oasis:names:tc:SAML:2.0:nameid-format:persistent`).
</important> For more information, see the following resources:
<ul> <li> [About SAML 2.0-based
Federation](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
in the *IAM User Guide*.
</li> <li> [Creating SAML Identity
Providers](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
in the *IAM User Guide*.
</li> <li> [Configuring a Relying Party and
Claims](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
in the *IAM User Guide*.
</li> <li> [Creating a Role for SAML 2.0
Federation](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
in the *IAM User Guide*.
</li> </ul>
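## Example

A minimal, hypothetical sketch (placeholder ARNs and assertion; `client` is a
map of the shape consumed by `request/4` below, and this module is assumed to
be aliased as `STS`):

```
client = %{access_key_id: "", secret_access_key: "", region: "us-east-1",
  endpoint: "amazonaws.com", proto: "https", port: 443}
input = %{
  "RoleArn" => "arn:aws:iam::123456789012:role/saml-role",
  "PrincipalArn" => "arn:aws:iam::123456789012:saml-provider/my-idp",
  "SAMLAssertion" => "base64-encoded-assertion"
}
{:ok, result, _response} = STS.assume_role_with_s_a_m_l(client, input)
```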
"""
def assume_role_with_s_a_m_l(client, input, options \\ []) do
request(client, "AssumeRoleWithSAML", input, options)
end
@doc """
Returns a set of temporary security credentials for users who have been
authenticated in a mobile or web application with a web identity provider,
such as Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
Connect-compatible identity provider.
<note> For mobile applications, we recommend that you use Amazon Cognito.
You can use Amazon Cognito with the [AWS SDK for
iOS](http://aws.amazon.com/sdkforios/) and the [AWS SDK for
Android](http://aws.amazon.com/sdkforandroid/) to uniquely identify a user
and supply the user with a consistent identity throughout the lifetime of
an application.
To learn more about Amazon Cognito, see [Amazon Cognito
Overview](http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
in the *AWS SDK for Android Developer Guide* and [Amazon Cognito
Overview](http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
in the *AWS SDK for iOS Developer Guide*.
</note> Calling `AssumeRoleWithWebIdentity` does not require the use of AWS
security credentials. Therefore, you can distribute an application (for
example, on mobile devices) that requests temporary security credentials
without including long-term AWS credentials in the application, and without
deploying server-based proxy services that use long-term AWS credentials.
Instead, the identity of the caller is validated by using a token from the
web identity provider. For a comparison of `AssumeRoleWithWebIdentity` with
the other APIs that produce temporary credentials, see [Requesting
Temporary Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS
APIs](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
The temporary security credentials returned by this API consist of an
access key ID, a secret access key, and a security token. Applications can
use these temporary security credentials to sign calls to AWS service APIs.
By default, the temporary security credentials created by
`AssumeRoleWithWebIdentity` last for one hour. However, you can use the
optional `DurationSeconds` parameter to specify the duration of your
session. You can provide a value from 900 seconds (15 minutes) up to the
maximum session duration setting for the role. This setting can have a
value from 1 hour to 12 hours. To learn how to view the maximum value for
your role, see [View the Maximum Session Duration Setting for a
Role](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
in the *IAM User Guide*. The maximum session duration limit applies when
you use the `AssumeRole*` API operations or the `assume-role*` CLI
operations but does not apply when you use those operations to create a
console URL. For more information, see [Using IAM
Roles](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
in the *IAM User Guide*.
The temporary security credentials created by `AssumeRoleWithWebIdentity`
can be used to make API calls to any AWS service with the following
exception: you cannot call the STS service's `GetFederationToken` or
`GetSessionToken` APIs.
Optionally, you can pass an IAM access policy to this operation. If you
choose not to pass a policy, the temporary security credentials that are
returned by the operation have the permissions that are defined in the
access policy of the role that is being assumed. If you pass a policy to
this operation, the temporary security credentials that are returned by the
operation have the permissions that are allowed by both the access policy
of the role that is being assumed **and** the policy that you pass.
This gives you a way to further restrict the permissions for the resulting
temporary security credentials. You cannot use the passed policy to grant
permissions that are in excess of those allowed by the access policy of the
role that is being assumed. For more information, see [Permissions for
AssumeRole, AssumeRoleWithSAML, and
AssumeRoleWithWebIdentity](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
in the *IAM User Guide*.
Before your application can call `AssumeRoleWithWebIdentity`, you must have
an identity token from a supported identity provider and create a role that
the application can assume. The role that your application assumes must
trust the identity provider that is associated with the identity token. In
other words, the identity provider must be specified in the role's trust
policy.
<important> Calling `AssumeRoleWithWebIdentity` can result in an entry in
your AWS CloudTrail logs. The entry includes the
[Subject](http://openid.net/specs/openid-connect-core-1_0.html#Claims) of
the provided Web Identity Token. We recommend that you avoid using any
personally identifiable information (PII) in this field. For example, you
could instead use a GUID or a pairwise identifier, as [suggested in the
OIDC
specification](http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
</important> For more information about how to use web identity federation
and the `AssumeRoleWithWebIdentity` API, see the following resources:
<ul> <li> [Using Web Identity Federation APIs for Mobile
Apps](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
and [Federation Through a Web-based Identity
Provider](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
</li> <li> [ Web Identity Federation
Playground](https://web-identity-federation-playground.s3.amazonaws.com/index.html).
This interactive website lets you walk through the process of
authenticating via Login with Amazon, Facebook, or Google, getting
temporary security credentials, and then using those credentials to make a
request to AWS.
</li> <li> [AWS SDK for iOS](http://aws.amazon.com/sdkforios/) and [AWS SDK
for Android](http://aws.amazon.com/sdkforandroid/). These toolkits contain
sample apps that show how to invoke the identity providers, and then how to
use the information from these providers to get and use temporary security
credentials.
</li> <li> [Web Identity Federation with Mobile
Applications](http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
This article discusses web identity federation and shows an example of how
to use web identity federation to get access to content in Amazon S3.
</li> </ul>
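## Example

A minimal, hypothetical sketch (placeholder values; `client` as in the
`assume_role_with_s_a_m_l/3` example above):

```
input = %{
  "RoleArn" => "arn:aws:iam::123456789012:role/web-identity-role",
  "RoleSessionName" => "app-session",
  "WebIdentityToken" => "token-from-your-identity-provider",
  "DurationSeconds" => 3600
}
{:ok, result, _response} = STS.assume_role_with_web_identity(client, input)
```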
"""
def assume_role_with_web_identity(client, input, options \\ []) do
request(client, "AssumeRoleWithWebIdentity", input, options)
end
@doc """
Decodes additional information about the authorization status of a request
from an encoded message returned in response to an AWS request.
For example, if a user is not authorized to perform an action that he or
she has requested, the request returns a `Client.UnauthorizedOperation`
response (an HTTP 403 response). Some AWS actions additionally return an
encoded message that can provide details about this authorization failure.
<note> Only certain AWS actions return an encoded authorization message.
The documentation for an individual action indicates whether that action
returns an encoded message in addition to returning an HTTP code.
</note> The message is encoded because the details of the authorization
status can constitute privileged information that the user who requested
the action should not see. To decode an authorization status message, a
user must be granted permissions via an IAM policy to request the
`DecodeAuthorizationMessage` (`sts:DecodeAuthorizationMessage`) action.
The decoded message includes the following type of information:
<ul> <li> Whether the request was denied due to an explicit deny or due to
the absence of an explicit allow. For more information, see [Determining
Whether a Request is Allowed or
Denied](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
in the *IAM User Guide*.
</li> <li> The principal who made the request.
</li> <li> The requested action.
</li> <li> The requested resource.
</li> <li> The values of condition keys in the context of the user's
request.
</li> </ul>
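## Example

A minimal sketch; `encoded_message` stands in for the encoded message returned
alongside the `Client.UnauthorizedOperation` error:

```
input = %{"EncodedMessage" => encoded_message}
{:ok, result, _response} = STS.decode_authorization_message(client, input)
```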
"""
def decode_authorization_message(client, input, options \\ []) do
request(client, "DecodeAuthorizationMessage", input, options)
end
@doc """
Returns details about the IAM identity whose credentials are used to call
the API.
"""
def get_caller_identity(client, input, options \\ []) do
request(client, "GetCallerIdentity", input, options)
end
@doc """
Returns a set of temporary security credentials (consisting of an access
key ID, a secret access key, and a security token) for a federated user. A
typical use is in a proxy application that gets temporary security
credentials on behalf of distributed applications inside a corporate
network. Because you must call the `GetFederationToken` action using the
long-term security credentials of an IAM user, this call is appropriate in
contexts where those credentials can be safely stored, usually in a
server-based application. For a comparison of `GetFederationToken` with the
other APIs that produce temporary credentials, see [Requesting Temporary
Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS
APIs](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
<note> If you are creating a mobile-based or browser-based app that can
authenticate users using a web identity provider like Login with Amazon,
Facebook, Google, or an OpenID Connect-compatible identity provider, we
recommend that you use [Amazon Cognito](http://aws.amazon.com/cognito/) or
`AssumeRoleWithWebIdentity`. For more information, see [Federation Through
a Web-based Identity
Provider](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
</note> The `GetFederationToken` action must be called by using the
long-term AWS security credentials of an IAM user. You can also call
`GetFederationToken` using the security credentials of an AWS root account,
but we do not recommend it. Instead, we recommend that you create an IAM
user for the purpose of the proxy application and then attach a policy to
the IAM user that limits federated users to only the actions and resources
that they need access to. For more information, see [IAM Best
Practices](http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
in the *IAM User Guide*.
The temporary security credentials that are obtained by using the long-term
credentials of an IAM user are valid for the specified duration, from 900
seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The
default is 43200 seconds (12 hours). Temporary credentials that are
obtained by using AWS root account credentials have a maximum duration of
3600 seconds (1 hour).
The temporary security credentials created by `GetFederationToken` can be
used to make API calls to any AWS service with the following exceptions:
<ul> <li> You cannot use these credentials to call any IAM APIs.
</li> <li> You cannot call any STS APIs except `GetCallerIdentity`.
</li> </ul> **Permissions**
The permissions for the temporary security credentials returned by
`GetFederationToken` are determined by a combination of the following:
<ul> <li> The policy or policies that are attached to the IAM user whose
credentials are used to call `GetFederationToken`.
</li> <li> The policy that is passed as a parameter in the call.
</li> </ul> The passed policy is attached to the temporary security
credentials that result from the `GetFederationToken` API call--that is, to
the *federated user*. When the federated user makes an AWS request, AWS
evaluates the policy attached to the federated user in combination with the
policy or policies attached to the IAM user whose credentials were used to
call `GetFederationToken`. AWS allows the federated user's request only
when both the federated user **and** the IAM user are explicitly
allowed to perform the requested action. The passed policy cannot grant
more permissions than those that are defined in the IAM user policy.
A typical use case is that the permissions of the IAM user whose
credentials are used to call `GetFederationToken` are designed to allow
access to all the actions and resources that any federated user will need.
Then, for individual users, you pass a policy to the operation that scopes
down the permissions to a level that's appropriate to that individual user,
using a policy that allows only a subset of permissions that are granted to
the IAM user.
If you do not pass a policy, the resulting temporary security credentials
have no effective permissions. The only exception is when the temporary
security credentials are used to access a resource that has a
resource-based policy that specifically allows the federated user to access
the resource.
For more information about how permissions work, see [Permissions for
GetFederationToken](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
For information about using `GetFederationToken` to create temporary
security credentials, see [GetFederationToken—Federation Through a Custom
Identity
Broker](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
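## Example

A minimal, hypothetical sketch with a placeholder scoping policy:

```
policy = ~s({"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]})
input = %{"Name" => "federated-user", "Policy" => policy, "DurationSeconds" => 43200}
{:ok, result, _response} = STS.get_federation_token(client, input)
```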
"""
def get_federation_token(client, input, options \\ []) do
request(client, "GetFederationToken", input, options)
end
@doc """
Returns a set of temporary credentials for an AWS account or IAM user. The
credentials consist of an access key ID, a secret access key, and a
security token. Typically, you use `GetSessionToken` if you want to use MFA
to protect programmatic calls to specific AWS APIs like Amazon EC2
`StopInstances`. MFA-enabled IAM users would need to call `GetSessionToken`
and submit an MFA code that is associated with their MFA device. Using the
temporary security credentials that are returned from the call, IAM users
can then make programmatic calls to APIs that require MFA authentication.
If you do not supply a correct MFA code, then the API returns an access
denied error. For a comparison of `GetSessionToken` with the other APIs
that produce temporary credentials, see [Requesting Temporary Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS
APIs](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
The `GetSessionToken` action must be called by using the long-term AWS
security credentials of the AWS account or an IAM user. Credentials that
are created by IAM users are valid for the duration that you specify, from
900 seconds (15 minutes) up to a maximum of 129600 seconds (36 hours), with
a default of 43200 seconds (12 hours); credentials that are created by
using account credentials can range from 900 seconds (15 minutes) up to a
maximum of 3600 seconds (1 hour), with a default of 1 hour.
The temporary security credentials created by `GetSessionToken` can be used
to make API calls to any AWS service with the following exceptions:
<ul> <li> You cannot call any IAM APIs unless MFA authentication
information is included in the request.
</li> <li> You cannot call any STS API *except* `AssumeRole` or
`GetCallerIdentity`.
</li> </ul> <note> We recommend that you do not call `GetSessionToken` with
root account credentials. Instead, follow our [best
practices](http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
by creating one or more IAM users, giving them the necessary permissions,
and using IAM users for everyday interaction with AWS.
</note> The permissions associated with the temporary security credentials
returned by `GetSessionToken` are based on the permissions associated with
the account or IAM user whose credentials are used to call the action. If
`GetSessionToken` is called using root account credentials, the temporary
credentials have root account permissions. Similarly, if `GetSessionToken`
is called using the credentials of an IAM user, the temporary credentials
have the same permissions as the IAM user.
For more information about using `GetSessionToken` to create temporary
credentials, go to [Temporary Credentials for Users in Untrusted
Environments](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
in the *IAM User Guide*.
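## Example

A minimal, hypothetical sketch of an MFA-protected call (placeholder serial
number and token code):

```
input = %{
  "DurationSeconds" => 3600,
  "SerialNumber" => "arn:aws:iam::123456789012:mfa/user",
  "TokenCode" => "123456"
}
{:ok, result, _response} = STS.get_session_token(client, input)
```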
"""
def get_session_token(client, input, options \\ []) do
request(client, "GetSessionToken", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, HTTPoison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "sts"}
host = get_host("sts", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-"},
{"X-Amz-Target", ".#{action}"}]
payload = Poison.Encoder.encode(input, [])
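# Sign the request with AWS Signature Version 4 before sending.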
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{proto: proto, port: port}) do
"#{proto}://#{host}:#{port}/"
end
end
lib/aws/sts.ex
defmodule Pokerap.Url do
@moduledoc """
Holds utility functions to actually make HTTP calls
"""
alias Pokerap.Env, as: Env
#Builds option array (currently only timeouts) for HTTPoison
defp get_options() do
[timeout: Env.timeout, recv_timeout: Env.recv_timeout]
end
@doc """
Makes a call to HTTPoison and wraps the results in a tuple.
Make sure `url` has a trailing slash.
This is an intermediary step in `Pokerap.Url.get_endpoint/2`, and only
meant to be used when you can _only_ get a full URL (such as the `evolution-chain`
URL from `pokemon-species`). See `Pokerap.Url.get_endpoint/2` for full details.
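## Example

A sketch; the decoded body mirrors the PokeAPI payload for the given URL:

```
{:ok, chain} = Pokerap.Url.get_url("https://pokeapi.co/api/v2/evolution-chain/1/")
```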
"""
def get_url(url) do
case HTTPoison.get(url, [], get_options()) do
{:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.decode!(body)}
{:ok, %HTTPoison.Response{status_code: status_code}} ->
{:error, status_code}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, reason}
end
end
@doc """
Makes a call to HTTPoison and returns the results.
Make sure `url` has a trailing slash. Raises exceptions upon error. `!` version
of `Pokerap.Url.get_url/1`.
This is an intermediary step in `Pokerap.Url.get_endpoint!/2`, and only
meant to be used when you can _only_ get a full URL (such as the `evolution-chain`
URL from `pokemon-species`). See `Pokerap.Url.get_endpoint!/2` for full details.
"""
def get_url!(url) do
case get_url(url) do
{:ok, body} -> body
{:error, status_code} when is_integer(status_code) ->
raise Integer.to_string(status_code)
{_, error} -> raise error
end
end
# Builds URL string based on params.
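# For example: build_url("berry", "cheri") and build_url(:berry, "Cheri")
# both return "https://pokeapi.co/api/v2/berry/cheri/".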
defp build_url(endpoint, value) do
normalize = fn
value when is_bitstring(value) -> String.downcase(value)
value when is_integer(value) -> Integer.to_string(value)
value when is_atom(value) -> Atom.to_string(value)
end
"https://pokeapi.co/api/v2/#{endpoint}/#{normalize.(value)}/"
end
@doc """
Calls HTTPoison after assembling the URL to get resources from the API. Returns a tuple
of request status and data arranged in different ways depending on the endpoint.
Takes an endpoint and a value, constructs the URL, then makes the HTTPoison request.
## Example
```
iex(1)> Pokerap.Url.get_endpoint("berry","cheri")
{:ok, %{"firmness" => %{"name" => "soft",
"url" => "http://pokeapi.co/api/v2/berry-firmness/2/"},
"flavors" => [%{"flavor" => %{"name" => "spicy",
"url" => "http://pokeapi.co/api/v2/berry-flavor/1/"}, "potency" => 10},
%{"flavor" => %{"name" => "dry",
"url" => "http://pokeapi.co/api/v2/berry-flavor/2/"}, "potency" => 0},
%{"flavor" => %{"name" => "sweet",
"url" => "http://pokeapi.co/api/v2/berry-flavor/3/"}, "potency" => 0},
%{"flavor" => %{"name" => "bitter",
"url" => "http://pokeapi.co/api/v2/berry-flavor/4/"}, "potency" => 0},
%{"flavor" => %{"name" => "sour",
"url" => "http://pokeapi.co/api/v2/berry-flavor/5/"}, "potency" => 0}],
"growth_time" => 3, "id" => 1,
"item" => %{"name" => "cheri-berry",
"url" => "http://pokeapi.co/api/v2/item/126/"}, "max_harvest" => 5,
"name" => "cheri", "natural_gift_power" => 60,
"natural_gift_type" => %{"name" => "fire",
"url" => "http://pokeapi.co/api/v2/type/10/"}, "size" => 20,
"smoothness" => 25, "soil_dryness" => 15}}
```
"""
def get_endpoint(endpoint, value) do
get_url(build_url(endpoint,value))
end
@doc """
Calls HTTPoison after assembling the URL to get resources from the API. Returns data arranged in different ways depending on the endpoint.
Takes an endpoint and a value, constructs the URL, then makes the HTTPoison request.
Raises exceptions upon error. `!` version of `Pokerap.Url.get_endpoint/2`
## Example
```
iex(1)> Pokerap.Url.get_endpoint!("berry","cheri")
%{"firmness" => %{"name" => "soft",
"url" => "http://pokeapi.co/api/v2/berry-firmness/2/"},
"flavors" => [%{"flavor" => %{"name" => "spicy",
"url" => "http://pokeapi.co/api/v2/berry-flavor/1/"}, "potency" => 10},
%{"flavor" => %{"name" => "dry",
"url" => "http://pokeapi.co/api/v2/berry-flavor/2/"}, "potency" => 0},
%{"flavor" => %{"name" => "sweet",
"url" => "http://pokeapi.co/api/v2/berry-flavor/3/"}, "potency" => 0},
%{"flavor" => %{"name" => "bitter",
"url" => "http://pokeapi.co/api/v2/berry-flavor/4/"}, "potency" => 0},
%{"flavor" => %{"name" => "sour",
"url" => "http://pokeapi.co/api/v2/berry-flavor/5/"}, "potency" => 0}],
"growth_time" => 3, "id" => 1,
"item" => %{"name" => "cheri-berry",
"url" => "http://pokeapi.co/api/v2/item/126/"}, "max_harvest" => 5,
"name" => "cheri", "natural_gift_power" => 60,
"natural_gift_type" => %{"name" => "fire",
"url" => "http://pokeapi.co/api/v2/type/10/"}, "size" => 20,
"smoothness" => 25, "soil_dryness" => 15}
```
"""
def get_endpoint!(endpoint, value) do
get_url!(build_url(endpoint,value))
end
end
lib/Pokerap/Url.ex
defmodule Alembic.PluginManager do
@moduledoc """
Manages the list of currently enabled plugins, issuing callbacks to each
plugin's main callback module in response to client-issued requests and
mediating interactions between plugins with potentially conflicting spheres
of responsibility.
"""
use ExActor
alias Alembic.Config
@doc """
Initializes the plugin manager, taking the following steps to locate and
load plugins:
1. Invokes `load_plugins/1` on each plugin directory name specified in the
server config, producing a list whose every element is itself a list of
plugins that the manager attempted to load from one of these plugin
directories.
2. Concatenates the list of lists into a single list of plugins. Each
element of this single list is either a tuple `{:ok, plugin}`, where
`plugin` is itself a tuple mapping a loaded plugin's name to that
plugin's main callback module, or `{:error, reason}` in the event that a
particular plugin could not be loaded.
3. Filters the list of plugins, removing each error tuple such that only
successfully loaded plugins remain. During this step, each error message
produced by attempting and failing to load a particular plugin may also
be logged.
4. Produces a new list containing only the second element (a name–module
mapping) of each tuple `{:ok, plugin}` remaining in the filtered list.
This new list, which contains every plugin that was successfully loaded
from the plugin directories specified in the server config, is then used
as the plugin manager's internal state.
Each currently enabled plugin is represented in the plugin manager's state
by a tuple `{name, module}`, where `name` is the plugin's unique name
(specified in the plugin's manifest) and `module` is the plugin's main
callback module (responsible for implementing `Alembic.Plugin.Behaviour`
callback functions on that plugin's behalf).
"""
definit _ do
Enum.map(Config.get[:plugins], &load_plugins/1)
|> Enum.concat
|> Enum.filter(fn(plugin) ->
case plugin do
{:ok, _plugin} ->
true
{:error, _reason} ->
# TODO: log the error
false
end
end)
|> Enum.map(&elem(&1, 1))
end
@doc """
Serves the specified request, taking the following steps to identify
currently enabled plugins capable of handling the request and delegate the
handling of the request to these plugins:
1. Associates each plugin with a _priority value_ for the request by
invoking the `Alembic.Plugin.Behaviour.screen/2` plugin callback on each
plugin, passing the request and client as arguments and making note of
the priority value each plugin returns.
2. Sorts the list of currently enabled plugins by priority and discards
those plugins that declared a priority of `:ignore` (indicating an
intention to ignore the request), producing an appropriately ordered
list of plugins that should each be given an opportunity to handle the
request.
3. Takes plugins from the list – in order of priority – and invokes the
`Alembic.Plugin.Behaviour.handle/2` plugin callback on each plugin,
passing the request and client as arguments. Stops either when every
plugin in the list has been given a chance to handle the request, or
when one of the plugins consumes the request by returning `:consume`
from the `handle` callback.
Any currently enabled plugin that errors out during this process (by
returning `{:error, reason}` from either callback, or by throwing an
exception of any sort) should be disabled.
"""
defcast serve(request, client), state: plugins do
for plugin <- plugins do
{plugin.screen(request, client), plugin}
end
|> Enum.sort(fn({first, _}, {second, _}) ->
# true if `first` is lower priority than `second`, otherwise false
cond do
first == :ignore -> true
second == :ignore -> false
true -> first < second
end
end)
|> Enum.filter(&(elem(&1, 0) != :ignore))
|> Enum.map(&elem(&1, 1))
|> Enum.take_while(&(&1.handle(request, client) != :consume))
:ok
end
@doc """
Attempts to load the file at the specified path as a plugin. Returns
`{:ok, plugin}` on success, `{:error, reason}` on failure. In the case of
success, `plugin` is a tuple whose first element is the plugin's name
(taken from the plugin's manifest) and whose second element is the callback
module responsible for implementing the `Alembic.Plugin.Behaviour` callback
functions on behalf of the loaded plugin.
"""
defp load_plugin(filename) do
filename = Path.expand(filename)
if File.exists?(filename) do
# TODO: `Code.require_file/1` is probably unsafe here
case get_callback_module(Code.require_file(filename)) do
nil ->
{:error, "couldn't find plugin callback module"}
module ->
{:ok, {module.alembic_plugin[:name], module}}
end
else
{:error, :enoent}
end
end
@doc """
Attempts to load every file in the directory at the specified path as a
plugin by calling `load_plugin/1` on every file in the directory in turn.
Returns a list of plugins on success, `{:error, reason}` on failure.
Note that this method is still considered to have succeeded if one or more
plugins in the directory failed to load. In fact, it is considered to have
failed if and only if the call to `File.ls/1` resulted in failure.
"""
defp load_plugins(dirname) do
case File.ls(Path.expand(dirname)) do
{:ok, files} ->
Enum.map files, &load_plugin/1
{:error, reason} ->
{:error, reason}
end
end
@doc """
Given a list of modules associated with a particular plugin, returns that
plugin's callback module – the module that is responsible for implementing
the `Alembic.Plugin.Behaviour` callback functions on that plugin's behalf.
If no callback module is found, `nil` is returned instead.
"""
defp get_callback_module(modules) do
Enum.find modules, fn(module) ->
behaviours = module.module_info(:attributes)[:behaviour]
Enum.member? behaviours, Alembic.Plugin.Behaviour
end
end
end
lib/plugin_sup.ex
defmodule ExDoc.Formatter.EPUB.Templates do
@moduledoc """
Handle all template interfaces for the EPUB formatter.
"""
require EEx
alias ExDoc.Formatter.HTML.Templates, as: H
@doc """
Generate content from the module template for a given `node`
"""
def module_page(config, node) do
types = H.group_types(node)
module_template(config, node, types.types, types.functions, types.macros, types.callbacks)
end
@doc """
Creates the [Package Document Definition](http://www.idpf.org/epub/30/spec/epub30-publications.html#sec-package-def).
This definition encapsulates the publication metadata and the resource
information that constitute the EPUB publication. This definition also
includes the default reading order.
"""
EEx.function_from_file(:def, :content_template,
Path.expand("templates/content_template.eex", __DIR__),
[:config, :nodes, :uuid, :datetime])
@doc """
Creates a chapter that contains all the details about an individual module.
The chapter can include the following sections: *functions*, *macros*,
*types*, *callbacks*.
"""
EEx.function_from_file(:def, :module_template,
Path.expand("templates/module_template.eex", __DIR__),
[:config, :module, :types, :functions, :macros, :callbacks])
@doc """
Creates the table of contents. This template follows the
[EPUB Navigation Document Definition](http://www.idpf.org/epub/30/spec/epub30-contentdocs.html#sec-xhtml-nav).
"""
EEx.function_from_file(:def, :nav_template,
Path.expand("templates/nav_template.eex", __DIR__),
[:config, :nodes])
@doc """
Creates a new chapter when the user provides additional files.
"""
EEx.function_from_file(:def, :extra_template,
Path.expand("templates/extra_template.eex", __DIR__),
[:config, :content])
@doc """
Creates the cover page for the EPUB document.
"""
EEx.function_from_file(:def, :title_template,
Path.expand("templates/title_template.eex", __DIR__),
[:config])
@doc """
Creates a *Navigation Center eXtended* document (as defined in OPF 2.0.1).
This is for compatibility with EPUB 2 Reading Systems. EPUB 3
Reading Systems must ignore the NCX in favor of the
[EPUB Navigation Document](http://www.idpf.org/epub/30/spec/epub30-contentdocs.html#sec-xhtml-nav).
"""
EEx.function_from_file(:def, :toc_template,
Path.expand("templates/toc_template.eex", __DIR__),
[:config, :nodes, :uuid])
EEx.function_from_file(:defp, :head_template,
Path.expand("templates/head_template.eex", __DIR__),
[:config, :page])
end
deps/ex_doc/lib/ex_doc/formatter/epub/templates.ex
defmodule Harald.HCI.Commands.ControllerAndBaseband.SetEventMask do
@moduledoc """
Reference: version 5.2, Vol 4, Part E, 7.3.1.
"""
alias Harald.{HCI, HCI.Commands.Command}
@type t() :: %{
event_mask: %{
inquiry_complete_event: HCI.flag(),
inquiry_result_event: HCI.flag(),
connection_complete_event: HCI.flag(),
connection_request_event: HCI.flag(),
disconnection_complete_event: HCI.flag(),
authentication_complete_event: HCI.flag(),
remote_name_request_complete_event: HCI.flag(),
encryption_change_event: HCI.flag(),
change_connection_link_key_complete_event: HCI.flag(),
mast_link_key_complete_event: HCI.flag(),
read_remote_supported_features_complete_event: HCI.flag(),
read_remote_version_information_complete_event: HCI.flag(),
qos_setup_complete_event: HCI.flag(),
hardware_error_event: HCI.flag(),
flush_occurred_event: HCI.flag(),
role_change_event: HCI.flag(),
mode_change_event: HCI.flag(),
return_link_keys_event: HCI.flag(),
pin_code_request_event: HCI.flag(),
link_key_request_event: HCI.flag(),
link_key_notification_event: HCI.flag(),
loopback_command_event: HCI.flag(),
data_buffer_overflow_event: HCI.flag(),
max_slots_change_event: HCI.flag(),
read_clock_offset_complete_event: HCI.flag(),
connection_packet_type_changed_event: HCI.flag(),
qos_violation_event: HCI.flag(),
page_scan_mode_change_event: HCI.flag(),
page_scan_repition_mode_change_event: HCI.flag(),
flow_specification_complete_event: HCI.flag(),
inquiry_result_with_rssi_event: HCI.flag(),
read_remote_extended_features_complete_event: HCI.flag(),
synchronous_connection_complete_event: HCI.flag(),
synchronous_connection_changed_event: HCI.flag(),
sniff_subrating_event: HCI.flag(),
extended_inquiry_result_event: HCI.flag(),
encryption_key_refresh_complete_event: HCI.flag(),
io_capability_request_event: HCI.flag(),
io_capability_response_event: HCI.flag(),
user_confirmation_request_event: HCI.flag(),
user_passkey_request_event: HCI.flag(),
remote_oob_data_request_event: HCI.flag(),
simple_pairing_complete_event: HCI.flag(),
link_supervision_timeout_changed_event: HCI.flag(),
enhanced_flush_complete_event: HCI.flag(),
user_passkey_notification_event: HCI.flag(),
keypress_notification_event: HCI.flag(),
remote_host_supported_features_notification_event: HCI.flag(),
le_meta_event: HCI.flag(),
reserved_map: HCI.reserved_map()
}
}
@behaviour Command
@fields [
:inquiry_complete_event,
:inquiry_result_event,
:connection_complete_event,
:connection_request_event,
:disconnection_complete_event,
:authentication_complete_event,
:remote_name_request_complete_event,
:encryption_change_event,
:change_connection_link_key_complete_event,
:mast_link_key_complete_event,
:read_remote_supported_features_complete_event,
:read_remote_version_information_complete_event,
:qos_setup_complete_event,
:hardware_error_event,
:flush_occurred_event,
:role_change_event,
:mode_change_event,
:return_link_keys_event,
:pin_code_request_event,
:link_key_request_event,
:link_key_notification_event,
:loopback_command_event,
:data_buffer_overflow_event,
:max_slots_change_event,
:read_clock_offset_complete_event,
:connection_packet_type_changed_event,
:qos_violation_event,
:page_scan_mode_change_event,
:page_scan_repition_mode_change_event,
:flow_specification_complete_event,
:inquiry_result_with_rssi_event,
:read_remote_extended_features_complete_event,
:synchronous_connection_complete_event,
:synchronous_connection_changed_event,
:sniff_subrating_event,
:extended_inquiry_result_event,
:encryption_key_refresh_complete_event,
:io_capability_request_event,
:io_capability_response_event,
:user_confirmation_request_event,
:user_passkey_request_event,
:remote_oob_data_request_event,
:simple_pairing_complete_event,
:link_supervision_timeout_changed_event,
:enhanced_flush_complete_event,
:user_passkey_notification_event,
:keypress_notification_event,
:remote_host_supported_features_notification_event,
:le_meta_event,
:reserved_map
]
@impl Command
def decode(<<encoded_set_event_mask::little-size(64)>>) do
<<
reserved_62_to_63::size(2),
le_meta_event::size(1),
remote_host_supported_features_notification_event::size(1),
keypress_notification_event::size(1),
user_passkey_notification_event::size(1),
reserved_57::size(1),
enhanced_flush_complete_event::size(1),
link_supervision_timeout_changed_event::size(1),
reserved_54::size(1),
simple_pairing_complete_event::size(1),
remote_oob_data_request_event::size(1),
user_passkey_request_event::size(1),
user_confirmation_request_event::size(1),
io_capability_response_event::size(1),
io_capability_request_event::size(1),
encryption_key_refresh_complete_event::size(1),
extended_inquiry_result_event::size(1),
sniff_subrating_event::size(1),
synchronous_connection_changed_event::size(1),
synchronous_connection_complete_event::size(1),
reserved_35_to_42::size(8),
read_remote_extended_features_complete_event::size(1),
inquiry_result_with_rssi_event::size(1),
flow_specification_complete_event::size(1),
page_scan_repition_mode_change_event::size(1),
page_scan_mode_change_event::size(1),
qos_violation_event::size(1),
connection_packet_type_changed_event::size(1),
read_clock_offset_complete_event::size(1),
max_slots_change_event::size(1),
data_buffer_overflow_event::size(1),
loopback_command_event::size(1),
link_key_notification_event::size(1),
link_key_request_event::size(1),
pin_code_request_event::size(1),
return_link_keys_event::size(1),
mode_change_event::size(1),
reserved_18::size(1),
role_change_event::size(1),
flush_occurred_event::size(1),
hardware_error_event::size(1),
reserved_13_to_14::size(2),
qos_setup_complete_event::size(1),
read_remote_version_information_complete_event::size(1),
read_remote_supported_features_complete_event::size(1),
mast_link_key_complete_event::size(1),
change_connection_link_key_complete_event::size(1),
encryption_change_event::size(1),
remote_name_request_complete_event::size(1),
authentication_complete_event::size(1),
disconnection_complete_event::size(1),
connection_request_event::size(1),
connection_complete_event::size(1),
inquiry_result_event::size(1),
inquiry_complete_event::size(1)
>> = <<encoded_set_event_mask::size(64)>>
encoded_event_mask = %{
inquiry_complete_event: inquiry_complete_event,
inquiry_result_event: inquiry_result_event,
connection_complete_event: connection_complete_event,
connection_request_event: connection_request_event,
disconnection_complete_event: disconnection_complete_event,
authentication_complete_event: authentication_complete_event,
remote_name_request_complete_event: remote_name_request_complete_event,
encryption_change_event: encryption_change_event,
change_connection_link_key_complete_event: change_connection_link_key_complete_event,
mast_link_key_complete_event: mast_link_key_complete_event,
read_remote_supported_features_complete_event:
read_remote_supported_features_complete_event,
read_remote_version_information_complete_event:
read_remote_version_information_complete_event,
qos_setup_complete_event: qos_setup_complete_event,
hardware_error_event: hardware_error_event,
flush_occurred_event: flush_occurred_event,
role_change_event: role_change_event,
mode_change_event: mode_change_event,
return_link_keys_event: return_link_keys_event,
pin_code_request_event: pin_code_request_event,
link_key_request_event: link_key_request_event,
link_key_notification_event: link_key_notification_event,
loopback_command_event: loopback_command_event,
data_buffer_overflow_event: data_buffer_overflow_event,
max_slots_change_event: max_slots_change_event,
read_clock_offset_complete_event: read_clock_offset_complete_event,
connection_packet_type_changed_event: connection_packet_type_changed_event,
qos_violation_event: qos_violation_event,
page_scan_mode_change_event: page_scan_mode_change_event,
page_scan_repition_mode_change_event: page_scan_repition_mode_change_event,
flow_specification_complete_event: flow_specification_complete_event,
inquiry_result_with_rssi_event: inquiry_result_with_rssi_event,
read_remote_extended_features_complete_event: read_remote_extended_features_complete_event,
synchronous_connection_complete_event: synchronous_connection_complete_event,
synchronous_connection_changed_event: synchronous_connection_changed_event,
sniff_subrating_event: sniff_subrating_event,
extended_inquiry_result_event: extended_inquiry_result_event,
encryption_key_refresh_complete_event: encryption_key_refresh_complete_event,
io_capability_request_event: io_capability_request_event,
io_capability_response_event: io_capability_response_event,
user_confirmation_request_event: user_confirmation_request_event,
user_passkey_request_event: user_passkey_request_event,
remote_oob_data_request_event: remote_oob_data_request_event,
simple_pairing_complete_event: simple_pairing_complete_event,
link_supervision_timeout_changed_event: link_supervision_timeout_changed_event,
enhanced_flush_complete_event: enhanced_flush_complete_event,
user_passkey_notification_event: user_passkey_notification_event,
keypress_notification_event: keypress_notification_event,
remote_host_supported_features_notification_event:
remote_host_supported_features_notification_event,
le_meta_event: le_meta_event,
reserved_map: %{
(13..14) => reserved_13_to_14,
(18..18) => reserved_18,
(35..42) => reserved_35_to_42,
(54..54) => reserved_54,
(57..57) => reserved_57,
(62..63) => reserved_62_to_63
}
}
decoded_event_mask =
Enum.into(encoded_event_mask, %{}, fn
{:reserved_map, reserved} -> {:reserved_map, reserved}
{key, 1} -> {key, true}
{key, 0} -> {key, false}
end)
parameters = %{event_mask: decoded_event_mask}
{:ok, parameters}
end
@impl Command
def decode_return_parameters(<<status>>), do: {:ok, %{status: status}}
@impl Command
def encode(%{
event_mask:
%{
inquiry_complete_event: _,
inquiry_result_event: _,
connection_complete_event: _,
connection_request_event: _,
disconnection_complete_event: _,
authentication_complete_event: _,
remote_name_request_complete_event: _,
encryption_change_event: _,
change_connection_link_key_complete_event: _,
mast_link_key_complete_event: _,
read_remote_supported_features_complete_event: _,
read_remote_version_information_complete_event: _,
qos_setup_complete_event: _,
hardware_error_event: _,
flush_occurred_event: _,
role_change_event: _,
mode_change_event: _,
return_link_keys_event: _,
pin_code_request_event: _,
link_key_request_event: _,
link_key_notification_event: _,
loopback_command_event: _,
data_buffer_overflow_event: _,
max_slots_change_event: _,
read_clock_offset_complete_event: _,
connection_packet_type_changed_event: _,
qos_violation_event: _,
page_scan_mode_change_event: _,
page_scan_repition_mode_change_event: _,
flow_specification_complete_event: _,
inquiry_result_with_rssi_event: _,
read_remote_extended_features_complete_event: _,
synchronous_connection_complete_event: _,
synchronous_connection_changed_event: _,
sniff_subrating_event: _,
extended_inquiry_result_event: _,
encryption_key_refresh_complete_event: _,
io_capability_request_event: _,
io_capability_response_event: _,
user_confirmation_request_event: _,
user_passkey_request_event: _,
remote_oob_data_request_event: _,
simple_pairing_complete_event: _,
link_supervision_timeout_changed_event: _,
enhanced_flush_complete_event: _,
user_passkey_notification_event: _,
keypress_notification_event: _,
remote_host_supported_features_notification_event: _,
le_meta_event: _,
reserved_map: %{
(13..14) => reserved_13_to_14,
(18..18) => reserved_18,
(35..42) => reserved_35_to_42,
(54..54) => reserved_54,
(57..57) => reserved_57,
(62..63) => reserved_62_to_63
}
} = decoded_event_mask
}) do
encoded_event_mask =
Enum.into(decoded_event_mask, %{}, fn
{:reserved_map, reserved} -> {:reserved_map, reserved}
{key, true} -> {key, 1}
{key, false} -> {key, 0}
end)
<<encoded_set_event_mask::little-size(64)>> = <<
reserved_62_to_63::size(2),
encoded_event_mask.le_meta_event::size(1),
encoded_event_mask.remote_host_supported_features_notification_event::size(1),
encoded_event_mask.keypress_notification_event::size(1),
encoded_event_mask.user_passkey_notification_event::size(1),
reserved_57::size(1),
encoded_event_mask.enhanced_flush_complete_event::size(1),
encoded_event_mask.link_supervision_timeout_changed_event::size(1),
reserved_54::size(1),
encoded_event_mask.simple_pairing_complete_event::size(1),
encoded_event_mask.remote_oob_data_request_event::size(1),
encoded_event_mask.user_passkey_request_event::size(1),
encoded_event_mask.user_confirmation_request_event::size(1),
encoded_event_mask.io_capability_response_event::size(1),
encoded_event_mask.io_capability_request_event::size(1),
encoded_event_mask.encryption_key_refresh_complete_event::size(1),
encoded_event_mask.extended_inquiry_result_event::size(1),
encoded_event_mask.sniff_subrating_event::size(1),
encoded_event_mask.synchronous_connection_changed_event::size(1),
encoded_event_mask.synchronous_connection_complete_event::size(1),
reserved_35_to_42::size(8),
encoded_event_mask.read_remote_extended_features_complete_event::size(1),
encoded_event_mask.inquiry_result_with_rssi_event::size(1),
encoded_event_mask.flow_specification_complete_event::size(1),
encoded_event_mask.page_scan_repition_mode_change_event::size(1),
encoded_event_mask.page_scan_mode_change_event::size(1),
encoded_event_mask.qos_violation_event::size(1),
encoded_event_mask.connection_packet_type_changed_event::size(1),
encoded_event_mask.read_clock_offset_complete_event::size(1),
encoded_event_mask.max_slots_change_event::size(1),
encoded_event_mask.data_buffer_overflow_event::size(1),
encoded_event_mask.loopback_command_event::size(1),
encoded_event_mask.link_key_notification_event::size(1),
encoded_event_mask.link_key_request_event::size(1),
encoded_event_mask.pin_code_request_event::size(1),
encoded_event_mask.return_link_keys_event::size(1),
encoded_event_mask.mode_change_event::size(1),
reserved_18::size(1),
encoded_event_mask.role_change_event::size(1),
encoded_event_mask.flush_occurred_event::size(1),
encoded_event_mask.hardware_error_event::size(1),
reserved_13_to_14::size(2),
encoded_event_mask.qos_setup_complete_event::size(1),
encoded_event_mask.read_remote_version_information_complete_event::size(1),
encoded_event_mask.read_remote_supported_features_complete_event::size(1),
encoded_event_mask.mast_link_key_complete_event::size(1),
encoded_event_mask.change_connection_link_key_complete_event::size(1),
encoded_event_mask.encryption_change_event::size(1),
encoded_event_mask.remote_name_request_complete_event::size(1),
encoded_event_mask.authentication_complete_event::size(1),
encoded_event_mask.disconnection_complete_event::size(1),
encoded_event_mask.connection_request_event::size(1),
encoded_event_mask.connection_complete_event::size(1),
encoded_event_mask.inquiry_result_event::size(1),
encoded_event_mask.inquiry_complete_event::size(1)
>>
{:ok, <<encoded_set_event_mask::size(64)>>}
end
@impl Command
def encode_return_parameters(%{status: status}), do: {:ok, <<status>>}
@doc """
Return a map ready for encoding.
Keys under `:event_mask` will be defaulted if not supplied.
## Options
`:encoded` - `boolean()`. Defaults to `false`. Whether the return value is encoded or not.
`:default` - `boolean()`. Defaults to `false`. The default value for unspecified fields under the
`:event_mask` field.
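## Example

A minimal sketch; unspecified event fields default to `false`:

```
{:ok, %{event_mask: mask}} = new(%{event_mask: %{le_meta_event: true}})
mask.le_meta_event
#=> true
mask.inquiry_complete_event
#=> false
```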
"""
def new(%{event_mask: event_mask}, opts \\ []) do
default = Keyword.get(opts, :default, false)
with {:ok, mask} <- resolve_mask(event_mask, default) do
maybe_encode(%{event_mask: mask}, Keyword.get(opts, :encoded, false))
end
end
@impl Command
def ocf(), do: 0x01
defp maybe_encode(decoded_set_event_mask, true) do
encode(decoded_set_event_mask)
end
defp maybe_encode(decoded_set_event_mask, false), do: {:ok, decoded_set_event_mask}
defp resolve_mask(fields, default) do
truthy_reserved = %{
(13..14) => 3,
(18..18) => 1,
(35..42) => 255,
(54..54) => 1,
(57..57) => 1,
(62..63) => 3
}
falsey_reserved = %{
(13..14) => 0,
(18..18) => 0,
(35..42) => 0,
(54..54) => 0,
(57..57) => 0,
(62..63) => 0
}
reserved_default = if default, do: truthy_reserved, else: falsey_reserved
Enum.reduce_while(@fields, %{}, fn
:reserved_map, acc ->
case Map.fetch(fields, :reserved_map) do
{:ok, value} when is_integer(value) -> {:cont, Map.put(acc, :reserved_map, value)}
{:ok, _value} -> {:halt, {:error, :reserved_map}}
:error -> {:cont, Map.put(acc, :reserved_map, reserved_default)}
end
field, acc ->
{:cont, Map.put(acc, field, Map.get(fields, field, default))}
end)
|> case do
{:error, _} = e -> e
mask -> {:ok, mask}
end
end
end
src/lib/harald/hci/commands/controller_and_baseband/set_event_mask.ex
defmodule Mechanize.Form do
@moduledoc """
Encapsulates all functionality related to form handling and submission.
You can fetch a form from a page using the `Mechanize.Page` module:
```
form = Page.form_with(page, name: "login")
```
"""
alias Mechanize.Page.Element
alias Mechanize.Form.{
TextInput,
ArbitraryField,
Checkbox,
ParameterizableField,
RadioButton,
SubmitButton,
Checkbox,
ImageInput,
SelectList
}
alias Mechanize.Query
@derive [Mechanize.Page.Elementable]
@enforce_keys [:element]
defstruct element: nil,
fields: []
@typedoc """
The HTML Form struct.
"""
@type t :: %__MODULE__{
element: Element.t(),
fields: list()
}
@doc false
def new(page, element) do
%Mechanize.Form{element: element, fields: parse_fields(page, element)}
end
@doc false
def put_field(form, field, value) do
put_field(form, ArbitraryField.new(field, value))
end
def put_field(form, field) do
%__MODULE__{form | fields: [field | form.fields]}
end
@doc """
Returns all fields from the given form.
"""
@spec fields(t()) :: list()
def fields(nil) do
raise ArgumentError, "form is nil"
end
def fields(form) do
form.fields
end
@doc """
Returns a list of text inputs or an empty list if no text inputs are found.
See related `fill_text/2`.
"""
@spec text_inputs(t()) :: [TextInput.t()]
defdelegate text_inputs(form), to: TextInput, as: :text_inputs_with
@doc """
Returns a list of text inputs matching the given `query`.
An empty list is returned in case no text input is matched by the given `query`.
See related `fill_text/2`.
## Example
Returns all text inputs with name "download".
```
Form.text_inputs_with(form, name: "download")
```
"""
@spec text_inputs_with(t(), Query.t()) :: [TextInput.t()]
defdelegate text_inputs_with(form, query), to: TextInput
@doc """
Fill a text input with a given value.
Text inputs are all inputs that can store text, not just inputs with `type="text"`.
Mechanize treats color, date, datetime, email, hidden, month, number, password, range, search,
tel, text, time, url, week and textarea as text inputs.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
You can fill a login form like this:
```
form
|> Form.fill_text(name: "username", with: "<EMAIL>")
|> Form.fill_text(name: "password", with: "<PASSWORD>")
|> Form.submit!()
```
"""
@spec fill_text(t(), Query.t()) :: t()
defdelegate fill_text(form, query), to: TextInput
@doc """
Returns a list of checkboxes or an empty list if no checkboxes are found.
See related `check_checkbox/2` and `uncheck_checkbox/2`.
"""
@spec checkboxes(t()) :: [Checkbox.t()]
defdelegate checkboxes(form), to: Checkbox, as: :checkboxes_with
@doc """
Returns a list of checkboxes matching the given `query`.
An empty list is returned in case no checkbox is matched by the given `query`.
See related `check_checkbox/2` and `uncheck_checkbox/2`.
## Example
Returns all checkboxes with name "download".
```
Form.checkboxes_with(form, name: "download")
```
"""
@spec checkboxes_with(t(), Query.t()) :: [Checkbox.t()]
defdelegate checkboxes_with(form, query), to: Checkbox
@doc """
Check all checkboxes matching the given query.
Raises `Mechanize.Query.BadQueryError` if no checkbox is matched by the query.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
You can check a checkbox and then submit the form:
```
form
|> Form.check_checkbox(name: "subscribe", value: "yes")
|> Form.submit!()
```
"""
@spec check_checkbox(t(), Query.t()) :: t()
defdelegate check_checkbox(form, query), to: Checkbox
@doc """
Uncheck all checkboxes matching the given query.
Raises `Mechanize.Query.BadQueryError` if no checkbox is matched by the query.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
You can uncheck a checkbox and then submit the form:
```
form
|> Form.uncheck_checkbox(name: "subscribe", value: "yes")
|> Form.submit!()
```
"""
@spec uncheck_checkbox(t(), Query.t()) :: t()
defdelegate uncheck_checkbox(form, query), to: Checkbox
@doc """
Returns a list of image inputs or an empty list if no image input are found.
See related `click_image!/2`.
"""
@spec image_inputs(t()) :: [ImageInput.t()]
defdelegate image_inputs(form), to: ImageInput, as: :image_inputs_with
@doc """
Returns a list of image inputs matching the given `query`.
An empty list is returned in case no image input is matched by the given `query`.
See related `click_image!/2`.
## Example
Returns all image inputs with name "america".
```
Form.image_inputs_with(form, name: "america")
```
"""
@spec image_inputs_with(t(), Query.t()) :: [ImageInput.t()]
defdelegate image_inputs_with(form, query), to: ImageInput
@doc """
Clicks on a image input matching the given query.
Mechanize submits the form when an image input is clicked and a `Mechanize.Page` struct is
returned as the result.
Raises `Mechanize.Query.BadQueryError` if none or more than one image input is matched by query.
Raises additional exceptions from `Mechanize.Browser.request!/5`.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
You can click on an image input:
```
Form.click_image!(form, name: "america")
```
You can also send x,y coordinates of the click:
```
Form.click_image!(form, name: "america", x: 120, y: 120)
```
"""
@spec click_image!(t(), Query.t()) :: Page.t()
defdelegate click_image!(form, query), to: ImageInput
@doc """
Returns a list of radio buttons or an empty list if no radio buttons are found.
See related `check_radio_button/2` and `uncheck_radio_button/2`.
"""
@spec radio_buttons(t()) :: [RadioButton.t()]
defdelegate radio_buttons(form), to: RadioButton, as: :radio_buttons_with
@doc """
Returns a list of radio buttons matching the given `query`.
An empty list is returned in case no radio button is matched by the given `query`.
See related `check_radio_button/2` and `uncheck_radio_button/2`.
## Example
Returns all radio buttons with name "subscribe".
```
Form.radio_buttons_with(form, name: "subscribe")
```
"""
@spec radio_buttons_with(t(), Query.t()) :: [RadioButton.t()]
defdelegate radio_buttons_with(form, query), to: RadioButton
@doc """
Checks a radio button matching the given query.
When you check a radio button, Mechanize does the job to uncheck all radios from the same radio
group (i.e. same name attribute) before check the radio button in the query.
Raises `Mechanize.Query.BadQueryError` if no radio button is matched by query. Also raises if
two or more radio buttons from the same radio group are checked by the query.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
Checks a radio button and submits the form:
```
form
|> Form.check_radio_button(name: "subscribe", value: "yes")
|> Form.submit!()
```
"""
@spec check_radio_button(t(), Query.t()) :: t()
defdelegate check_radio_button(form, query), to: RadioButton
@doc """
Unchecks a radio button matching the given query.
Raises `Mechanize.Query.BadQueryError` if no radio button is matched by query.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
Unchecks a radio button and submits the form:
```
form
|> Form.uncheck_radio_button(name: "subscribe", value: "yes")
|> Form.submit!()
```
"""
@spec uncheck_radio_button(t(), Query.t()) :: t()
defdelegate uncheck_radio_button(form, query), to: RadioButton
@doc """
Returns a list of selects or an empty list if no selects are found.
See related `select/2` and `unselect/2`.
"""
@spec select_lists(t()) :: [SelectList.t()]
defdelegate select_lists(form), to: SelectList, as: :select_lists_with
@doc """
Returns a list of selects matching the given `query`.
An empty list is returned in case no selects is matched by the given `query`.
See related `select/2` and `unselect/2`.
## Example
Returns all selects with name "category".
```
Form.select_lists_with(form, name: "category")
```
"""
@spec select_lists_with(t(), Query.t()) :: [SelectList.t()]
defdelegate select_lists_with(form, query), to: SelectList
@doc """
Selects an option from select list matching the given query.
In case of selects without `multiple` attribute, Mechanize does the job to unselect all
options from the same select list before it selects the given option.
Raises `Mechanize.Query.BadQueryError` if no select or option is matched by query. Also raises
when two or more options from the same select list are selected by the query and `multiple`
attribute is not present.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Examples
Selects an `option` with text "Option 1" on a `select` with `name="select1"`.
```elixir
Form.select(form, name: "select1", option: "Option 1")
```
Select by `value` attribute:
```elixir
Form.select(form, name: "select1", option: [value: "1"])
```
Or select the third option of a `select` (note that Mechanize uses a zero-based index):
```elixir
Form.select(form, name: "select1", option: 2)
```
"""
@spec select(t(), Query.t()) :: t()
defdelegate select(form, query), to: SelectList
@doc """
Unselects an option from select list matching the given query.
Raises `Mechanize.Query.BadQueryError` if no select or option is matched by query.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Examples
By `option` with text "Option 1" on a `select` with `name="select1"`.
```elixir
Form.unselect(form, name: "select1", option: "Option 1")
```
By `value` attribute:
```elixir
Form.unselect(form, name: "select1", option: [value: "1"])
```
Or unselect the third option of a `select` (note that Mechanize uses a zero-based index):
```elixir
Form.unselect(form, name: "select1", option: 2)
```
"""
@spec unselect(t(), Query.t()) :: t()
defdelegate unselect(form, query), to: SelectList
@doc """
Returns a list of submit buttons or an empty list if no submit buttons are found.
See related `click_button!/2`.
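## Example
Returns all submit buttons of the form:
```
Form.submit_buttons(form)
```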
"""
@spec submit_buttons(t()) :: [SubmitButton.t()]
defdelegate submit_buttons(form), to: SubmitButton, as: :submit_buttons_with
@doc """
Returns a list of submit buttons matching the given `query`.
An empty list is returned in case no submit button is matched by the given `query`.
See related `click_button!/2`.
## Example
Returns all submit buttons with name "send".
```
Form.submit_buttons_with(form, name: "send")
```
"""
@spec submit_buttons_with(t(), Query.t()) :: [SubmitButton.t()]
defdelegate submit_buttons_with(form, query), to: SubmitButton
@doc """
Clicks on a submit button matching the given query.
Mechanize submits the form when a submit button is clicked and a `Mechanize.Page` struct is
returned as the result.
Raises `Mechanize.Query.BadQueryError` if no submit button, or more than one, is matched by
the query.
Raises additional exceptions from `Mechanize.Browser.request!/5`.
See `Mechanize.Query` module documentation to know all query capabilities in depth.
## Example
You can click on a submit button by its visible text:
```
Form.click_button!(form, "OK")
```
You can also click by attribute name:
```
Form.click_button!(form, name: "submit1")
```
Fill a login form and submit it by clicking the "OK" submit button:
```
form
|> Form.fill_text(name: "username", with: "<EMAIL>")
|> Form.fill_text(name: "password", with: "<PASSWORD>")
|> Form.click_button!("OK")
```
"""
@spec click_button!(t(), Query.t()) :: Page.t()
defdelegate click_button!(form, query), to: SubmitButton
@doc """
Submits the given form.
Mechanize submits the form and a `Mechanize.Page` struct is returned as the result.
To simulate a form submitted by a button click, pass the button as the second parameter
or use one of the helper functions `click_button!/2` or `click_image!/2`. To simulate a
form submitted by an enter key press, omit the second parameter.
Raises additional exceptions from `Mechanize.Browser.request!/5`.
## Example
Simulate a login form submission by pressing "enter":
```
form
|> Form.fill_text(name: "username", with: "<EMAIL>")
|> Form.fill_text(name: "password", with: "<PASSWORD>")
|> Form.submit!()
```
Simulate a login form submission by clicking the submit button:
```
button =
form
|> Form.submit_buttons()
|> List.first()
form
|> Form.fill_text(name: "username", with: "<EMAIL>")
|> Form.fill_text(name: "password", with: "<PASSWORD>")
|> Form.submit!(button)
```
See `click_button!/2` for a simpler way to do this.
"""
@spec submit!(t(), SubmitButton.t() | ImageInput.t(), keyword()) :: Page.t()
def submit!(form, button \\ nil, opts \\ []) do
{options, _opts} = Keyword.pop(opts, :options, [])
case method(form) do
:post ->
Mechanize.Browser.request!(
browser(form),
:post,
action_url(form),
{:form, params(form.fields, button)},
opts
)
:get ->
Mechanize.Browser.request!(
browser(form),
:get,
action_url(form),
"",
params: params(form.fields, button), options: options
)
end
end
defp method(form) do
method =
form
|> Element.attr(:method)
|> Kernel.||("")
|> String.trim()
|> String.downcase()
if method == "post", do: :post, else: :get
end
defp action_url(form) do
form
|> Element.attr(:action)
|> Kernel.||("")
|> String.trim()
|> (&URI.merge(form.element.page.url, &1)).()
|> URI.to_string()
end
defp params(fields, button) do
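    # Build the submitted parameters: drop all submit buttons and image inputs,
    # re-add only the clicked one, skip disabled or unnamed fields, then
    # serialize each remaining field into request parameters.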
fields
|> Enum.reject(&is_submit?/1)
|> maybe_add_clicked_button(button)
|> Enum.reject(fn f -> Element.attr_present?(f, :disabled) or f.name == nil end)
|> Enum.flat_map(&ParameterizableField.to_param/1)
end
defp is_submit?(field) do
match?(%SubmitButton{}, field) or match?(%ImageInput{}, field)
end
defp maybe_add_clicked_button(params, nil), do: params
defp maybe_add_clicked_button(params, button), do: [button | params]
defp browser(form) do
form.element.page.browser
end
defp parse_fields(page, element) do
element
|> parse_inner_fields()
|> parse_outer_fields(page, element)
|> Enum.map(&create_field/1)
|> Enum.reject(&is_nil/1)
end
defp parse_inner_fields(element) do
Query.search(element, "input, textarea, button, select")
end
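  # HTML5 allows fields placed outside the <form> element to belong to it via a
  # form="form-id" attribute, so those fields are collected from the page as well.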
defp parse_outer_fields(fields, page, element) do
case Element.attr(element, :id) do
nil ->
fields
form_id ->
page
|> Query.filter_out(~s(form[id="#{form_id}"]))
|> Query.search(~s([form="#{form_id}"]))
|> Kernel.++(fields)
end
end
defp create_field(el) do
tag = Element.name(el)
type = Element.attr(el, :type, normalize: true)
cond do
type == "reset" ->
nil
tag == "button" and (type == "submit" or type == nil or type == "") ->
SubmitButton.new(el)
tag == "input" and type == "radio" ->
RadioButton.new(el)
tag == "input" and type == "checkbox" ->
Checkbox.new(el)
tag == "input" and type == "submit" ->
SubmitButton.new(el)
tag == "input" and type == "image" ->
ImageInput.new(el)
tag == "textarea" or tag == "input" ->
TextInput.new(el)
tag == "select" ->
SelectList.new(el)
true ->
nil
end
end
end
|
lib/mechanize/form.ex
| 0.930655 | 0.909184 |
form.ex
|
starcoder
|
defmodule Day10 do
  @challenge_input "lib/input.txt"
alias Day10.{
Parser,
ParseResult,
Node,
Node.Configuration,
Node.Supervisor
}
def solve do
parsed = Parser.parse(read_input())
# extract & spawn the output nodes
outputs = extract_outputs(parsed)
Supervisor.spawn_nodes(:output, outputs)
# extract & spawn the bot nodes
bots = extract_bots(parsed)
Supervisor.spawn_nodes(:bot, bots)
# once we've spawned the bots and outputs, assign the chips to the bots.
process_assignments(parsed)
# get answer of part B
:output
|> collect_chips_from_nodes([0, 1, 2])
|> multiply()
|> IO.inspect(label: "Answer of part B")
end
defp collect_chips_from_nodes(type, nodes) do
nodes
|> Enum.map(&Node.node_name(type, &1))
|> Enum.flat_map(&Node.list/1)
end
defp multiply(list_of_integers) do
Enum.reduce(list_of_integers, 1, fn integer, total -> integer * total end)
end
# process_assignments/1 takes the %ParseResult's assignments and processes
# them by sending the correct values to the bots.
defp process_assignments(%ParseResult{chip_assignments: assignments}) do
for assignment <- assignments do
node_name =
Node.node_name(
assignment.target_address.type,
assignment.target_address.identifier
)
Node.receive(node_name, assignment.value)
end
end
defp extract_bots(%ParseResult{nodes: bots}) do
bots
|> uniqify_by(:identifier)
|> assign_bot_configuration()
end
# iterate over the bots and parse their `%Configuration{}`. Convert into
# a list with two element tuples so the bots can be spawned with their
# associated configurations.
defp assign_bot_configuration(bots) do
Enum.map(bots, fn bot ->
high_destination =
Node.node_name(
bot.high_destination.type,
bot.high_destination.identifier
)
low_destination =
Node.node_name(
bot.low_destination.type,
bot.low_destination.identifier
)
configuration =
Configuration.new(
low_destination,
high_destination
)
{bot, configuration}
end)
end
# From the ParseResult get the nodes and return a list of nodes where type
# equals `:output`. The list is made unique by looking at the identifier.
defp extract_outputs(%ParseResult{nodes: nodes}) do
nodes
|> get_destination_addresses()
|> filter_node_type(:output)
|> uniqify_by(:identifier)
end
defp get_destination_addresses(nodes) do
Enum.flat_map(nodes, fn node ->
[node.low_destination, node.high_destination]
end)
end
defp filter_node_type(nodes, type) do
Enum.filter(nodes, fn node ->
node.type == type
end)
end
defp uniqify_by(nodes, field) do
Enum.uniq_by(nodes, &Map.get(&1, field))
end
  defp read_input(path \\ @challenge_input) do
File.read!(path)
end
end
|
advent-of-code-2016/day_10/lib/day10.ex
| 0.649245 | 0.555616 |
day10.ex
|
starcoder
|
if Code.ensure_loaded?(Plug) do
defmodule Shapt.Plug do
@moduledoc """
This plug provides two endpoints:
- GET that will return the current values of your toggles at runtime.
- POST that will reload the current values of your toggles at runtime.
```
plug Shapt.Plug,
path: "/toggles",
modules: [TestModule]
```
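If the `:formatter` option is set to `Jason` or `Poison`, responses are
rendered as JSON; any other value (or none) falls back to plain text:
```
plug Shapt.Plug,
  path: "/toggles",
  modules: [TestModule],
  formatter: Jason
```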
"""
use Plug.Router
plug(:match)
plug(:dispatch, builder_opts())
get _ do
with true <- conn.request_path == opts[:path],
true <- Enum.all?(opts[:modules], &Code.ensure_loaded?/1),
true <- Enum.all?(opts[:modules], &(&1 |> Process.whereis() |> is_pid())) do
opts[:modules]
|> Enum.map(&{&1, &1.all_values()})
|> prepare_response(conn, 200, opts[:formatter])
else
_any ->
conn
end
end
post _ do
with true <- conn.request_path == opts[:path],
true <- Enum.all?(opts[:modules], &Code.ensure_loaded?/1) do
opts[:modules]
|> Enum.map(& &1.reload())
opts[:modules]
|> Enum.map(&{&1, &1.all_values()})
|> prepare_response(conn, 201, opts[:formatter])
else
_any ->
conn
end
end
match _ do
conn
end
defp halt_with_response(conn, type, status, body) do
conn
|> halt
|> put_resp_content_type(type)
|> send_resp(status, body)
end
defp prepare_response(modules, conn, status, Jason) do
body = format_jason(modules)
halt_with_response(conn, "application/json", status, body)
end
defp prepare_response(modules, conn, status, Poison) do
body = format_poison(modules)
halt_with_response(conn, "application/json", status, body)
end
defp prepare_response(modules, conn, status, _formatter) do
body = format_text(modules)
halt_with_response(conn, "text/plain", status, body)
end
defp format_text(modules) do
modules
|> Enum.map(&format_string/1)
|> Enum.join("\n")
end
defp format_string({mod, keys}) do
"#{inspect(mod)}: #{inspect(keys, pretty: true, width: 20)}"
end
defp format_jason(modules) do
modules
|> Enum.map(fn {k, v} -> {inspect(k), v} end)
|> Enum.into(%{})
|> Jason.encode!(escape: :html_safe, pretty: true)
end
defp format_poison(modules) do
modules
|> Enum.map(fn {k, v} -> {inspect(k), v} end)
|> Enum.into(%{})
|> Poison.encode!()
end
end
end
|
lib/shapt/plug.ex
| 0.679072 | 0.636678 |
plug.ex
|
starcoder
|
defmodule Day8 do
@moduledoc """
--- Day 8: I Heard You Like Registers ---
You receive a signal directly from the CPU. Because of your recent assistance with jump instructions, it would like
you to compute the result of a series of unusual register instructions.
Each instruction consists of several parts: the register to modify, whether to increase or decrease that register's
value, the amount by which to increase or decrease it, and a condition. If the condition fails, skip the instruction
without modifying the register. The registers all start at 0. The instructions look like this:
b inc 5 if a > 1
a inc 1 if b < 5
c dec -10 if a >= 1
c inc -20 if c == 10
These instructions would be processed as follows:
Because a starts at 0, it is not greater than 1, and so b is not modified.
a is increased by 1 (to 1) because b is less than 5 (it is 0).
c is decreased by -10 (to 10) because a is now greater than or equal to 1 (it is 1).
c is increased by -20 (to -10) because c is equal to 10.
After this process, the largest value in any register is 1.
You might also encounter <= (less than or equal to) or != (not equal to). However, the CPU doesn't have the bandwidth
to tell you what all the registers are named, and leaves that to you to determine.
What is the largest value in any register after completing the instructions in your puzzle input?
--- Part Two ---
To be safe, the CPU also needs to know the highest value held in any register during this process so that it can
decide how much memory to allocate to these operations. For example, in the above instructions, the highest value
ever held was 10 (in register c after the third instruction was evaluated).
"""
  defp common_part(s) do
    s
    |> String.split("\n")
    |> Enum.map(&String.split/1)
    |> List.foldl(Map.new(), fn l, acc -> process_instruction(l, acc) end)
  end
  def part_a do
    File.read!("res/day8.input")
    |> common_part()
    |> Enum.max_by(fn {_k, {v, _maxv}} -> v end)
  end
  def test_a do
    File.read!("res/day8_test.input")
    |> common_part()
    |> Enum.max_by(fn {_k, {v, _maxv}} -> v end)
  end
  def part_b do
    File.read!("res/day8.input")
    |> common_part()
    |> Enum.max_by(fn {_k, {_v, maxv}} -> maxv end)
  end
  def test_b do
    File.read!("res/day8_test.input")
    |> common_part()
    |> Enum.max_by(fn {_k, {_v, maxv}} -> maxv end)
  end
defp process_instruction([reg1, instruction, value1, "if", reg2, compare, value2], registers) do
case compare_regs(Map.get(registers, reg2, {0,0}), String.to_integer(value2), compare) do
true ->
do_instruction(reg1, value1, instruction, registers)
false ->
registers
end
end
defp process_instruction([], registers) do
registers
end
  defp do_instruction(reg1, newvalue, "inc", registers) do
    {oldvalue, oldmaxval} = Map.get(registers, reg1, {0, 0})
    sumval = oldvalue + String.to_integer(newvalue)
    Map.put(registers, reg1, {sumval, max(sumval, oldmaxval)})
  end
  defp do_instruction(reg1, newvalue, "dec", registers) do
    {oldvalue, oldmaxval} = Map.get(registers, reg1, {0, 0})
    sumval = oldvalue - String.to_integer(newvalue)
    Map.put(registers, reg1, {sumval, max(sumval, oldmaxval)})
  end
defp compare_regs({reg,_}, value, ">") do
reg > value
end
defp compare_regs({reg,_}, value, "<") do
reg < value
end
defp compare_regs({reg,_}, value, "==") do
reg === value
end
defp compare_regs({reg,_}, value, ">=") do
reg >= value
end
defp compare_regs({reg,_}, value, "<=") do
reg <= value
end
defp compare_regs({reg,_}, value, "!=") do
reg != value
end
end
|
lib/day8.ex
| 0.712632 | 0.725211 |
day8.ex
|
starcoder
|
defmodule Hexate do
@moduledoc """
A simple module to convert to and from hex encoded strings.
Encodes / decodes both char-lists and strings.
"""
@doc """
  Returns a hex encoded string from a char-list, string, integer or float.
## Examples
iex> Hexate.encode("This is a test.")
"54686973206973206120746573742e"
iex> Hexate.encode('This is a test.')
"54686973206973206120746573742e"
iex> Hexate.encode(123456)
"1e240"
iex> Hexate.encode(15, 4)
"000f"
iex> Hexate.encode(15.0, 2)
"0f"
iex> Hexate.encode(15.0)
"f"
"""
def encode(int, digits \\ 1)
def encode(int, digits) when is_integer(int) do
int
|> Integer.to_string(16)
|> String.downcase
|> String.pad_leading(digits, "0")
end
def encode(float, digits) when is_float(float) do
encode(round(float), digits)
end
def encode(str, _digits) when is_binary(str) do
str
|> binary_to_hex_list
|> IO.iodata_to_binary
end
def encode(str, _digits) when is_list(str) do
str
|> list_to_hex
|> IO.iodata_to_binary
end
@doc """
Returns a hex encoded list from a char-list, string or integer.
## Examples
iex> Hexate.encode_to_list("This is a test.")
'54686973206973206120746573742e'
iex> Hexate.encode_to_list('This is a test.')
'54686973206973206120746573742e'
iex> Hexate.encode_to_list(123456)
'1e240'
"""
def encode_to_list(str) when is_binary(str) do
binary_to_hex_list(str)
end
def encode_to_list(str) when is_list(str) do
list_to_hex(str)
end
def encode_to_list(int) when is_integer(int) do
int
|> Integer.to_charlist(16)
|> :string.to_lower
end
@doc """
Returns a decoded binary from a hex string in either char-list
or string form.
## Examples
iex> Hexate.decode("54686973206973206120746573742e")
"This is a test."
iex> Hexate.decode('54686973206973206120746573742e')
"This is a test."
"""
def decode(hex_str) when is_binary(hex_str) do
hex_str
|> :binary.bin_to_list
|> hex_str_to_list
|> IO.iodata_to_binary
end
def decode(hex_str) when is_list(hex_str) do
hex_str
|> hex_str_to_list
|> IO.iodata_to_binary
end
@doc """
Returns a decoded char-list from a hex string in either char-list
or string form.
## Examples
iex> Hexate.decode_to_list("54686973206973206120746573742e")
'This is a test.'
iex> Hexate.decode_to_list('54686973206973206120746573742e')
'This is a test.'
"""
def decode_to_list(hex_str) when is_binary(hex_str) do
hex_str
|> :binary.bin_to_list
|> hex_str_to_list
end
def decode_to_list(hex_str) when is_list(hex_str) do
hex_str_to_list(hex_str)
end
@doc """
Returns an integer representation of a given string of hex,
taking a char-list or a string as an argument.
## Examples
iex> Hexate.to_integer('54686973206973206120746573742e')
438270661302729020147902120434299950
iex> Hexate.to_integer("54686973206973206120746573742e")
438270661302729020147902120434299950
"""
def to_integer(hex_str) when is_list(hex_str) do
List.to_integer(hex_str, 16)
end
def to_integer(hex_str) when is_binary(hex_str) do
String.to_integer(hex_str, 16)
end
defp binary_to_hex_list(str) do
str
|> :binary.bin_to_list
|> list_to_hex
end
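  # Decode a charlist of hex digits two at a time, e.g. ?5 and ?4 -> 0x54 (84).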
defp hex_str_to_list([]) do
[]
end
defp hex_str_to_list([x, y | tail]) do
[to_int(x) * 16 + to_int(y) | hex_str_to_list(tail)]
end
defp list_to_hex([]) do
[]
end
defp list_to_hex([head | tail]) do
to_hex_str(head) ++ list_to_hex(tail)
end
defp to_hex_str(n) when n < 256 do
[to_hex(div(n, 16)), to_hex(rem(n, 16))]
end
defp to_hex(i) when i < 10 do
    ?0 + i
end
defp to_hex(i) when i >= 10 and i < 16 do
?a + (i - 10)
end
defp to_int(c) when ?0 <= c and c <= ?9 do
c - ?0
end
defp to_int(c) when ?A <= c and c <= ?F do
c - ?A + 10
end
defp to_int(c) when ?a <= c and c <= ?f do
c - ?a + 10
end
end
|
lib/hexate.ex
| 0.88754 | 0.486027 |
hexate.ex
|
starcoder
|
defmodule Versionary.Plug.VerifyHeader do
@moduledoc """
Use this plug to verify a version string in the header.
## Example
```
plug Versionary.Plug.VerifyHeader, versions: ["application/vnd.app.v1+json"]
```
If multiple versions are passed to this plug and at least one matches the
version will be considered valid.
## Example
```
plug Versionary.Plug.VerifyHeader, versions: ["application/vnd.app.v1+json",
"application/vnd.app.v2+json"]
```
It's also possible to verify versions against configured mime types. If
multiple mime types are passed and at least one matches the version will be
considered valid.
## Example
```
config :mime, :types, %{
"application/vnd.app.v1+json" => [:v1]
}
```
```
plug Versionary.Plug.VerifyHeader, accepts: [:v1]
```
By default, this plug will look at the `Accept` header for the version string
to verify against. If you'd like to verify against another header specify the
header you'd like to verify against in the `header` option.
## Example
```
plug Versionary.Plug.VerifyHeader, header: "accept",
versions: ["application/vnd.app.v1+json"]
```
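After this plug runs, `conn.private[:version_verified]` holds the boolean
verification result. When the request carried a version header,
`conn.private[:raw_version]` holds its raw value and `conn.private[:version]`
holds the extensions registered for that mime type.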
"""
import Plug.Conn
@default_header_opt "accept"
@doc false
def init(opts) do
%{
accepts: opts[:accepts] || [],
header: opts[:header] || @default_header_opt,
versions: opts[:versions] || []
}
end
@doc false
def call(conn, opts) do
conn
|> verify_version(opts)
|> store_version(opts)
end
# private
defp get_all_versions(opts) do
opts[:versions] ++ get_mime_versions(opts)
end
defp get_mime_versions(%{accepts: accepts}), do: get_mime_versions(accepts)
defp get_mime_versions([h|t]), do: [MIME.type(h)] ++ get_mime_versions(t)
defp get_mime_versions([]), do: []
defp get_mime_versions(nil), do: []
defp get_version(conn, opts) do
    case get_req_header(conn, opts[:header]) do
      [] -> nil
      [version | _] -> version
    end
end
defp verify_version(conn, opts) do
verified = Enum.member?(get_all_versions(opts), get_version(conn, opts))
conn
|> put_private(:version_verified, verified)
end
defp store_version(conn, opts) do
raw_version = get_version(conn, opts)
do_store_version(conn, raw_version)
end
defp do_store_version(conn, nil), do: conn
defp do_store_version(conn, raw_version) do
version = MIME.extensions(raw_version)
conn
|> put_private(:version, version)
|> put_private(:raw_version, raw_version)
end
end
|
lib/versionary/plug/verify_header.ex
| 0.877496 | 0.848471 |
verify_header.ex
|
starcoder
|
defmodule Fxnk.Map do
@moduledoc """
`Fxnk.Map` are functions that work with maps.
"""
import Fxnk.Functions, only: [curry: 1]
@doc """
Curried `assemble/2`
## Examples
iex> map = %{red: "red", green: "green", blue: "blue" }
iex> fnmap = %{
...> red: Fxnk.Flow.compose([&String.upcase/1, Fxnk.Map.prop(:red)]),
...> blue: Fxnk.Flow.compose([&String.reverse/1, Fxnk.Map.prop(:blue)])
...> }
iex> assembler = Fxnk.Map.assemble(fnmap)
iex> assembler.(map)
%{red: "RED", blue: "eulb"}
"""
@spec assemble(%{any() => function()}) :: (map() -> map())
def assemble(fn_map) do
fn map -> assemble(map, fn_map) end
end
@doc """
Takes an initial map and a "builder" map where each value is a function. Builds a new map by setting the keys in the function map to
the values returned by the function applied to the original map.
## Examples
iex> map = %{red: "red", green: "green", blue: "blue" }
iex> fnmap = %{
...> red: Fxnk.Flow.compose([&String.upcase/1, Fxnk.Map.prop(:red)]),
...> blue: Fxnk.Flow.compose([&String.reverse/1, Fxnk.Map.prop(:blue)])
...> }
iex> Fxnk.Map.assemble(map, fnmap)
%{red: "RED", blue: "eulb"}
"""
@spec assemble(map(), %{any() => function()}) :: any()
def assemble(map, fn_map) do
fn_map
|> Map.to_list()
|> Enum.reduce(%{}, fn {key, function}, acc ->
Map.put_new(acc, key, function.(map))
end)
end
@doc """
Takes a map and a function that accepts a map and returns a map. Runs the map against the function and merges the initial map into the result.
## Examples
iex> map = %{red: "red", green: "green", blue: "blue"}
iex> colorCombiner = Fxnk.Map.combine(fn %{red: red, blue: blue} -> %{purple: red <> blue} end)
iex> colorCombiner.(map)
%{red: "red", green: "green", blue: "blue", purple: "redblue"}
"""
@spec combine((map() -> map())) :: (map() -> map())
def combine(function) do
fn map -> combine(map, function) end
end
@doc """
Takes a map and a function that accepts a map and returns a map. Runs the map against the function and merges the initial map into the result.
## Examples
iex> map = %{red: "red", green: "green", blue: "blue"}
iex> colorCombiner = Fxnk.Functions.always(%{purple: "purple"})
iex> Fxnk.Map.combine(map, colorCombiner)
%{red: "red", green: "green", blue: "blue", purple: "purple"}
"""
@spec combine(map(), (map() -> map())) :: map()
def combine(map, function) do
Map.merge(function.(map), map)
end
@doc """
  `combine/2` but also accepts a combining function as the last argument.
## Examples
iex> map = %{colors: %{red: "red", green: "green", blue: "blue"}}
iex> colorCombiner = Fxnk.Functions.always(%{colors: %{red: "fire red", purple: "purple"}})
iex> Fxnk.Map.combine_with(map, colorCombiner, &Fxnk.Map.merge_deep_right/2)
%{colors: %{red: "fire red", green: "green", blue: "blue", purple: "purple"}}
"""
@spec combine_with(map(), (map() -> map()), (map(), map() -> map())) :: map()
def combine_with(map, function, combining_function) do
apply(combining_function, [map, function.(map)])
end
@doc """
  Return a specific element in a nested map. If the path does not exist, returns the original map.
## Examples
iex> map = %{one: %{two: %{three: "three" }}}
iex> Fxnk.Map.path(map, [:one, :two, :three])
"three"
iex> Fxnk.Map.path(map, [:one, :two])
%{three: "three"}
iex> Fxnk.Map.path(map, [:one, :four])
%{one: %{two: %{three: "three" }}}
"""
@spec path(map(), [binary() | atom()]) :: map() | any()
def path(map, path_array) do
do_path(map, path_array, map)
end
@doc """
Like `path/2`, but returns the `or_value` when the path is not found.
## Examples
iex> map = %{one: %{two: %{three: "three" }}}
iex> Fxnk.Map.path_or(map, [:one, :two, :three], :foo)
"three"
iex> Fxnk.Map.path_or(map, [:one, :two], :foo)
%{three: "three"}
iex> Fxnk.Map.path_or(map, [:one, :four], :foo)
:foo
"""
@spec path_or(map(), [binary() | atom()], any()) :: map() | any()
def path_or(map, path_array, or_value) do
do_path_or(map, path_array, or_value)
end
@doc """
  Accepts an atom or string `key` and returns a function that takes a `map`. Returns the map's value at `key` or `nil`.
## Examples
iex> getProp = Fxnk.Map.prop("foo")
iex> getProp.(%{"foo" => "foo", "bar" => "bar"})
"foo"
iex> getProp2 = Fxnk.Map.prop(:foo)
iex> getProp2.(%{foo: "foo", bar: "bar"})
"foo"
"""
@spec prop(atom() | binary()) :: (map() -> any())
def prop(key) when is_binary(key) or is_atom(key) do
curry(fn map -> prop(map, key) end)
end
@doc """
Accepts a map and a key. Returns the map's value at `key` or `nil`
## Examples
iex> Fxnk.Map.prop(%{"foo" => "foo", "bar" => "bar"}, "foo")
"foo"
iex> Fxnk.Map.prop(%{foo: "foo", bar: "bar"}, :foo)
"foo"
"""
@spec prop(map(), atom() | binary()) :: any()
def prop(map, key) when is_map(map) and (is_binary(key) or is_atom(key)) do
map[key]
end
@doc """
Accepts a list of keys and returns a function that takes a map. Returns a list of the values associated with the keys in the map.
## Examples
iex> getProps = Fxnk.Map.props(["foo", "bar"])
iex> getProps.(%{"foo" => "foo", "bar" => "bar", "baz" => "baz"})
["foo", "bar"]
iex> getProps2 = Fxnk.Map.props([:foo, :bar])
iex> getProps2.(%{foo: "foo", bar: "bar", baz: "baz"})
["foo", "bar"]
"""
@spec props([atom() | binary(), ...]) :: (map() -> [any(), ...])
def props(keys) when is_list(keys) do
curry(fn map -> props(map, keys) end)
end
@doc """
Accepts a map and a list of keys and returns a list of the values associated with the keys in the map.
## Examples
iex> Fxnk.Map.props(%{"foo" => "foo", "bar" => "bar", "baz" => "baz"}, ["foo", "bar"])
["foo", "bar"]
iex> Fxnk.Map.props(%{foo: "foo", bar: "bar", baz: "baz"}, [:foo, :bar])
["foo", "bar"]
"""
@spec props(map(), [atom() | binary(), ...]) :: [any(), ...]
def props(map, keys) when is_list(keys) and is_map(map) do
for key <- keys, do: prop(map, key)
end
@doc """
Curried `prop_equals/3`, takes a value, returns a function that accepts a map and a key.
## Examples
iex> isFoo = Fxnk.Map.prop_equals("foo")
iex> isFoo.(%{foo: "foo"}, :foo)
true
"""
@spec prop_equals(any()) :: (map(), atom() | String.t() -> boolean())
def prop_equals(value) do
fn map, key -> prop_equals(map, key, value) end
end
@doc """
Curried `prop_equals/3`, takes a key and a value. Returns a function that accepts a map.
## Examples
iex> isKeyFoo = Fxnk.Map.prop_equals(:foo, "foo")
iex> isKeyFoo.(%{foo: "foo"})
true
"""
@spec prop_equals(atom | binary, any) :: (map() -> boolean())
def prop_equals(key, value) when is_atom(key) or is_binary(key) do
fn map -> prop_equals(map, key, value) end
end
@doc """
  Accepts a map, key and value. Checks whether the value at the key in the map is equal to the given value.
## Examples
iex> Fxnk.Map.prop_equals(%{foo: "foo"}, :foo, "foo")
true
iex> Fxnk.Map.prop_equals(%{foo: "bar"}, :foo, "foo")
false
"""
@spec prop_equals(map(), atom() | binary(), any()) :: boolean()
def prop_equals(map, key, value) when is_map(map) and (is_binary(key) or is_atom(key)) do
map[key] === value
end
@doc """
Accepts a list of args, returns a curried `pick/2`.
## Examples
iex> pickArgs = Fxnk.Map.pick([:red, :blue, :orange])
iex> pickArgs.(%{ red: "RED", green: "GREEN", blue: "BLUE", yellow: "YELLOW" })
%{red: "RED", blue: "BLUE"}
"""
@spec pick([atom(), ...]) :: (map() -> map())
def pick(args) when is_list(args) do
curry(fn map -> pick(map, args) end)
end
@doc """
`pick/2` takes a `Map` and a `List` of atoms, and returns a map of only the selected keys that exist. It will
return an empty map if passed an empty map or an empty list.
## Examples
iex> Fxnk.Map.pick(%{ red: "RED", green: "GREEN", blue: "BLUE", yellow: "YELLOW" }, [:red, :blue, :orange])
%{red: "RED", blue: "BLUE"}
"""
@spec pick(map(), [atom(), ...]) :: map()
def pick(map, _) when map_size(map) == 0, do: map
def pick(_, []), do: %{}
def pick(map, args) when is_map(map) and is_list(args) do
do_pick(map, args, %{})
end
@doc """
Curried `has_prop?/2`
## Examples
iex> hasFoo = Fxnk.Map.has_prop?(:foo)
iex> hasFoo.(%{foo: 'foo'})
true
iex> hasFoo.(%{bar: 'bar'})
false
"""
@spec has_prop?(atom() | String.t()) :: (map() -> boolean())
def has_prop?(property) when is_binary(property) or is_atom(property) do
curry(fn map -> has_prop?(map, property) end)
end
@doc """
Takes a map and a property, returns `true` if the property has a value in the map, `false` otherwise.
## Examples
iex> Fxnk.Map.has_prop?(%{foo: "foo"}, :foo)
true
iex> Fxnk.Map.has_prop?(%{foo: "foo"}, :bar)
false
"""
@spec has_prop?(map(), atom() | String.t()) :: boolean()
def has_prop?(map, property) when is_map(map) and (is_binary(property) or is_atom(property)) do
prop(map, property) !== nil
end
@doc """
Merges two maps together, if both maps have the same key, the value on the right will be used.
## Example
iex> Fxnk.Map.merge_right(%{red: "red", blue: "blue"}, %{red: "orange", green: "green"})
%{red: "orange", blue: "blue", green: "green"}
"""
@spec merge_right(map(), map()) :: map()
def merge_right(map1, map2) do
Map.merge(map1, map2)
end
@doc """
Merges two maps together deeply. If both maps have the same key, the value on the right will be used.
If both keys are a map, the maps will be merged together recursively, preferring values on the right.
## Example
iex> map1 = %{red: "red", green: %{green: "green", yellowish: "greenish", with_blue: %{turqoise: "blueish green"}}, blue: "blue"}
iex> map2 = %{red: "orange", green: %{green: "blue and yellow", yellowish: "more yellow than green"}}
iex> Fxnk.Map.merge_deep_right(map1, map2)
%{red: "orange", green: %{green: "blue and yellow", yellowish: "more yellow than green", with_blue: %{turqoise: "blueish green"}}, blue: "blue"}
"""
@spec merge_deep_right(map(), map()) :: map()
def merge_deep_right(map1, map2) do
Map.merge(map1, map2, fn _, v1, v2 ->
if is_map(v1) and is_map(v2) do
merge_deep_right(v1, v2)
else
v2
end
end)
end
@doc """
Merges two maps together, if both maps have the same key, the value on the left will be used.
## Example
iex> Fxnk.Map.merge_left(%{red: "red", blue: "blue"}, %{red: "orange", green: "green"})
%{red: "red", blue: "blue", green: "green"}
"""
@spec merge_left(map(), map()) :: map()
def merge_left(map1, map2) do
Map.merge(map2, map1)
end
@doc """
Merges two maps together deeply. If both maps have the same key, the value on the left will be used.
If both keys are a map, the maps will be merged together recursively, preferring values on the left.
## Example
iex> map1 = %{red: "red", green: %{green: "green", yellowish: "greenish", with_blue: %{turqoise: "blueish green"}}, blue: "blue"}
iex> map2 = %{red: "orange", green: %{green: "blue and yellow", yellowish: "more yellow than green"}}
iex> Fxnk.Map.merge_deep_left(map1, map2)
%{red: "red", green: %{green: "green", yellowish: "greenish", with_blue: %{turqoise: "blueish green"}}, blue: "blue"}
"""
@spec merge_deep_left(map(), map()) :: map()
def merge_deep_left(map1, map2) do
merge_deep_right(map2, map1)
end
@doc """
Rename a key in a map, takes the map, current key and replacement key. Returns the original map with the updated key.
## Example
iex> Fxnk.Map.rename(%{id: "1234"}, :id, :user_id)
%{user_id: "1234"}
iex> Fxnk.Map.rename(%{hello: "world", foo: "foo" }, :foo, :bar)
%{hello: "world", bar: "foo"}
"""
@spec rename(map(), String.t() | atom(), String.t() | atom()) :: map()
def rename(map, key, new_key) do
{value, popped_map} = Access.pop(map, key)
Map.merge(popped_map, %{new_key => value})
end
@doc """
Rename multiple keys in a map. Takes the original map and a map where the key is the original key and the value is the replacement key.
## Example
iex> Fxnk.Map.rename_all(%{user_id: "1234", foo: "foo", bar: "bar"}, %{user_id: :id, bar: :baz})
%{id: "1234", foo: "foo", baz: "bar"}
"""
@spec rename_all(map(), map()) :: map()
def rename_all(map, renames) do
renames
|> Map.to_list()
|> Enum.reduce(map, fn {old, new}, acc -> rename(acc, old, new) end)
end
defp do_pick(_, [], acc), do: acc
defp do_pick(map, [hd | tl], acc) do
case Map.fetch(map, hd) do
{:ok, val} -> do_pick(map, tl, Map.put(acc, hd, val))
_ -> do_pick(map, tl, acc)
end
end
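  # do_path/3 walks the path, returning the original map when a key is missing
  # (prop/2 yields nil), whereas do_path_or/3 falls back to the given default.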
defp do_path(map, _, nil), do: map
defp do_path(_, [], acc), do: acc
defp do_path(map, [hd | tl], acc), do: do_path(map, tl, prop(acc, hd))
defp do_path_or(nil, _, default_to), do: default_to
defp do_path_or(map, [], _), do: map
defp do_path_or(map, [hd | tl], default_to), do: do_path_or(prop(map, hd), tl, default_to)
end
|
lib/fxnk/map.ex
| 0.921176 | 0.531027 |
map.ex
|
starcoder
|
defmodule Strava.DetailedActivity do
@moduledoc """
"""
@derive [Poison.Encoder]
defstruct [
:id,
:external_id,
:upload_id,
:athlete,
:name,
:distance,
:moving_time,
:elapsed_time,
:total_elevation_gain,
:elev_high,
:elev_low,
:type,
:start_date,
:start_date_local,
:timezone,
:start_latlng,
:end_latlng,
:achievement_count,
:kudos_count,
:comment_count,
:athlete_count,
:photo_count,
:total_photo_count,
:map,
:trainer,
:commute,
:manual,
:private,
:flagged,
:workout_type,
:average_speed,
:max_speed,
:has_kudoed,
:gear_id,
:kilojoules,
:average_watts,
:device_watts,
:max_watts,
:weighted_average_watts,
:description,
:photos,
:gear,
:calories,
:segment_efforts,
:device_name,
:embed_token,
:splits_metric,
:splits_standard,
:laps,
:best_efforts
]
@type t :: %__MODULE__{
id: integer(),
external_id: String.t(),
upload_id: integer(),
athlete: Strava.MetaAthlete.t(),
name: String.t(),
distance: float(),
moving_time: integer(),
elapsed_time: integer(),
total_elevation_gain: float(),
elev_high: float(),
elev_low: float(),
type: String.t(),
start_date: DateTime.t(),
start_date_local: DateTime.t(),
timezone: String.t(),
start_latlng: list(float()),
end_latlng: list(float()),
achievement_count: integer(),
kudos_count: integer(),
comment_count: integer(),
athlete_count: integer(),
photo_count: integer(),
total_photo_count: integer(),
map: Strava.PolylineMap.t(),
trainer: boolean(),
commute: boolean(),
manual: boolean(),
private: boolean(),
flagged: boolean(),
workout_type: integer(),
average_speed: float(),
max_speed: float(),
has_kudoed: boolean(),
gear_id: String.t(),
kilojoules: float(),
average_watts: float(),
device_watts: boolean(),
max_watts: integer(),
weighted_average_watts: integer(),
description: String.t(),
photos: Strava.PhotosSummary.t(),
gear: Strava.SummaryGear.t(),
calories: float(),
segment_efforts: [Strava.DetailedSegmentEffort.t()],
device_name: String.t(),
embed_token: String.t(),
splits_metric: [Strava.Split.t()],
splits_standard: [Strava.Split.t()],
laps: [Strava.Lap.t()],
best_efforts: [Strava.DetailedSegmentEffort.t()]
}
end
defimpl Poison.Decoder, for: Strava.DetailedActivity do
import Strava.Deserializer
def decode(value, options) do
value
|> deserialize(:athlete, :struct, Strava.MetaAthlete, options)
|> deserialize(:start_date, :datetime, options)
|> deserialize(:start_date_local, :datetime, options)
|> deserialize(:map, :struct, Strava.PolylineMap, options)
|> deserialize(:photos, :struct, Strava.PhotosSummary, options)
|> deserialize(:gear, :struct, Strava.SummaryGear, options)
|> deserialize(:segment_efforts, :list, Strava.DetailedSegmentEffort, options)
|> deserialize(:splits_metric, :list, Strava.Split, options)
|> deserialize(:splits_standard, :list, Strava.Split, options)
|> deserialize(:laps, :list, Strava.Lap, options)
|> deserialize(:best_efforts, :list, Strava.DetailedSegmentEffort, options)
end
end
|
lib/strava/model/detailed_activity.ex
| 0.710628 | 0.449816 |
detailed_activity.ex
|
starcoder
|
defmodule Cardigan.Game do
alias Cardigan.{Deck, Card}
@behaviour Access
defstruct name: nil,
min_num_of_players: 1,
max_num_of_players: 20,
metadata: nil,
decks: [],
hands: [],
started: false
# Implementing the Access behaviour by delegating to Map
defdelegate fetch(data, key), to: Map
defdelegate get_and_update(data, key, fun), to: Map
defdelegate pop(data, key), to: Map
@doc """
Is the game ready to start?
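  A game is startable when it has not started yet and the number of joined
  hands is within `min_num_of_players` and `max_num_of_players`.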
"""
def startable?(game) do
n = length(game.hands)
not game.started and n >= game.min_num_of_players and n <= game.max_num_of_players
end
@doc """
Join a yet to start game, by hand id.
"""
def join(_, ""), do: {:error, :argument_error}
def join(%__MODULE__{started: false} = game, hand_id) do
if length(game.hands) < game.max_num_of_players do
if Enum.find(game.hands, &(&1.id == hand_id)) do
{:error, :not_unique}
else
hand = %Deck{id: hand_id}
{:ok, update_in(game.hands, &[hand | &1])}
end
else
{:error, :table_full}
end
end
@doc """
Start the game.
"""
def start(%__MODULE__{started: true}), do: {:error, :already_started}
def start(%__MODULE__{hands: hands, min_num_of_players: n}) when length(hands) < n do
{:error, :not_enough_players}
end
def start(game) do
game = update_in(game.hands, &Enum.shuffle/1)
{:ok, Map.put(game, :started, true)}
end
@doc """
Move a card from a deck/hand to a new deck by position.
"""
def move(game, from_is, from_id, card_id, pos) when is_list(pos) do
case pop_from(game, from_is, from_id, card_id) do
{:error, reason} ->
{:error, reason}
{:ok, card, game} ->
game
|> place([card], pos)
|> drop_empty_decks()
|> ok()
end
end
@doc """
Move entire deck/hand to another deck/hand.
"""
def move(game, from_is, from_id, to_is, to_id) do
case get_and_update_in(game, [from_is, id_access(from_id)], fn d ->
{d.cards, %{d | cards: []}}
end) do
{[], _} ->
{:error, :not_found}
{[cards], game} ->
case get_in(game, [to_is, id_access(to_id)]) do
[] ->
{:error, :not_found}
_ ->
game
|> update_in([to_is, id_access(to_id)], &Deck.put_cards(&1, cards))
|> drop_empty_decks()
|> ok()
end
end
end
@doc """
Move a card from a deck/hand to another deck/hand.
"""
def move(game, from_is, from_id, card_id, to_is, to_id) do
case get_in(game, [to_is, id_access(to_id)]) do
[] ->
{:error, :not_found}
_ ->
case pop_from(game, from_is, from_id, card_id) do
{:error, reason} ->
{:error, reason}
{:ok, card, game} ->
card =
case to_is do
:hands -> Map.put(card, :face, true)
:decks -> card
end
game
|> update_in([to_is, id_access(to_id)], &Deck.put(&1, card))
|> drop_empty_decks()
|> ok()
end
end
end
@doc """
Move entire deck to a new deck by position.
"""
def move(game, :decks, deck_id, pos) when is_list(pos) do
case get_in(game, [:decks, id_access(deck_id)]) do
[] -> {:error, :not_found}
_ -> {:ok, update_in(game, [:decks, id_access(deck_id)], &Map.put(&1, :pos, pos))}
end
end
@doc """
Move entire hand to a new deck by position.
"""
def move(game, :hands, hand_id, pos) when is_list(pos) do
case get_and_update_in(game, [:hands, id_access(hand_id)], fn hand ->
{hand.cards, %{hand | cards: []}}
end) do
{[], _} ->
{:error, :not_found}
{[cards], game} ->
game
|> place(cards, pos)
|> ok()
end
end
@doc """
Flip a card in deck/hand.
"""
def flip(game, where, where_id, card_id) when where in [:decks, :hands] do
game
|> update_in([where, id_access(where_id), :cards, id_access(card_id)], &Card.flip/1)
|> ok()
end
@doc """
Shuffle a deck/hand.
"""
def shuffle(game, where, where_id) do
{:ok, update_in(game, [where, id_access(where_id)], &Deck.shuffle/1)}
end
@doc """
Toggle a deck display mode between stack and fan.
"""
def toggle_deck_display_mode(game, where, where_id) do
game
|> update_in([where, id_access(where_id)], &Deck.toggle_display_mode/1)
|> ok()
end
@doc """
Set all cards in deck to face up.
"""
def deck_up(game, where, where_id) do
face = true
deck_side(game, where, where_id, face)
end
@doc """
Set all cards in deck to face down.
"""
def deck_down(game, where, where_id) do
face = false
deck_side(game, where, where_id, face)
end
# Internals
defp ok(x), do: {:ok, x}
defp pop_from(game, where, where_id, card_id) do
case get_and_update_in(game, [where, id_access(where_id)], &Deck.pop_card(&1, card_id)) do
# When there's no such hand
{[], _} ->
{:error, :not_found}
# When there's no such card
{[nil], _} ->
{:error, :not_found}
{[%Card{} = card], game} ->
{:ok, card, game}
end
end
defp place(game, cards, pos) do
deck = %Deck{id: Cardigan.Random.id(8), pos: pos, cards: cards}
update_in(game.decks, &[deck | &1])
end
defp id_access(id) do
Access.filter(&(&1.id == id))
end
defp drop_empty_decks(game) do
update_in(game.decks, fn decks ->
Enum.filter(decks, fn d -> not Enum.empty?(d.cards) end)
end)
end
defp deck_side(game, where, where_id, face) do
game
|> update_in([where, id_access(where_id)], &Deck.side(&1, face))
|> ok()
end
end
|
lib/cardigan/game.ex
| 0.68616 | 0.506713 |
game.ex
|
starcoder
|
defmodule StatesLanguage.AST.Default do
@moduledoc false
@behaviour StatesLanguage.AST
@impl true
def create(_) do
quote location: :keep do
defdelegate call(pid, event), to: :gen_statem
defdelegate call(pid, event, timeout), to: :gen_statem
defdelegate cast(pid, event), to: :gen_statem
@impl true
def handle_resource(_, _, _, data), do: {:ok, data, []}
@impl true
def handle_call(_, from, _, data), do: {:ok, data, {:reply, from, :ok}}
@impl true
def handle_cast(_, _, data), do: {:ok, data, []}
@impl true
def handle_info(_, _, data), do: {:ok, data, []}
@impl true
def handle_transition(_, _, data), do: {:ok, data, []}
@impl true
def handle_enter(_, _, data), do: {:ok, data, []}
@impl true
def handle_termination(_, _, data), do: :ok
@impl true
def handle_generic_timeout(_, _, data), do: {:ok, data, []}
@impl true
def handle_state_timeout(_, _, data), do: {:ok, data, []}
@impl true
def handle_event_timeout(_, _, data), do: {:ok, data, []}
defoverridable handle_resource: 4,
handle_call: 4,
handle_cast: 3,
handle_info: 3,
handle_transition: 3,
handle_enter: 3,
handle_termination: 3,
handle_generic_timeout: 3,
handle_state_timeout: 3,
handle_event_timeout: 3
@impl true
      def handle_event(:internal, na_event, na_state, %StatesLanguage{} = _data) do
Logger.warn("Unknown Event #{inspect(na_event)} while in state #{inspect(na_state)}")
:keep_state_and_data
end
@impl true
      def handle_event(:enter, source, target, %StatesLanguage{} = _data) do
Logger.warn("Unknown enter event from #{inspect(source)} to #{inspect(target)}")
:keep_state_and_data
end
@impl true
def handle_event(:info, event, state, %StatesLanguage{} = data) do
{:ok, data, actions} = handle_info(event, state, data)
Logger.debug(
"Handled info event: #{inspect(event)} in state #{state} with data #{inspect(data)}"
)
{:keep_state, data, actions}
end
@impl true
def handle_event({:call, from}, event, state, %StatesLanguage{} = data) do
{:ok, data, actions} = handle_call(event, from, state, data)
Logger.debug(
"Handled call event: #{inspect(event)} in state #{state} with data #{inspect(data)}"
)
{:keep_state, data, actions}
end
@impl true
def handle_event(:cast, event, state, %StatesLanguage{} = data) do
{:ok, data, actions} = handle_cast(event, state, data)
Logger.debug(
"Handled cast event: #{inspect(event)} in state #{state} with data #{inspect(data)}"
)
{:keep_state, data, actions}
end
@impl true
def handle_event({:timeout, :generic}, event, state, %StatesLanguage{} = data) do
{:ok, data, actions} = handle_generic_timeout(event, state, data)
Logger.debug(
"Handled generic timeout event: #{inspect(event)} in state #{state} with data #{
inspect(data)
}"
)
{:keep_state, data, actions}
end
@impl true
def handle_event(:state_timeout, event, state, %StatesLanguage{} = data) do
{:ok, data, actions} = handle_state_timeout(event, state, data)
Logger.debug(
"Handled state timeout event: #{inspect(event)} in state #{state} with data #{
inspect(data)
}"
)
{:keep_state, data, actions}
end
@impl true
def handle_event(:timeout, event, state, %StatesLanguage{} = data) do
{:ok, data, actions} = handle_event_timeout(event, state, data)
Logger.debug(
"Handled event timeout event: #{inspect(event)} in state #{state} with data #{
inspect(data)
}"
)
{:keep_state, data, actions}
end
end
end
end
|
lib/states_language/ast/default.ex
| 0.756042 | 0.471162 |
default.ex
|
starcoder
|
defmodule RecurringEvents.Weekly do
@moduledoc """
Handles `:weekly` frequency rule
"""
alias RecurringEvents.Date
@doc """
Returns weekly stream of dates with respect to `:interval`, `:count` and
`:until` rules. Date provided as `:until` is used to figure out week
in which it occurs, exact date is not respected.
# Example
iex> RecurringEvents.Weekly.unfold(~N[2017-01-22 10:11:11],
...> %{freq: :weekly, until: ~N[2017-01-23 15:00:00]})
...> |> Enum.take(10)
[~N[2017-01-22 10:11:11], ~N[2017-01-29 10:11:11]]
"""
def unfold(date, %{freq: :weekly} = rules), do: do_unfold(date, rules)
defp do_unfold(date, %{} = rules) do
step = get_step(rules)
count = get_count(rules)
until_date = until_date(rules)
Stream.resource(
fn -> {date, 0} end,
fn {date, iteration} ->
{[next_date], _} = next_result = next_iteration(date, step, iteration)
cond do
iteration == count -> {:halt, nil}
until_reached(next_date, until_date) -> {:halt, nil}
true -> next_result
end
end,
fn _ -> nil end
)
end
defp next_iteration(date, step, iteration) do
next_date = Date.shift_date(date, step * iteration, :weeks)
acc = {date, iteration + 1}
{[next_date], acc}
end
defp until_reached(_date, :forever), do: false
defp until_reached(date, until_date) do
Date.compare(date, until_date) == :gt
end
defp until_date(%{until: until_date} = rules) do
until_date
|> week_end_date(rules)
end
defp until_date(%{}), do: :forever
defp week_end_date(date, rules) do
current_day = Date.week_day(date)
end_day = week_end_day(rules)
if current_day == end_day do
date
else
date
|> Date.shift_date(1, :days)
|> week_end_date(rules)
end
end
defp week_end_day(%{week_start: start_day}) do
Date.prev_week_day(start_day)
end
defp week_end_day(%{}), do: :sunday
defp get_step(%{interval: interval}), do: interval
defp get_step(%{}), do: 1
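  # Excluded dates still consume iterations, so the count is extended by the
  # number of exclusions to yield the requested number of occurrences.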
defp add_count(%{exclude_date: dates}), do: dates |> Enum.count()
defp add_count(%{}), do: 0
defp get_count(%{count: count} = rules), do: count + add_count(rules)
defp get_count(%{}), do: :infinity
end
|
lib/recurring_events/weekly.ex
| 0.897607 | 0.458531 |
weekly.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.ScheduleEntryLockDailyRepeatingSet do
@moduledoc """
This command is used to set or erase a daily repeating schedule for an
identified user who already has valid user access code.
Params:
* `:set_action` - Indicates whether to erase or modify
* `:user_identifier` - The User Identifier is used to recognize the user
identity. A valid User Identifier MUST be a value starting from 1 to the
maximum number of users supported by the device
* `:schedule_slot_id` - A value from 1 to Number of Slots Daily Repeating
Supported
* `:week_days` - a list of scheduled week day's names
* `:start_hour` - A value from 0 to 23 representing the starting hour of the
time fence.
* `:start_minute` - A value from 0 to 59 representing the starting minute of
the time fence.
* `:duration_hour` - A value from 0 to 23 representing how many hours the
time fence will last
* `:duration_minute` - A value from 0 to 59 representing how many minutes
the time fence will last past the Duration Hour field.
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.Command
alias Grizzly.ZWave.CommandClasses.ScheduleEntryLock
@type param ::
{:set_action, :erase | :modify}
| {:user_identifier, byte()}
| {:schedule_slot_id, byte()}
| {:week_days, [ScheduleEntryLock.week_day()]}
| {:start_hour, 0..23}
| {:start_minute, 0..59}
| {:duration_hour, 0..23}
| {:duration_minute, 0..59}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
command = %Command{
name: :schedule_entry_lock_daily_repeating_set,
command_byte: 0x10,
command_class: ScheduleEntryLock,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
set_action = Command.param!(command, :set_action)
user_identifier = Command.param!(command, :user_identifier)
schedule_slot_id = Command.param!(command, :schedule_slot_id)
week_days = Command.param!(command, :week_days)
start_hour = Command.param!(command, :start_hour)
start_minute = Command.param!(command, :start_minute)
duration_hour = Command.param!(command, :duration_hour)
duration_minute = Command.param!(command, :duration_minute)
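    # Week days are packed into a single-byte bitmask, one bit per day.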
week_day_bitmask = ScheduleEntryLock.weekdays_to_bitmask(week_days)
action_byte = action_to_byte(set_action)
<<action_byte, user_identifier, schedule_slot_id>> <>
week_day_bitmask <> <<start_hour, start_minute, duration_hour, duration_minute>>
end
@impl true
def decode_params(
<<action_byte, user_identifier, schedule_slot_id, week_day_bitmask, start_hour,
start_minute, duration_hour, duration_minute>>
) do
week_days = ScheduleEntryLock.bitmask_to_weekdays(week_day_bitmask)
{:ok,
[
set_action: byte_to_action(action_byte),
user_identifier: user_identifier,
schedule_slot_id: schedule_slot_id,
week_days: week_days,
start_hour: start_hour,
start_minute: start_minute,
duration_hour: duration_hour,
duration_minute: duration_minute
]}
end
defp action_to_byte(:erase), do: 0x00
defp action_to_byte(:modify), do: 0x01
defp byte_to_action(0x00), do: :erase
defp byte_to_action(0x01), do: :modify
end
|
lib/grizzly/zwave/commands/schedule_entry_lock_daily_repeating_set.ex
| 0.881997 | 0.432902 |
schedule_entry_lock_daily_repeating_set.ex
|
starcoder
|
defmodule AWS.XRay do
@moduledoc """
AWS X-Ray provides APIs for managing debug traces and retrieving service maps
and other data created by processing those traces.
"""
@doc """
Retrieves a list of traces specified by ID.
Each trace is a collection of segment documents that originates from a single
request. Use `GetTraceSummaries` to get a list of trace IDs.
"""
def batch_get_traces(client, input, options \\ []) do
path_ = "/Traces"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates a group resource with a name and a filter expression.
"""
def create_group(client, input, options \\ []) do
path_ = "/CreateGroup"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates a rule to control sampling behavior for instrumented applications.
Services retrieve rules with `GetSamplingRules`, and evaluate each rule in
ascending order of *priority* for each request. If a rule matches, the service
records a trace, borrowing it from the reservoir size. After 10 seconds, the
service reports back to X-Ray with `GetSamplingTargets` to get updated versions
of each in-use rule. The updated rule contains a trace quota that the service
can use instead of borrowing from the reservoir.
"""
def create_sampling_rule(client, input, options \\ []) do
path_ = "/CreateSamplingRule"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a group resource.
"""
def delete_group(client, input, options \\ []) do
path_ = "/DeleteGroup"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a sampling rule.
"""
def delete_sampling_rule(client, input, options \\ []) do
path_ = "/DeleteSamplingRule"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves the current encryption configuration for X-Ray data.
"""
def get_encryption_config(client, input, options \\ []) do
path_ = "/EncryptionConfig"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves group resource details.
"""
def get_group(client, input, options \\ []) do
path_ = "/GetGroup"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves all active group details.
"""
def get_groups(client, input, options \\ []) do
path_ = "/Groups"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves all sampling rules.
"""
def get_sampling_rules(client, input, options \\ []) do
path_ = "/GetSamplingRules"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves information about recent sampling results for all sampling rules.
"""
def get_sampling_statistic_summaries(client, input, options \\ []) do
path_ = "/SamplingStatisticSummaries"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Requests a sampling quota for rules that the service is using to sample
requests.
"""
def get_sampling_targets(client, input, options \\ []) do
path_ = "/SamplingTargets"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves a document that describes services that process incoming requests, and
downstream services that they call as a result.
Root services process incoming requests and make calls to downstream services.
Root services are applications that use the [AWS X-Ray SDK](https://docs.aws.amazon.com/xray/index.html). Downstream services can be
other applications, AWS resources, HTTP web APIs, or SQL databases.
"""
def get_service_graph(client, input, options \\ []) do
path_ = "/ServiceGraph"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Get an aggregation of service statistics defined by a specific time range.
"""
def get_time_series_service_statistics(client, input, options \\ []) do
path_ = "/TimeSeriesServiceStatistics"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves a service graph for one or more specific trace IDs.
"""
def get_trace_graph(client, input, options \\ []) do
path_ = "/TraceGraph"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves IDs and annotations for traces available for a specified time frame
using an optional filter.
To get the full traces, pass the trace IDs to `BatchGetTraces`.
A filter expression can target traced requests that hit specific service nodes
or edges, have errors, or come from a known user. For example, the following
filter expression targets traces that pass through `api.example.com`:
`service("api.example.com")`
This filter expression finds traces that have an annotation named `account` with
the value `12345`:
`annotation.account = "12345"`
For a full list of indexed fields and keywords that you can use in filter
expressions, see [Using Filter Expressions](https://docs.aws.amazon.com/xray/latest/devguide/xray-console-filters.html)
in the *AWS X-Ray Developer Guide*.
"""
def get_trace_summaries(client, input, options \\ []) do
path_ = "/TraceSummaries"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Returns a list of tags that are applied to the specified AWS X-Ray group or
sampling rule.
"""
def list_tags_for_resource(client, input, options \\ []) do
path_ = "/ListTagsForResource"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the encryption configuration for X-Ray data.
"""
def put_encryption_config(client, input, options \\ []) do
path_ = "/PutEncryptionConfig"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Used by the AWS X-Ray daemon to upload telemetry.
"""
def put_telemetry_records(client, input, options \\ []) do
path_ = "/TelemetryRecords"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Uploads segment documents to AWS X-Ray.
The [X-Ray SDK](https://docs.aws.amazon.com/xray/index.html) generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A
segment document can be a completed segment, an in-progress segment, or an array
of subsegments.
Segments must include the following fields. For the full segment document
schema, see [AWS X-Ray Segment
Documents](https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html)
in the *AWS X-Ray Developer Guide*.
## Required Segment Document Fields
* `name` - The name of the service that handled the request.
* `id` - A 64-bit identifier for the segment, unique among segments
in the same trace, in 16 hexadecimal digits.
* `trace_id` - A unique identifier that connects all segments and
subsegments originating from a single client request.
* `start_time` - Time the segment or subsegment was created, in
floating point seconds in epoch time, accurate to milliseconds. For example,
`1480615200.010` or `1.480615200010E9`.
* `end_time` - Time the segment or subsegment was closed. For
example, `1480615200.090` or `1.480615200090E9`. Specify either an `end_time` or
`in_progress`.
* `in_progress` - Set to `true` instead of specifying an `end_time`
to record that a segment has been started, but is not complete. Send an in
progress segment when your application receives a request that will take a long
time to serve, to trace the fact that the request was received. When the
response is sent, send the complete segment to overwrite the in-progress
segment.
A `trace_id` consists of three numbers separated by hyphens. For example,
1-58406520-a006649127e371903a2de979. This includes:
## Trace ID Format
* The version number, i.e. `1`.
* The time of the original request, in Unix epoch time, in 8
hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is
`1480615200` seconds, or `58406520` in hexadecimal.
* A 96-bit identifier for the trace, globally unique, in 24
hexadecimal digits.
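## Example
A hedged sketch of uploading one in-progress segment; the identifiers are
illustrative, and `Jason` is assumed to be available for JSON encoding:
    segment = Jason.encode!(%{
      "name" => "api.example.com",
      "id" => "70de5b6f19ff9a0a",
      "trace_id" => "1-58406520-a006649127e371903a2de979",
      "start_time" => 1_480_615_200.010,
      "in_progress" => true
    })
    {:ok, result, _response} =
      put_trace_segments(client, %{"TraceSegmentDocuments" => [segment]})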
"""
def put_trace_segments(client, input, options \\ []) do
path_ = "/TraceSegments"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Applies tags to an existing AWS X-Ray group or sampling rule.
"""
def tag_resource(client, input, options \\ []) do
path_ = "/TagResource"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Removes tags from an AWS X-Ray group or sampling rule.
You cannot edit or delete system tags (those with an `aws:` prefix).
"""
def untag_resource(client, input, options \\ []) do
path_ = "/UntagResource"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates a group resource.
"""
def update_group(client, input, options \\ []) do
path_ = "/UpdateGroup"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Modifies a sampling rule's configuration.
"""
def update_sampling_rule(client, input, options \\ []) do
path_ = "/UpdateSamplingRule"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "xray"}
host = build_host("xray", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
lib/aws/generated/xray.ex
defmodule DBConnection.LogEntry do
@moduledoc """
Struct containing log entry information.
"""
defstruct [:call, :query, :params, :result, :pool_time, :connection_time, :decode_time]
@typedoc """
Log entry information.
* `:call` - The `DBConnection` function called
* `:query` - The query used by the function
* `:params` - The params passed to the function (if any)
* `:result` - The result of the call
* `:pool_time` - The length of time awaiting a connection from the pool (if
the connection was not already checked out)
* `:connection_time` - The length of time using the connection (if a
connection was used)
* `:decode_time` - The length of time decoding the result (if the result
was decoded using `DBConnection.Query.decode/3`)
All times are in the native time units of the VM, see
`System.monotonic_time/0`.
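For example, a recorded time can be converted to microseconds with:
    System.convert_time_unit(entry.connection_time, :native, :microsecond)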
"""
@type t :: %__MODULE__{call: atom,
query: any,
params: any,
result: {:ok, any} | {:ok, any, any} | {:error, Exception.t},
pool_time: non_neg_integer | nil,
connection_time: non_neg_integer | nil,
decode_time: non_neg_integer | nil}
@doc false
def new(call, query, params, times, result) do
entry = %__MODULE__{call: call, query: query, params: params, result: result}
parse_times(times, entry)
end
## Helpers
defp parse_times([], entry), do: entry
defp parse_times(times, entry) do
stop = :erlang.monotonic_time()
{_, entry} = Enum.reduce(times, {stop, entry}, &parse_time/2)
entry
end
defp parse_time({:decode, start}, {stop, entry}) do
{start, %{entry | decode_time: stop - start}}
end
defp parse_time({:checkout, start}, {stop, entry}) do
{start, %{entry | pool_time: stop - start}}
end
defp parse_time({_, start}, {stop, entry}) do
%{connection_time: connection_time} = entry
{start, %{entry | connection_time: (connection_time || 0) + (stop - start)}}
end
end
teachme/deps/db_connection/lib/db_connection/log_entry.ex
defmodule Tesla.Middleware.FormUrlencoded do
@behaviour Tesla.Middleware
@moduledoc """
Send request body as `application/x-www-form-urlencoded`.
Performs encoding of `body` from a `Map` such as `%{"foo" => "bar"}` into
url encoded data.
Performs decoding of the response into a map when urlencoded and content-type
is `application/x-www-form-urlencoded`, so `"foo=bar"` becomes
`%{"foo" => "bar"}`.
### Example usage
```
defmodule Myclient do
use Tesla
plug Tesla.Middleware.FormUrlencoded
end
Myclient.post("/url", %{key: :value})
```
### Options
- `:decode` - decoding function, defaults to `URI.decode_query/1`
- `:encode` - encoding function, defaults to `URI.encode_query/1`
### Nested Maps
Natively, nested maps are not supported in the body, so
`%{"foo" => %{"bar" => "baz"}}` won't be encoded and will raise an error.
Support for this specific case is obtained by configuring the middleware to
encode (and decode) with `Plug.Conn.Query`
```
defmodule Myclient do
use Tesla
plug Tesla.Middleware.FormUrlencoded,
encode: &Plug.Conn.Query.encode/1,
decode: &Plug.Conn.Query.decode/1
end
Myclient.post("/url", %{key: %{nested: "value"}})
"""
@content_type "application/x-www-form-urlencoded"
@doc false
def call(env, next, opts) do
env
|> encode(opts)
|> Tesla.run(next)
|> case do
{:ok, env} -> {:ok, decode(env, opts)}
error -> error
end
end
defp encode(env, opts) do
if encodable?(env) do
env
|> Map.update!(:body, &encode_body(&1, opts))
|> Tesla.put_headers([{"content-type", @content_type}])
else
env
end
end
defp encodable?(%{body: nil}), do: false
defp encodable?(%{body: %Tesla.Multipart{}}), do: false
defp encodable?(_), do: true
defp encode_body(body, _opts) when is_binary(body), do: body
defp encode_body(body, opts), do: do_encode(body, opts)
defp decode(env, opts) do
if decodable?(env) do
env
|> Map.update!(:body, &decode_body(&1, opts))
else
env
end
end
defp decodable?(env), do: decodable_body?(env) && decodable_content_type?(env)
defp decodable_body?(env) do
(is_binary(env.body) && env.body != "") || (is_list(env.body) && env.body != [])
end
defp decodable_content_type?(env) do
case Tesla.get_header(env, "content-type") do
nil -> false
content_type -> String.starts_with?(content_type, @content_type)
end
end
defp decode_body(body, opts), do: do_decode(body, opts)
defp do_encode(data, opts) do
encoder = Keyword.get(opts, :encode, &URI.encode_query/1)
encoder.(data)
end
defp do_decode(data, opts) do
decoder = Keyword.get(opts, :decode, &URI.decode_query/1)
decoder.(data)
end
end
|
lib/tesla/middleware/form_urlencoded.ex
| 0.905692 | 0.704109 |
form_urlencoded.ex
|
starcoder
|
defmodule Ecto.Adapters.SQL do
@moduledoc """
Behaviour and implementation for SQL adapters.
The implementation for SQL adapter provides a
pooled based implementation of SQL and also expose
a query function to developers.
Developers that use `Ecto.Adapters.SQL` should implement
the connection module with specifics on how to connect
to the database and also how to translate the queries
to SQL. See `Ecto.Adapters.SQL.Connection` for more info.
"""
@doc false
defmacro __using__(adapter) do
quote do
@behaviour Ecto.Adapter
@behaviour Ecto.Adapter.Migrations
@behaviour Ecto.Adapter.Transactions
@conn __MODULE__.Connection
@adapter unquote(adapter)
## Worker
@doc false
defmacro __using__(_) do
quote do
def __pool__ do
__MODULE__.Pool
end
end
end
@doc false
def start_link(repo, opts) do
Ecto.Adapters.SQL.start_link(@conn, @adapter, repo, opts)
end
@doc false
def stop(repo) do
Ecto.Adapters.SQL.stop(repo)
end
## Query
@doc false
def all(repo, query, params, opts) do
Ecto.Adapters.SQL.all(repo, @conn.all(query), query, params, opts)
end
@doc false
def update_all(repo, query, values, params, opts) do
Ecto.Adapters.SQL.count_all(repo, @conn.update_all(query, values), params, opts)
end
@doc false
def delete_all(repo, query, params, opts) do
Ecto.Adapters.SQL.count_all(repo, @conn.delete_all(query), params, opts)
end
@doc false
def insert(repo, source, params, returning, opts) do
{fields, values} = :lists.unzip(params)
sql = @conn.insert(source, fields, returning)
Ecto.Adapters.SQL.model(repo, sql, values, opts)
end
@doc false
def update(repo, source, filter, fields, returning, opts) do
{filter, values1} = :lists.unzip(filter)
{fields, values2} = :lists.unzip(fields)
sql = @conn.update(source, filter, fields, returning)
Ecto.Adapters.SQL.model(repo, sql, values1 ++ values2, opts)
end
@doc false
def delete(repo, source, filter, opts) do
{filter, values} = :lists.unzip(filter)
Ecto.Adapters.SQL.model(repo, @conn.delete(source, filter), values, opts)
end
## Transaction
@doc false
def transaction(repo, opts, fun) do
Ecto.Adapters.SQL.transaction(repo, opts, fun)
end
@doc false
def rollback(_repo, value) do
throw {:ecto_rollback, value}
end
## Migration
@doc false
def execute_ddl(repo, definition, opts) do
sql = @conn.execute_ddl(definition)
Ecto.Adapters.SQL.query(repo, sql, [], opts)
:ok
end
@doc false
def ddl_exists?(repo, object, opts) do
sql = @conn.ddl_exists(object)
{[{count}], _} = Ecto.Adapters.SQL.query(repo, sql, [], opts)
count > 0
end
end
end
@timeout 5000
alias Ecto.Adapters.SQL.Worker
@doc """
Runs custom SQL query on given repo.
## Options
* `:timeout` - The time in milliseconds to wait for the call to finish,
`:infinity` will wait indefinitely (default: 5000);
* `:log` - When false, does not log the query
## Examples
iex> Ecto.Adapters.SQL.query(MyRepo, "SELECT $1 + $2", [40, 2])
{[{42}], 1}
"""
def query(repo, sql, params, opts) do
opts = Keyword.put_new(opts, :timeout, @timeout)
log(repo, {:query, sql}, opts, fn ->
use_worker(repo, opts[:timeout], fn worker ->
Worker.query!(worker, sql, params, opts)
end)
end)
end
defp log(repo, tuple, opts, fun) do
if Keyword.get(opts, :log, true) do
repo.log(tuple, fun)
else
fun.()
end
end
defp pool!(repo) do
pid = Process.whereis(repo.__pool__)
if is_nil(pid) or not Process.alive?(pid) do
raise ArgumentError, "repo #{inspect repo} is not started"
end
pid
end
defp use_worker(repo, timeout, fun) do
pool = pool!(repo)
key = {:ecto_transaction_pid, pool}
{in_transaction, worker} =
case Process.get(key) do
{worker, _} ->
{true, worker}
nil ->
{false, :poolboy.checkout(pool, true, timeout)}
end
try do
fun.(worker)
after
if not in_transaction do
:poolboy.checkin(pool, worker)
end
end
end
@doc ~S"""
Starts a transaction for test.
This function work by starting a transaction and storing the connection
back in the pool with an open transaction. At the end of the test, the
transaction must be rolled back with `rollback_test_transaction`,
reverting all data added during tests.
**IMPORTANT:** Test transactions only work if the connection pool has
size of 1 and does not support any overflow.
## Example
The first step is to configure your database pool to have size of
1 and no max overflow. You set those options in your `config/config.exs`:
config :my_app, Repo,
size: 1,
max_overflow: 0
Since you don't want those options in your production database, we
typically recommend to create a `config/test.exs` and add the
following to the bottom of your `config/config.exs` file:
import_config "config/#{Mix.env}.exs"
Now with the test database properly configured, you can write
transactional tests:
# All tests in this module will be wrapped in transactions
defmodule PostTest do
# Tests that use the shared repository cannot be async
use ExUnit.Case
setup do
Ecto.Adapters.SQL.begin_test_transaction(TestRepo)
on_exit fn ->
Ecto.Adapters.SQL.rollback_test_transaction(TestRepo)
end
end
test "create comment" do
assert %Post{} = TestRepo.insert(%Post{})
end
end
"""
def begin_test_transaction(repo, opts \\ []) do
pool = pool!(repo)
opts = Keyword.put_new(opts, :timeout, @timeout)
:poolboy.transaction(pool, fn worker ->
do_begin(repo, worker, opts)
end, opts[:timeout])
end
@doc """
Ends a test transaction, see `begin_test_transaction/2`.
"""
def rollback_test_transaction(repo, opts \\ []) do
pool = pool!(repo)
opts = Keyword.put_new(opts, :timeout, @timeout)
:poolboy.transaction(pool, fn worker ->
do_rollback(repo, worker, opts)
end, opts[:timeout])
end
## Worker
@doc false
def start_link(connection, adapter, repo, opts) do
{pool_opts, worker_opts} = split_opts(repo, opts)
unless Code.ensure_loaded?(connection) do
raise """
could not find #{inspect connection}.
Please verify you have added #{inspect adapter} as a dependency:
{#{inspect adapter}, ">= 0.0.0"}
And remember to recompile Ecto afterwards by cleaning the current install:
mix deps.clean ecto
"""
end
:poolboy.start_link(pool_opts, {connection, worker_opts})
end
@doc false
def stop(repo) do
:poolboy.stop pool!(repo)
end
defp split_opts(repo, opts) do
pool_name = repo.__pool__
{pool_opts, worker_opts} = Keyword.split(opts, [:size, :max_overflow])
pool_opts = pool_opts
|> Keyword.put_new(:size, 5)
|> Keyword.put_new(:max_overflow, 10)
pool_opts =
[name: {:local, pool_name},
worker_module: Worker] ++ pool_opts
{pool_opts, worker_opts}
end
## Query
@doc false
def all(repo, sql, query, params, opts) do
{rows, _} = query(repo, sql, Map.values(params), opts)
fields = extract_fields(query.select.fields, query.sources)
Enum.map(rows, &process_row(&1, fields))
end
@doc false
def count_all(repo, sql, params, opts) do
{_, num} = query(repo, sql, Map.values(params), opts)
num
end
@doc false
def model(repo, sql, values, opts) do
case query(repo, sql, values, opts) do
{nil, 1} ->
{:ok, {}}
{[values], 1} ->
{:ok, values}
{_, 0} ->
{:error, :stale}
end
end
defp extract_fields(fields, sources) do
Enum.map fields, fn
{:&, _, [idx]} ->
{_source, model} = elem(sources, idx)
{length(model.__schema__(:fields)), model}
_ ->
{1, nil}
end
end
defp process_row(row, fields) do
Enum.map_reduce(fields, 0, fn
{1, nil}, idx ->
{elem(row, idx), idx + 1}
{count, model}, idx ->
if all_nil?(row, idx, count) do
{nil, idx + count}
else
{model.__schema__(:load, idx, row), idx + count}
end
end) |> elem(0)
end
defp all_nil?(_tuple, _idx, 0), do: true
defp all_nil?(tuple, idx, _count) when elem(tuple, idx) != nil, do: false
defp all_nil?(tuple, idx, count), do: all_nil?(tuple, idx + 1, count - 1)
## Transactions
@doc false
def transaction(repo, opts, fun) do
pool = pool!(repo)
opts = Keyword.put_new(opts, :timeout, @timeout)
worker = checkout_worker(pool, opts[:timeout])
try do
do_begin(repo, worker, opts)
value = fun.()
do_commit(repo, worker, opts)
{:ok, value}
catch
:throw, {:ecto_rollback, value} ->
do_rollback(repo, worker, opts)
{:error, value}
type, term ->
stacktrace = System.stacktrace
do_rollback(repo, worker, opts)
:erlang.raise(type, term, stacktrace)
after
checkin_worker(pool)
end
end
defp checkout_worker(pool, timeout) do
key = {:ecto_transaction_pid, pool}
case Process.get(key) do
{worker, counter} ->
Process.put(key, {worker, counter + 1})
worker
nil ->
worker = :poolboy.checkout(pool, true, timeout)
Worker.link_me(worker)
Process.put(key, {worker, 1})
worker
end
end
defp checkin_worker(pool) do
key = {:ecto_transaction_pid, pool}
case Process.get(key) do
{worker, 1} ->
Worker.unlink_me(worker)
:poolboy.checkin(pool, worker)
Process.delete(key)
{worker, counter} ->
Process.put(key, {worker, counter - 1})
end
:ok
end
## TODO: Make those in sync with the actual query
defp do_begin(repo, worker, opts) do
log(repo, {:query, "BEGIN TRANSACTION"}, opts, fn ->
Worker.begin!(worker, opts)
end)
end
defp do_rollback(repo, worker, opts) do
log(repo, {:query, "ROLLBACK"}, opts, fn ->
Worker.rollback!(worker, opts)
end)
end
defp do_commit(repo, worker, opts) do
log(repo, {:query, "COMMIT"}, opts, fn ->
Worker.commit!(worker, opts)
end)
end
end
|
lib/ecto/adapters/sql.ex
| 0.828835 | 0.515681 |
sql.ex
|
starcoder
|
defmodule Snitch.Data.Model.HostedPayment do
@moduledoc """
Hosted Payment API and utilities.
`HostedPayment` is a concrete payment subtype in Snitch. By `create/4`ing a
HostedPayment, the supertype Payment is automatically created in the same
transaction.
"""
use Snitch.Data.Model
alias Ecto.Multi
alias Snitch.Data.Schema.{HostedPayment, Payment}
alias Snitch.Data.Model.Payment, as: PaymentModel
alias Snitch.Data.Model.PaymentMethod, as: PaymentMethodModel
alias SnitchPayments.PaymentMethodCode, as: Codes
alias Snitch.Core.Tools.MultiTenancy.MultiQuery
@doc """
Creates both `Payment` and `HostedPayment` records in a transaction for Order
represented by `order_id`.
* `payment_params` are validated using
`Snitch.Data.Schema.Payment.changeset/3` with the `:create` action and
because `slug` and `order_id` are passed explicitly to this function,
they'll be ignored if present in `payment_params`.
* `hosted_payment_params` are validated using
`Snitch.Data.Schema.HostedPayment.changeset/3` with the `:create` action.
"""
@spec create(String.t(), non_neg_integer(), map, map, non_neg_integer()) ::
{:ok, %{card_payment: HostedPayment.t(), payment: Payment.t()}}
| {:error, Ecto.Changeset.t()}
def create(slug, order_id, payment_params, hosted_method_params, payment_method_id) do
payment = struct(Payment, payment_params)
{:ok, hosted_method} = PaymentMethodModel.get(payment_method_id)
more_payment_params = %{
order_id: order_id,
payment_type: Codes.hosted_payment(),
payment_method_id: hosted_method.id,
slug: slug
}
payment_changeset = Payment.create_changeset(payment, more_payment_params)
Multi.new()
|> MultiQuery.insert(:payment, payment_changeset)
|> Multi.run(:hosted_payment, fn %{payment: payment} ->
hosted_method_params = Map.put(hosted_method_params, :payment_id, payment.id)
QH.create(HostedPayment, hosted_method_params, Repo)
end)
|> Repo.transaction()
|> case do
{:ok, data} ->
{:ok, data}
{:error, _, error_data, _} ->
{:error, error_data}
end
end
@doc """
Updates `HostedPayment` and `Payment` together.
Everything except the `:payment_type` and `amount` can be changed, because by
changing the type, `HostedPayment` will have to be deleted.
* `hosted_method_params` are validated using `HostedPayment.changeset/3` with the
`:update` action.
* `payment_params` are validated using `Schema.Payment.changeset/3` with the
`:update` action.
"""
@spec update(HostedPayment.t(), map, map) ::
{:ok, %{card_payment: HostedPayment.t(), payment: Payment.t()}}
| {:error, Ecto.Changeset.t()}
def update(hosted_payment, hosted_method_params, payment_params) do
hosted_payment_changeset =
HostedPayment.update_changeset(hosted_payment, hosted_method_params)
Multi.new()
|> MultiQuery.update(:hosted_payment, hosted_payment_changeset)
|> Multi.run(:payment, fn _ ->
PaymentModel.update(nil, Map.put(payment_params, :id, hosted_payment.payment_id))
end)
|> Repo.transaction()
|> case do
{:ok, data} ->
{:ok, data}
{:error, _, error_data, _} ->
{:error, error_data}
end
end
@doc """
Fetches the struct but does not preload `:payment` association.
"""
@spec get(map | non_neg_integer) :: {:ok, HostedPayment.t()} | {:error, atom}
def get(query_fields_or_primary_key) do
QH.get(HostedPayment, query_fields_or_primary_key, Repo)
end
@spec get_all() :: [HostedPayment.t()]
def get_all, do: Repo.all(HostedPayment)
@doc """
Fetch the HostedPayment identified by the `payment_id`.
> Note that the `:payment` association is not loaded.
"""
@spec from_payment(non_neg_integer) :: HostedPayment.t()
def from_payment(payment_id) do
{:ok, hosted_payment} = get(%{payment_id: payment_id})
hosted_payment
end
end
|
apps/snitch_core/lib/core/data/model/payment/hosted_payment.ex
| 0.874981 | 0.495728 |
hosted_payment.ex
|
starcoder
|
defmodule Limiter.Result do
@moduledoc """
The struct is the result of calling `Limiter.checkout/5` function.
"""
@typedoc """
Indicates if an action is allowed or rate limited.
"""
@type allowed :: boolean
@typedoc """
The number of actions that is allowed before reaching the rate limit.
"""
@type remaining :: non_neg_integer
@typedoc """
How long (in milliseconds) it will take for the given key to get to the
initial state.
"""
@type reset_after :: non_neg_integer
@typedoc """
How long (in milliseconds) it will take for the next action (associated with
the given key) to be allowed.
"""
@type retry_after :: non_neg_integer
@typedoc """
The result map.
"""
@type t :: %__MODULE__{allowed: allowed, remaining: remaining,
reset_after: reset_after, retry_after: retry_after}
defstruct allowed: true, remaining: 0, reset_after: 0, retry_after: 0
end
defmodule Limiter do
@moduledoc """
Rate limiter implementation based on Generic Cell Rate Algorithm (GCRA).
The limiter checks if a given key exceeded a rate limit and returns the
result with additional info.
For more details, see the below links:
* [Rate limiting, Cells and GCRA](https://brandur.org/rate-limiting)
* [Go throttled library](https://github.com/throttled/throttled)
* [GCRA algorithm](https://en.wikipedia.org/wiki/Generic_cell_rate_algorithm)
Example usage:
Limiter.checkout({Limiter.Storage.ConCache, :storage}, "key", 10_000, 5)
"""
alias Limiter.Result
@typedoc """
Tuple that contains the module used for storage and options for the given
storage.
"""
@type storage :: {storage_module, storage_options}
@typedoc """
Storage module that implements `Limiter.Storage` behaviour.
"""
@type storage_module :: module
@typedoc """
Options for a storage module.
These options may differ for different storage implementations.
"""
@type storage_options :: term
@typedoc """
The key associated with an action which is rate limited.
"""
@type key :: term
@typedoc """
The weight of an action.
Typically it's set to `1`. The more expensive actions may use greater
value.
"""
@type quantity :: pos_integer
@typedoc """
The period of time that along with `limit` defines the rate limit.
"""
@type period :: pos_integer
@typedoc """
The number of actions (in case the `quantity` is `1`) that along with
the `period` defines the rate limit.
"""
@type limit :: non_neg_integer
@doc """
Checks if an action associated with a key is allowed.
"""
@spec checkout(storage, key, quantity, period, limit) :: Result.t
def checkout(storage, key, quantity \\ 1, period, limit) do
now = time()
dvt = period * limit
inc = quantity * period
max_tat = now + dvt
tat = get_and_store(storage, key, now, inc, max_tat)
new_tat = tat + inc
allow_at = new_tat - dvt
diff = now - allow_at
{result, ttl} = if diff < 0 do
retry_after = if inc <= dvt, do: -diff, else: 0
{%Result{allowed: false, retry_after: retry_after}, tat - now}
else
{%Result{}, new_tat - now}
end
next = dvt - ttl
if next > -period do
rem = round(next / period)
%{result | remaining: max(rem, 0), reset_after: ttl}
else
%{result | reset_after: ttl}
end
end
@doc """
Resets the value associated with the key.
"""
@spec reset(storage, key) :: :ok
def reset({mod, opts}, key),
do: mod.reset(opts, key)
defp get_and_store({mod, opts}, key, now, inc, max_tat),
do: mod.get_and_store(opts, key, now, inc, max_tat)
defp time(),
do: System.system_time(:milliseconds)
end
|
lib/limiter.ex
| 0.885207 | 0.643665 |
limiter.ex
|
starcoder
|
defmodule Pow.Store.CredentialsCache do
@moduledoc """
Default module for credentials session storage.
A key (session id) is used to store, fetch, or delete credentials. The
credentials are expected to take the form of
`{credentials, session_metadata}`, where session metadata is data exclusive
to the session id.
This module also adds two utility methods:
* `users/2` - to list all current users
* `sessions/2` - to list all current sessions
## Custom credentials cache module
Pow may use the utility methods in this module. To ensure all required
methods has been implemented in a custom credentials cache module, the
`@behaviour` of this module should be used:
defmodule MyApp.CredentialsStore do
use Pow.Store.Base,
ttl: :timer.minutes(30),
namespace: "credentials"
@behaviour Pow.Store.CredentialsCache
@impl Pow.Store.CredentialsCache
def users(config, struct) do
# ...
end
@impl Pow.Store.CredentialsCache
def put(config, key, value) do
# ...
end
end
## Configuration options
* `:reload` - boolean value for whether the user object should be loaded
from the context. Defaults false.
"""
alias Pow.{Operations, Store.Base}
@callback users(Base.config(), module()) :: [any()]
@callback sessions(Base.config(), map()) :: [binary()]
@callback put(Base.config(), binary(), {map(), list()}) :: :ok
use Base,
ttl: :timer.minutes(30),
namespace: "credentials"
@doc """
List all users for a certain user struct.
Sessions for a user can be looked up with `sessions/3`.
"""
@spec users(Base.config(), module()) :: [any()]
def users(config, struct) do
config
|> Base.all(backend_config(config), [struct, :user, :_])
|> Enum.map(fn {[^struct, :user, _id], user} ->
user
end)
end
@doc """
List all existing sessions for the user fetched from the backend store.
"""
@spec sessions(Base.config(), map()) :: [binary()]
def sessions(config, user), do: fetch_sessions(config, backend_config(config), user)
# TODO: Refactor by 1.1.0
defp fetch_sessions(config, backend_config, user) do
{struct, id} = user_to_struct_id!(user, [])
config
|> Base.all(backend_config, [struct, :user, id, :session, :_])
|> Enum.map(fn {[^struct, :user, ^id, :session, session_id], _value} ->
session_id
end)
end
@doc """
Add user credentials with the session id to the backend store.
The credentials are expected to be in the format of
`{credentials, metadata}`.
The following three key-value pairs will be inserted:
- `{session_id, {[user_struct, :user, user_id], metadata}}`
- `{[user_struct, :user, user_id], user}`
- `{[user_struct, :user, user_id, :session, session_id], inserted_at}`
If the metadata has a `:fingerprint`, any active sessions for the user with the
same `:fingerprint` in their metadata will be deleted.
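For example, where `user` is a `MyApp.User` struct (illustrative) with a
primary key, and `session_id` and `fingerprint` are generated elsewhere:
    Pow.Store.CredentialsCache.put(config, session_id, {user, [fingerprint: fingerprint]})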
"""
@spec put(Base.config(), binary(), {map(), list()}) :: :ok
def put(config, session_id, {user, metadata}) do
{struct, id} = user_to_struct_id!(user, [])
user_key = [struct, :user, id]
session_key = [struct, :user, id, :session, session_id]
records = [
{session_id, {user_key, metadata}},
{user_key, user},
{session_key, :os.system_time(:millisecond)}
]
delete_user_sessions_with_fingerprint(config, user, metadata)
Base.put(config, backend_config(config), records)
end
@doc """
Delete the user credentials data from the backend store.
The following two key-value pairs will be deleted:
- `{session_id, {[user_struct, :user, user_id], metadata}}`
- `{[user_struct, :user, user_id, :session, session_id], inserted_at}`
The `{[user_struct, :user, user_id], user}` key-value is expected to expire
when reaching its TTL.
"""
@impl true
def delete(config, session_id) do
backend_config = backend_config(config)
case Base.get(config, backend_config, session_id) do
{[struct, :user, key_id], _metadata} ->
session_key = [struct, :user, key_id, :session, session_id]
Base.delete(config, backend_config, session_id)
Base.delete(config, backend_config, session_key)
# TODO: Remove by 1.1.0
{user, _metadata} when is_map(user) ->
Base.delete(config, backend_config, session_id)
:not_found ->
:ok
end
end
@doc """
Fetch user credentials from the backend store from session id.
"""
@impl true
@spec get(Base.config(), binary()) :: {map(), list()} | :not_found
def get(config, session_id) do
backend_config = backend_config(config)
with {user_key, metadata} when is_list(user_key) <- Base.get(config, backend_config, session_id),
user when is_map(user) <- Base.get(config, backend_config, user_key),
user when not is_nil(user) <- maybe_reload(user, config) do
{user, metadata}
else
# TODO: Remove by 1.1.0
{user, metadata} when is_map(user) -> {user, metadata}
:not_found -> :not_found
nil -> :not_found
end
end
defp maybe_reload(user, config) do
# TODO: By 1.1.0 set this to `true` and update docs
case Keyword.get(config, :reload, false) do
true -> Operations.reload(user, fetch_pow_config!(config))
_any -> user
end
end
defp fetch_pow_config!(config), do: Keyword.get(config, :pow_config) || raise "No `:pow_config` value found in the store config."
defp user_to_struct_id!(%mod{} = user, config) do
key_values =
user
|> fetch_primary_key_values!(config)
|> Enum.sort(&elem(&1, 0) < elem(&2, 0))
|> case do
[id: id] -> id
clauses -> clauses
end
{mod, key_values}
end
defp user_to_struct_id!(_user, _config), do: raise "Only structs can be stored as credentials"
defp fetch_primary_key_values!(user, config) do
pow_config = Keyword.get(config, :pow_config)
user
|> Operations.fetch_primary_key_values(pow_config)
|> case do
{:error, error} -> raise error
{:ok, clauses} -> clauses
end
end
defp delete_user_sessions_with_fingerprint(config, user, metadata) do
case Keyword.get(metadata, :fingerprint) do
nil -> :ok
fingerprint -> do_delete_user_sessions_with_fingerprint(config, user, fingerprint)
end
end
defp do_delete_user_sessions_with_fingerprint(config, user, fingerprint) do
backend_config = backend_config(config)
config
|> sessions(user)
|> Enum.each(fn session_id ->
with {_user_key, metadata} when is_list(metadata) <- Base.get(config, backend_config, session_id),
^fingerprint <- Keyword.get(metadata, :fingerprint) do
delete(config, session_id)
end
end)
end
# TODO: Remove by 1.1.0
@doc false
@deprecated "Use `users/2` or `sessions/2` instead"
def user_session_keys(config, backend_config, struct) do
config
|> Base.all(backend_config, [struct, :user, :_, :session, :_])
|> Enum.map(fn {key, _value} ->
key
end)
end
# TODO: Remove by 1.1.0
@doc false
@deprecated "Use `sessions/2` instead"
def sessions(config, backend_config, user), do: fetch_sessions(config, backend_config, user)
end
lib/pow/store/credentials_cache.ex
defmodule Prove do
@moduledoc """
Prove provides the macros `prove` and `batch` to write simple `ExUnit` tests
more concisely.
A `prove` is only helpful for elementary tests. Prove generates one test with
one assert for every `prove`.
The disadvantage of these macros is that the tests contain fewer
descriptions. For this reason, and also when a `prove` looks too complicated, a
regular `test` is preferable.
## Example
```elixir
defmodule NumTest do
use ExUnit.Case
import Prove
defmodule Num do
def check(0), do: :zero
def check(x) when is_integer(x) do
case rem(x, 2) do
0 -> :even
1 -> :odd
end
end
def check(_), do: :error
end
describe "check/1" do
prove Num.check(0) == :zero
batch "returns :odd or :even" do
prove Num.check(1) == :odd
prove Num.check(2) == :even
prove "for big num", Num.check(2_000) == :even
end
batch "returns :error" do
prove Num.check("1") == :error
prove Num.check(nil) == :error
end
end
end
```
The example above generates the following tests:
```shell
$> mix test test/num_test.exs --trace --seed 0
NumTest [test/num_test.exs]
* prove check/1 (1) (0.00ms) [L#20]
* prove check/1 returns :odd or :even (1) (0.00ms) [L#23]
* prove check/1 returns :odd or :even (2) (0.00ms) [L#24]
* prove check/1 returns :odd or :even for big num (1) (0.00ms) [L#25]
* prove check/1 returns :error (1) (0.00ms) [L#29]
* prove check/1 returns :error (2) (0.00ms) [L#30]
Finished in 0.08 seconds (0.00s async, 0.08s sync)
6 proves, 0 failures
Randomized with seed 0
```
The benefit of `prove` is that tests with multiple asserts can be avoided.
The example above with regular `test`s:
```elixir
...
describe "check/1" do
test "returns :zero" do
assert Num.check(0) == :zero
end
test "returns :odd or :even" do
assert Num.check(1) == :odd
assert Num.check(2) == :even
assert Num.check(2_000) == :even
end
test "returns :error" do
assert Num.check("1") == :error
assert Num.check(nil) == :error
end
end
...
```
```shell
$> mix test test/num_test.exs --trace --seed 0
NumTest [test/num_test.exs]
* test check/1 returns :zero (0.00ms) [L#36]
* test check/1 returns :odd or :even (0.00ms) [L#40]
* test check/1 returns :error (0.00ms) [L#46]
Finished in 0.03 seconds (0.00s async, 0.03s sync)
3 tests, 0 failures
Randomized with seed 0
```
"""
@operators [:==, :!=, :===, :!==, :<=, :>=, :<, :>, :=~]
@doc """
A macro to write a simple test more concisely.
Code like:
```elixir
prove identity(5) == 5
prove identity(6) > 5
prove "check:", identity(7) == 7
```
is equivalent to:
```elixir
test "(1)" do
assert identity(5) == 5
end
test "(2)" do
assert identity(6) > 5
end
test "check: (1)" do
assert identity(7) == 7
end
```
`prove` supports the operators `==`, `!=`, `===`, `!==`, `<`, `<=`, `>`, `>=`,
and `=~`.
"""
defmacro prove(description \\ "", expr)
defmacro prove(description, {operator, _, [_, _]} = expr)
when is_binary(description) and operator in @operators do
quote_prove(
update_description(description, __CALLER__),
expr,
__CALLER__
)
end
defmacro prove(_description, expr) do
raise ArgumentError, message: "Unsupported do: #{Macro.to_string(expr)}"
end
@doc """
Creates a batch of proves.
A batch adds the `description` to every `prove`. This can be used to
group `prove`s with a context. Every prove is still its own `test`.
Code like:
```
batch "valid" do
prove 1 == 1
prove "really", 2 == 2
end
```
is equivalent to:
```
test "valid (1)" do
assert 1 == 1
end
test "valid really (1)" do
assert 2 == 2
end
```
"""
defmacro batch(description, do: {:__block__, _meta, block}) do
{:__block__, [], quote_block(description, block, __CALLER__)}
end
defmacro batch(description, do: block) when is_tuple(block) do
{:__block__, [], quote_block(description, [block], __CALLER__)}
end
defp quote_block(description, block, caller) do
Enum.map(block, fn
{:prove, meta, [op]} ->
quote_block_prove(description, op, meta)
{:prove, meta, [prove_description, op]} ->
quote_block_prove("#{description} #{prove_description}", op, meta)
_error ->
raise CompileError,
file: caller.file,
line: caller.line,
description: "A batch can only contain prove/1/2 functions"
end)
end
defp quote_block_prove(description, op, meta) do
{marker, _meta, children} =
quote do
prove unquote(description), unquote(op)
end
{marker, meta, children}
end
defp quote_prove(
description,
{operator, _meta, [_, _]} = expr,
%{module: mod, file: file, line: line}
)
when is_binary(description) and operator in @operators do
assertion = quote_assertion(expr)
quote bind_quoted: [
assertion: Macro.escape(assertion),
description: description,
file: file,
line: line,
mod: mod
] do
name = ExUnit.Case.register_test(mod, file, line, :prove, description, [])
def unquote(name)(_) do
unquote(assertion)
rescue
error in [ExUnit.AssertionError] -> reraise(error, __STACKTRACE__)
end
end
end
defp quote_assertion({op, meta, [left, right]} = expr) do
{marker, _meta, children} =
quote do
unless unquote(expr) do
raise ExUnit.AssertionError,
expr: unquote(Macro.escape(expr)),
message: "Prove with #{to_string(unquote(op))} failed",
left: unquote(left),
right: unquote(right)
end
end
{marker, meta, children}
end
defp update_description(description, caller) do
case Module.get_attribute(caller.module, :prove_counter) do
nil ->
Module.register_attribute(caller.module, :count, persist: false)
Module.put_attribute(caller.module, :prove_counter, Map.put(%{}, description, 1))
join(description, 1)
%{^description => value} = map ->
inc = value + 1
Module.put_attribute(caller.module, :prove_counter, Map.put(map, description, inc))
join(description, inc)
map ->
Module.put_attribute(caller.module, :prove_counter, Map.put(map, description, 1))
join(description, 1)
end
end
defp join("", b), do: "(#{b})"
defp join(a, b), do: "#{a} (#{b})"
end
lib/prove.ex
defmodule Ecto.Query.BuilderUtil do
@moduledoc false
alias Ecto.Query.Query
@expand_sigils [:sigil_c, :sigil_C, :sigil_s, :sigil_S, :sigil_w, :sigil_W]
@doc """
Smart escapes a query expression.
Everything that is a query expression will be escaped, foreign (elixir)
expressions will not be escaped so that they will be evaluated in their
place. This means that everything foreign will be inserted as-is into
the query.
"""
@spec escape(Macro.t, Keyword.t) :: Macro.t
def escape(expr, vars)
# var.x - where var is bound
def escape({ { :., _, [{ var, _, context}, right] }, _, [] }, vars)
when is_atom(var) and is_atom(context) and is_atom(right) do
left_escaped = escape_var(var, vars)
dot_escaped = { :{}, [], [:., [], [left_escaped, right]] }
{ :{}, [], [dot_escaped, [], []] }
end
# interpolation
def escape({ :^, _, [arg] }, _vars) do
arg
end
# ecto types
def escape({ :binary, _, [arg] }, vars) do
arg_escaped = escape(arg, vars)
{ Ecto.Binary, arg_escaped }
end
def escape({ :array, _, [arg, type] }, vars) do
arg = escape(arg, vars)
type = escape(type, vars)
type = quote(do: :"Elixir.Ecto.Query.BuilderUtil".check_array(unquote(type)))
{ :{}, [], [Ecto.Array, arg, type] }
# TODO: Check that arg is valid and that type is an atom
end
# field macro
def escape({ :field, _, [{ var, _, context }, field] }, vars)
when is_atom(var) and is_atom(context) do
var = escape_var(var, vars)
field = escape(field, vars)
field = quote(do: :"Elixir.Ecto.Query.BuilderUtil".check_field(unquote(field)))
dot = { :{}, [], [:., [], [var, field]] }
{ :{}, [], [dot, [], []] }
end
# binary literal
def escape({ :<<>>, _, _ } = bin, _vars), do: bin
# sigils
def escape({ name, _, _ } = sigil, _vars)
when name in @expand_sigils do
sigil
end
# ops & functions
def escape({ name, meta, args }, vars)
when is_atom(name) and is_list(args) do
args = Enum.map(args, &escape(&1, vars))
{ :{}, [], [name, meta, args] }
end
# list
def escape(list, vars) when is_list(list) do
Enum.map(list, &escape(&1, vars))
end
# literals
def escape(literal, _vars) when is_binary(literal), do: literal
def escape(literal, _vars) when is_boolean(literal), do: literal
def escape(literal, _vars) when is_number(literal), do: literal
def escape(nil, _vars), do: nil
# everything else is not allowed
def escape(other, _vars) do
raise Ecto.QueryError, reason: "`#{Macro.to_string(other)}` is not a valid query expression"
end
@doc """
Escapes a variable according to the given binds.
An escaped variable is represented internally as `&0`, `&1` and
so on. This function is also responsible for handling join vars
which use a `count_binds` variable assigned to the `Ecto.Query`
to pass the required index information.
"""
@spec escape_var(atom, Keyword.t) :: Macro.t | no_return
def escape_var(var, vars)
def escape_var(var, vars) do
ix = vars[var]
if var != :_ and ix do
{ :{}, [], [:&, [], [ix]] }
else
raise Ecto.QueryError, reason: "unbound variable `#{var}` in query"
end
end
@doc """
Escapes dot calls in query expressions.
A dot may be in three formats, all shown in the examples below.
Returns :error if it isn't a dot expression.
## Examples
iex> escape_dot(quote(do: x.y), [x: 0])
{{:{}, [], [:&, [], [0]]}, :y}
iex> escape_dot(quote(do: x.y()), [x: 0])
{{:{}, [], [:&, [], [0]]}, :y}
iex> escape_dot(quote(do: field(x, ^:y)), [x: 0])
{ {:{}, [], [:&, [], [0]]},
{{:., [], [:"Elixir.Ecto.Query.BuilderUtil", :check_field]}, [], [:y]} }
iex> escape_dot(quote(do: x), [x: 0])
:error
"""
@spec escape_dot(Macro.t, Keyword.t) :: { Macro.t, Macro.t } | :error
def escape_dot({ :field, _, [{ var, _, context }, field] }, vars)
when is_atom(var) and is_atom(context) do
var = escape_var(var, vars)
field = escape(field, vars)
field = quote(do: :"Elixir.Ecto.Query.BuilderUtil".check_field(unquote(field)))
{ var, field }
end
def escape_dot({ { :., _, [{ var, _, context }, field] }, _, [] }, vars)
when is_atom(var) and is_atom(context) and is_atom(field) do
{ escape_var(var, vars), field }
end
def escape_dot(_, _vars) do
:error
end
@doc """
Escapes a list of bindings as a list of atoms.
## Examples
iex> escape_binding(quote do: [x, y, z])
[x: 0, y: 1, z: 2]
iex> escape_binding(quote do: [x, y, x])
** (Ecto.QueryError) variable `x` is bound twice
"""
def escape_binding(binding) when is_list(binding) do
vars = binding |> Stream.with_index |> Enum.map(&escape_bind(&1))
bound_vars = vars |> Keyword.keys |> Enum.filter(&(&1 != :_))
dup_vars = bound_vars -- Enum.uniq(bound_vars)
unless dup_vars == [] do
raise Ecto.QueryError, reason: "variable `#{hd dup_vars}` is bound twice"
end
vars
end
def escape_binding(bind) do
raise Ecto.QueryError, reason: "binding should be list of variables, got: #{Macro.to_string(bind)}"
end
defp escape_bind({ { var, _ } = tuple, _ }) when is_atom(var),
do: tuple
defp escape_bind({ { var, _, context }, ix }) when is_atom(var) and is_atom(context),
do: { var, ix }
defp escape_bind({ bind, _ix }),
do: raise(Ecto.QueryError, reason: "binding list should contain only variables, got: #{Macro.to_string(bind)}")
@doc """
Escapes simple expressions.
An expression may be a single variable `x`, representing all fields in that
entity, a field `x.y`, or a list of fields and variables.
## Examples
iex> escape_fields_and_vars(quote(do: [x.x, y.y]), [x: 0, y: 1])
[{{:{}, [], [:&, [], [0]]}, :x},
{{:{}, [], [:&, [], [1]]}, :y}]
iex> escape_fields_and_vars(quote(do: x), [x: 0, y: 1])
[{:{}, [], [:&, [], [0]]}]
"""
@spec escape_fields_and_vars(Macro.t, Keyword.t) :: Macro.t | no_return
def escape_fields_and_vars(ast, vars) do
Enum.map(List.wrap(ast), &do_escape_expr(&1, vars))
end
defp do_escape_expr({ var, _, context }, vars) when is_atom(var) and is_atom(context) do
escape_var(var, vars)
end
defp do_escape_expr(dot, vars) do
case escape_dot(dot, vars) do
{ _, _ } = var_field ->
var_field
:error ->
raise Ecto.QueryError, reason: "malformed query expression"
end
end
@doc """
Counts the bindings in a query expression.
## Examples
iex> count_binds(Ecto.Query.Query[joins: [1,2,3]])
3
iex> count_binds(Ecto.Query.Query[from: 0, joins: [1,2]])
3
"""
def count_binds(Query[from: from, joins: joins]) do
count = if from, do: 1, else: 0
count + length(joins)
end
@doc """
Applies a query at compilation time or at runtime.
This function is responsible to check if a given query is an
`Ecto.Query.Query` record at compile time or not and act
accordingly.
If a query is available, it invokes the `apply` function in the
given `module`, otherwise, it delegates the call to runtime.
It is important to keep in mind the complexities introduced
by this function. In particular, a Query[] is a mixture of escaped
and unescaped expressions which makes it impossible for this
function to properly escape or unescape it at compile/runtime.
For this reason, the apply function should be ready to handle
arguments in both escaped and unescaped form.
For example, take into account the `SelectBuilder`:
select = Ecto.Query.QueryExpr[expr: expr, file: env.file, line: env.line]
BuilderUtil.apply_query(query, __MODULE__, [select], env)
`expr` is already an escaped expression and we must not escape
it again. However, it is wrapped in an Ecto.Query.QueryExpr,
which must be escaped! Furthermore, the `apply/2` function
in `SelectBuilder` very likely will inject the QueryExpr inside
Query, which again, is a mixture of escaped and unescaped expressions.
That said, you need to obey the following rules:
1. In order to call this function, the arguments must be escapable
values supported by the `escape/1` function below;
2. The apply function may not manipulate the given arguments,
with the exception of the query.
In particular, when invoked at compilation time, all arguments
(except the query) will be escaped, so they can be injected into
the query properly, but they will be in their runtime form
when invoked at runtime.
"""
def apply_query(query, module, args, env) do
query = Macro.expand(query, env)
args = lc i inlist args, do: escape_query(i)
case unescape_query(query) do
Query[] = unescaped ->
apply(module, :apply, [unescaped|args]) |> escape_query
_ ->
quote do: unquote(module).apply(unquote_splicing([query|args]))
end
end
# Unescapes an `Ecto.Query.Query` record.
defp unescape_query({ :{}, _meta, [Query|_] = query }),
do: list_to_tuple(query)
defp unescape_query(other),
do: other
# Escapes an `Ecto.Query.Query` and associated records.
defp escape_query(Query[] = query),
do: { :{}, [], tuple_to_list(query) }
defp escape_query(other),
do: other
@doc """
Called by escaper at runtime to verify that `field/2` is given an atom.
"""
def check_field(field) do
if is_atom(field) do
field
else
raise Ecto.QueryError, reason: "field name should be an atom, given: `#{inspect field}`"
end
end
@doc """
Called by escaper at runtime to verify that `array/2` is given an atom.
"""
def check_array(type) do
if is_atom(type) do
type
else
raise Ecto.QueryError, reason: "array type should be an atom, given: `#{inspect type}`"
end
end
end
lib/ecto/query/builder_util.ex
defmodule Instruments.CustomFunctions do
@moduledoc """
Creates custom prefixed functions
Often, a module will have functions that all have a common prefix.
It's somewhat tedious to have to put this prefix in every call to
every metric function. Using this module can help somewhat.
When you `use` this module, it defines custom, module-specific metrics
functions that include your prefix. For example:
```
defmodule Prefixed do
use Instruments.CustomFunctions, prefix: "my.module"
def do_something() do
increment("do_something_counts")
do_another_thing()
end
def long_running() do
measure("timed_fn", &compute/0)
end
defp compute(), do: Process.sleep(10_000)
defp do_another_thing, do: 3
end
```
In the above example, we increment `do_something_counts` and `timed_fn`, yet
the metrics emitted are `my.module.do_something_counts` and `my.module.timed_fn`.
"""
defmacro __using__(opts) do
prefix =
case Keyword.fetch!(opts, :prefix) do
prefix_string when is_bitstring(prefix_string) ->
prefix_string
ast ->
{computed_prefix, _} = Code.eval_quoted(ast)
computed_prefix
end
prefix_with_dot = "#{prefix}."
quote do
use Instruments
@doc false
def increment(key, value \\ 1, options \\ []) do
Instruments.increment([unquote(prefix_with_dot), key], value, options)
end
@doc false
def decrement(key, value \\ 1, options \\ []) do
Instruments.decrement([unquote(prefix_with_dot), key], value, options)
end
@doc false
def gauge(key, value, options \\ []) do
Instruments.gauge([unquote(prefix_with_dot), key], value, options)
end
@doc false
def histogram(key, value, options \\ []) do
Instruments.histogram([unquote(prefix_with_dot), key], value, options)
end
@doc false
def timing(key, value, options \\ []) do
Instruments.timing([unquote(prefix_with_dot), key], value, options)
end
@doc false
def set(key, value, options \\ []) do
Instruments.set([unquote(prefix_with_dot), key], value, options)
end
@doc false
def measure(key, options \\ [], func) do
Instruments.measure([unquote(prefix_with_dot), key], options, func)
end
end
end
end
lib/custom_functions.ex
defmodule ExAws.Transcribe do
@moduledoc """
Operations for AWS Transcribe
"""
import ExAws.Utils, only: [camelize_keys: 2]
@version "2017-10-26"
@doc """
Starts an asynchronous job to transcribe speech to text.
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_StartTranscriptionJob.html>
Example:
```
Transcribe.start_transcription_job("MyJob", "s3://mybucket/myfile.mp3", "mp3", "en-US", settings: [show_speaker_labels: true])
```
"""
@type media :: [
media_file_uri: binary
]
@type start_transcription_job_settings :: [
channel_identification: boolean,
max_speaker_labels: integer,
show_speaker_labels: boolean,
vocabulary_name: binary
]
@type start_transcription_job_execution_settings :: [
allow_deferred_execution: boolean,
data_access_role_arn: binary
]
@type start_transcription_job_opts :: [
language_code: binary,
media: media,
media_format: binary,
transcription_job_name: binary,
media_sample_rate_hertz: integer,
output_bucket_name: binary,
settings: start_transcription_job_settings,
job_execution_settings: start_transcription_job_execution_settings
]
@spec start_transcription_job(
name :: binary,
uri :: binary,
format :: binary,
language :: binary,
start_transcription_job_opts
) :: ExAws.Operation.JSON.t()
def start_transcription_job(name, uri, format, language, opts \\ []) do
params =
%{
"TranscriptionJobName" => name,
"Media" => %{"MediaFileUri" => uri},
"MediaFormat" => format,
"LanguageCode" => language
}
|> Map.merge(normalize_opts(opts))
request(:start_transcription_job, params)
end
@doc """
Lists transcription jobs with the specified status.
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_ListTranscriptionJobs.html>
Examples:
```
# List jobs
ExAws.Transcribe.list_transcription_jobs()
# List completed jobs
ExAws.Transcribe.list_transcription_jobs(status: "COMPLETED")
```
"""
@type list_transcription_jobs_opts :: [
job_name_contains: binary,
max_results: integer,
next_token: binary,
status: binary
]
@spec list_transcription_jobs(opts :: list_transcription_jobs_opts) :: ExAws.Operation.JSON.t()
def list_transcription_jobs(opts \\ []) do
request(:list_transcription_jobs, normalize_opts(opts))
end
@doc """
Returns information about a transcription job.
Doc: <https://docs.aws.amazon.com/transcribe/latest/dg/API_GetTranscriptionJob.html>
Example:
```
ExAws.Transcribe.get_transcription_job("Job1")
```
"""
@spec get_transcription_job(name :: binary) :: ExAws.Operation.JSON.t()
def get_transcription_job(name) do
params = %{"TranscriptionJobName" => name}
request(:get_transcription_job, params)
end
defp request(action, params) do
operation =
action
|> Atom.to_string()
|> Macro.camelize()
ExAws.Operation.JSON.new(:transcribe, %{
data: Map.merge(%{"Version" => @version}, params),
headers: [
{"X-Amz-Target", "Transcribe.#{operation}"},
{"content-type", "application/x-amz-json-1.1"}
]
})
end
defp normalize_opts(opts) do
opts
|> Enum.into(%{})
|> camelize_keys(deep: true)
end
end
lib/ex_aws/transcribe.ex
defmodule PixelFont.RectilinearShape.EdgeGenerator do
@moduledoc false
alias PixelFont.RectilinearShape.Edge
@spec get_edges([charlist()], Edge.orientation()) :: [[Edge.t()]]
def get_edges(bmp, orientation) do
# For a vertical scan, transpose the bitmap so each "line" is a column.
data =
case orientation do
:horizontal -> bmp
:vertical -> bmp |> Enum.zip() |> Enum.map(&Tuple.to_list/1)
end
data
|> Stream.with_index()
|> Stream.map(fn {line, row} ->
line
# Split the scanline into runs of identical pixels, as {pixel, run_length}.
|> Enum.chunk_by(& &1)
|> Enum.map(&{hd(&1), length(&1)})
# Emit a pair of opposing edges for every run of filled (?1) pixels.
|> Enum.reduce({0, []}, &get_edges_reduce_fun(&1, row, &2, orientation))
|> elem(1)
|> Enum.reverse()
|> Enum.reduce([], fn edges, edges2 -> edges2 ++ edges end)
end)
|> Enum.reject(fn x -> x == [] end)
end
defp get_edges_reduce_fun(chunk, row, acc, orientation)
defp get_edges_reduce_fun({?0, len}, _, {pos, list}, _) do
{pos + len, list}
end
defp get_edges_reduce_fun({?1, len}, row, {pos, list}, :horizontal) do
edges = [
{:horizontal, row + 1, pos, pos + len, :negative},
{:horizontal, row, pos, pos + len, :positive}
]
{pos + len, [edges | list]}
end
defp get_edges_reduce_fun({?1, len}, row, {pos, list}, :vertical) do
edges = [
{:vertical, row + 1, pos, pos + len, :positive},
{:vertical, row, pos, pos + len, :negative}
]
{pos + len, [edges | list]}
end
@spec union([Edge.t()], [Edge.t()]) :: [Edge.t()]
def union(edges1, edges2) do
e1_by_u = Enum.group_by(edges1, &elem(&1, 1))
edges2
|> Enum.reduce(e1_by_u, &union_reduce_fun/2)
|> Map.values()
|> List.flatten()
end
@spec union_reduce_fun(Edge.t(), map()) :: map()
defp union_reduce_fun(edge, target) do
{_type, u, v1, v2, _dir} = edge
case target[u] do
nil ->
Map.put(target, u, [edge])
edges ->
vs = edges |> Enum.map(&[elem(&1, 2), elem(&1, 3)]) |> List.flatten()
cut_edges = cut_edge(edge, vs)
cut_src_edges =
edges
|> Enum.map(&cut_edge(&1, [v1, v2]))
|> List.flatten()
map =
cut_edges
|> Kernel.++(cut_src_edges)
|> Enum.group_by(&elem(&1, 4), &Tuple.delete_at(&1, 4))
|> Enum.into(%{}, fn {k, v} -> {k, MapSet.new(v)} end)
Map.put(target, u, simplify_edges(map))
end
end
@spec cut_edge(Edge.t(), [integer()]) :: [Edge.t()]
defp cut_edge(edge, vs) do
{t, p, q1, q2, d} = edge
if q2 == q1 + 1 do
[edge]
else
range = (q1 + 1)..(q2 - 1)
vs
|> Enum.filter(&(&1 in range))
|> Enum.uniq()
|> Enum.sort()
|> case do
[] -> [edge]
[v] -> [{t, p, q1, v, d}, {t, p, v, q2, d}]
[v | vs] -> [{t, p, q1, v, d} | cut_edge({t, p, v, q2, d}, vs)]
end
end
end
@spec simplify_edges(map()) :: [Edge.t()]
defp simplify_edges(map) do
pos_edges = map[:positive] || MapSet.new()
neg_edges = map[:negative] || MapSet.new()
common = MapSet.intersection(pos_edges, neg_edges)
fun = fn set, orientation ->
set
|> MapSet.difference(common)
|> Enum.map(&Tuple.append(&1, orientation))
end
fun.(pos_edges, :positive) ++ fun.(neg_edges, :negative)
end
end
lib/pixel_font/rectilinear_shape/edge_generator.ex
defmodule DataFrame.Statistics do
@moduledoc """
Functions with statistics processing of Frames
"""
alias DataFrame.Table
# Goal is to achieve something like this
# count 6.000000 6.000000 6.000000 6.000000
# mean 0.073711 -0.431125 -0.687758 -0.233103
# std 0.843157 0.922818 0.779887 0.973118
# min -0.861849 -2.104569 -1.509059 -1.135632
# 25% -0.611510 -0.600794 -1.368714 -1.076610
# 50% 0.022070 -0.228039 -0.767252 -0.386188
# 75% 0.658444 0.041933 -0.034326 0.461706
# max 1.212112 0.567020 0.276232 1.071804
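# A hedged usage sketch (assumes a frame with numeric columns):
#
#     DataFrame.Statistics.describe(frame)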
def describe(frame) do
values = frame.values |> Table.map_columns(&describe_column/1)
DataFrame.new(values, frame.columns, [
"count",
"mean",
"std",
"min",
"25%",
"50%",
"75%",
"max"
])
end
defp describe_column(column) do
count = Enum.count(column)
mean = Enum.sum(column) / count
min = Enum.min(column)
max = Enum.max(column)
variance_sum = column |> Enum.map(fn x -> :math.pow(mean - x, 2) end) |> Enum.sum()
std = :math.sqrt(variance_sum / count)
twenty_five = percentile(column, 0.25)
fifty = percentile(column, 0.5)
seventy_five = percentile(column, 0.75)
[count, mean, std, min, twenty_five, fifty, seventy_five, max]
end
defp percentile(values, percentile) do
values_sorted = Enum.sort(values)
# Given we have for instance 80 elements, this is something like 36.2
k = percentile * (Enum.count(values_sorted) - 1)
previous_index = round(Float.floor(k))
next_index = round(Float.ceil(k))
# Then this would be 0.2 and whatever number is in the 36th position
previous_number_weight = k - previous_index
previous_number = Enum.at(values_sorted, previous_index)
# And this would be 0.8 and the number in that position
next_number_weight = next_index - k
next_number = Enum.at(values_sorted, next_index)
# Weight sum the previous calculations
previous_number_weight * previous_number + next_number_weight * next_number
end
end
lib/dataframe/statistics.ex
defmodule Bodyguard.Action do
@moduledoc """
Execute authorized actions in a composable way.
An Action can be built up over the course of a request, providing a means to
specify authorization parameters in the steps leading up to actually
executing the job.
When authorization fails, there is an opportunity to handle it using a
fallback function before returning the final result.
Authorization is performed by deferring to a `Bodyguard.Policy`.
#### Fields
* `:context` – Context for the action
* `:policy` – Implementation of `Bodyguard.Policy` behaviour; defaults to the `:context`
* `:user` – The user to authorize
* `:name` – The name of the authorized action
* `:auth_run?` – If an authorization check has been performed
* `:auth_result` – Result of the authorization check
* `:authorized?` – If authorization has succeeded (default `false`)
* `:job` – Function to execute if authorization passes; signature `job(action)`
* `:fallback` – Function to execute if authorization fails; signature `fallback(action)`
* `:assigns` – Generic parameters along for the ride
#### Controller Example
defmodule MyApp.Web.PostController do
use MyApp.Web, :controller
import Bodyguard.Action
alias MyApp.Blog
action_fallback MyApp.FallbackController
plug Bodyguard.Plug.BuildAction, context: Blog, user: &get_current_user/1
def index(conn, _) do
run conn.assigns.action, fn(action) ->
posts = Blog.list_posts(action.user)
render(conn, "index.html", posts: posts)
end
end
defp get_current_user(conn) do
# ...
end
end
#### Verbose Example
import Bodyguard.Action
alias MyApp.Blog
act(Blog)
|> put_user(get_current_user())
|> put_policy(Blog.SomeSpecialPolicy)
|> assign(:drafts, true)
|> permit(:list_posts)
|> put_job(fn action ->
Blog.list_posts(action.user, drafts_only: action.assigns.drafts)
end)
|> put_fallback(fn _action -> {:error, :not_found} end)
|> run()
"""
defstruct context: nil,
policy: nil,
user: nil,
name: nil,
auth_run?: false,
auth_result: nil,
authorized?: false,
job: nil,
fallback: nil,
assigns: %{}
alias Bodyguard.Action
@type job :: (action :: t -> any)
@type fallback :: (action :: t -> any)
@type assigns :: %{atom => any}
@type t :: %__MODULE__{
context: module | nil,
policy: module | nil,
name: atom | nil,
user: any,
auth_run?: boolean,
auth_result: Bodyguard.Policy.auth_result() | nil,
authorized?: boolean,
job: job | nil,
fallback: fallback | nil,
assigns: assigns
}
@doc """
Initialize an Action.
The `context` is assumed to implement `Bodyguard.Policy` callbacks. To
specify a unique policy, use `put_policy/2`.
The Action is considered unauthorized by default, until authorization is
run.
"""
@spec act(context :: module) :: t
def act(context) do
%Action{}
|> put_context(context)
|> put_policy(context)
end
@doc """
Change the context.
"""
@spec put_context(action :: t, context :: module) :: t
def put_context(%Action{} = action, context) when is_atom(context) do
%{action | context: context}
end
@doc """
Change the policy.
"""
@spec put_policy(action :: t, policy :: module) :: t
def put_policy(%Action{} = action, policy) when is_atom(policy) do
%{action | policy: policy}
end
@doc """
Change the user to authorize.
"""
@spec put_user(action :: t, user :: any) :: t
def put_user(%Action{} = action, user) do
%{action | user: user}
end
@doc """
Change the job to execute.
"""
@spec put_job(action :: t, job :: job | nil) :: t
def put_job(%Action{} = action, job) when is_function(job, 1) or is_nil(job) do
%{action | job: job}
end
@doc """
Change the fallback handler.
"""
@spec put_fallback(action :: t, fallback :: fallback | nil) :: t
def put_fallback(%Action{} = action, fallback)
when is_function(fallback, 1) or is_nil(fallback) do
%{action | fallback: fallback}
end
@doc """
Replace the assigns.
"""
@spec put_assigns(action :: t, assigns :: assigns) :: t
def put_assigns(%Action{} = action, %{} = assigns) do
%{action | assigns: assigns}
end
@doc """
Put a new assign.
"""
@spec assign(action :: t, key :: atom, value :: any) :: t
def assign(%Action{assigns: assigns} = action, key, value) when is_atom(key) do
%{action | assigns: Map.put(assigns, key, value)}
end
@doc """
Mark the Action as authorized, regardless of previous authorization.
"""
@spec force_authorized(action :: t) :: t
def force_authorized(%Action{} = action) do
%{action | authorized?: true, auth_result: :ok}
end
@doc """
Mark the Action as unauthorized, regardless of previous authorization.
"""
@spec force_unauthorized(action :: t, error :: any) :: t
def force_unauthorized(%Action{} = action, error) do
%{action | authorized?: false, auth_result: error}
end
@doc """
Use the policy to perform authorization.
The `opts` are merged in to the Action's `assigns` and passed as the
`params`.
See `Bodyguard.permit/3` for details.
"""
@spec permit(action :: t, name :: atom, opts :: keyword | assigns) :: t
def permit(action, name, opts \\ [])
def permit(%Action{policy: nil}, name, _opts) do
raise RuntimeError, "Policy not specified for #{inspect(name)} action"
end
def permit(%Action{auth_run?: true, authorized?: false} = action, _name, _opts) do
# Don't attempt to auth again, since we already failed
action
end
def permit(%Action{} = action, name, opts) do
params = Enum.into(opts, action.assigns)
case Bodyguard.permit(action.policy, name, action.user, params) do
:ok ->
%{action | name: name, auth_run?: true, authorized?: true, auth_result: :ok}
error ->
%{action | name: name, auth_run?: true, authorized?: false, auth_result: error}
end
end
@doc """
Same as `permit/3` but raises on failure.
"""
@spec permit!(action :: t, name :: atom, opts :: keyword | assigns) :: t
def permit!(action, name, opts \\ [])
def permit!(%Action{policy: nil}, name, _opts) do
raise RuntimeError, "Policy not specified for #{inspect(name)} action"
end
def permit!(%Action{auth_run?: true, authorized?: false} = action, _name, _opts) do
# Don't attempt to auth again, since we already failed
action
end
def permit!(%Action{} = action, name, opts) do
params = Enum.into(opts, action.assigns)
Bodyguard.permit!(action.policy, name, action.user, params)
%{action | name: name, auth_run?: true, authorized?: true, auth_result: :ok}
end
@doc """
Execute the Action's job.
The `job` must have been previously assigned using `put_job/2`.
If authorized, the job is run and its value is returned.
If unauthorized, and a fallback has been provided, the fallback is run and
its value returned.
Otherwise, the result of the authorization is returned (something like
`{:error, reason}`).
"""
@spec run(action :: t) :: any
def run(%Action{job: nil}) do
raise RuntimeError, "Job not specified for action"
end
def run(%Action{} = action) do
cond do
# Success!
action.authorized? ->
action.job.(action)
# Failure, but with a fallback
action.fallback ->
action.fallback.(action)
# Failure without a fallback
true ->
action.auth_result
end
end
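# Hedged usage sketch (MyApp.Blog is a hypothetical context implementing
# Bodyguard.Policy callbacks; current_user is assumed to be in scope):
#
#     act(MyApp.Blog)
#     |> put_user(current_user)
#     |> permit(:list_posts)
#     |> run(
#       fn action -> MyApp.Blog.list_posts(action.user) end,
#       fn _action -> {:error, :unauthorized} end
#     )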
@doc """
Execute the given job.
If authorized, the job is run and its value is returned.
If unauthorized, and a fallback has been provided, the fallback is run and
its value returned.
Otherwise, the result of the authorization is returned (something like
`{:error, reason}`).
"""
@spec run(action :: t, job :: job) :: any
def run(%Action{} = action, job) when is_function(job, 1) do
action
|> put_job(job)
|> run()
end
@doc """
Execute the given job and fallback.
If authorized, the job is run and its value is returned.
If unauthorized, the fallback is run and its value returned.
"""
@spec run(action :: t, job :: job, fallback :: fallback) :: any
def run(%Action{} = action, job, fallback)
when is_function(job, 1) and is_function(fallback, 1) do
action
|> put_job(job)
|> put_fallback(fallback)
|> run()
end
@doc """
Execute the job, raising on failure.
The `job` must have been previously assigned using `put_job/2`.
"""
@spec run!(action :: t) :: any
def run!(%Action{job: nil}) do
raise RuntimeError, "Job not specified for action"
end
def run!(%Action{} = action) do
if action.authorized? do
action.job.(action)
else
raise Bodyguard.NotAuthorizedError,
message: "Not authorized",
status: 403,
reason: action.auth_result
end
end
@doc """
Execute the given job, raising on failure.
"""
@spec run!(action :: t, job :: job) :: any
def run!(%Action{} = action, job) when is_function(job, 1) do
action
|> put_job(job)
|> run!()
end
end
|
lib/bodyguard/action.ex
| 0.904616 | 0.417212 |
action.ex
|
starcoder
|
defmodule Membrane.RawAudio.SampleFormat do
@moduledoc """
This module defines sample formats used in `Membrane.RawAudio`
and some helpers to deal with them.
"""
use Bunch.Typespec
import Bitwise
@compile {:inline,
[
to_tuple: 1,
from_tuple: 1
]}
@list_type t :: [
:s8,
:u8,
:s16le,
:u16le,
:s16be,
:u16be,
:s24le,
:u24le,
:s24be,
:u24be,
:s32le,
:u32le,
:s32be,
:u32be,
:f32le,
:f32be,
:f64le,
:f64be
]
@spec values() :: [t()]
def values(), do: @t
@type sample_type_t :: :s | :u | :f
@type sample_size_t :: 8 | 16 | 24 | 32 | 64
@type endianness_t :: :le | :be | :any
@doc """
Converts format atom to an equivalent 3-tuple form
"""
@spec to_tuple(t) :: {sample_type_t, sample_size_t, endianness_t}
def to_tuple(:s8), do: {:s, 8, :any}
def to_tuple(:u8), do: {:u, 8, :any}
def to_tuple(:s16le), do: {:s, 16, :le}
def to_tuple(:u16le), do: {:u, 16, :le}
def to_tuple(:s16be), do: {:s, 16, :be}
def to_tuple(:u16be), do: {:u, 16, :be}
def to_tuple(:s24le), do: {:s, 24, :le}
def to_tuple(:u24le), do: {:u, 24, :le}
def to_tuple(:s24be), do: {:s, 24, :be}
def to_tuple(:u24be), do: {:u, 24, :be}
def to_tuple(:s32le), do: {:s, 32, :le}
def to_tuple(:u32le), do: {:u, 32, :le}
def to_tuple(:s32be), do: {:s, 32, :be}
def to_tuple(:u32be), do: {:u, 32, :be}
def to_tuple(:f32le), do: {:f, 32, :le}
def to_tuple(:f32be), do: {:f, 32, :be}
def to_tuple(:f64le), do: {:f, 64, :le}
def to_tuple(:f64be), do: {:f, 64, :be}
@doc """
Converts 3-tuple format to an equivalent atom form
"""
@spec from_tuple({sample_type_t, sample_size_t, endianness_t}) :: t
def from_tuple({:s, 8, :any}), do: :s8
def from_tuple({:u, 8, :any}), do: :u8
def from_tuple({:s, 16, :le}), do: :s16le
def from_tuple({:u, 16, :le}), do: :u16le
def from_tuple({:s, 16, :be}), do: :s16be
def from_tuple({:u, 16, :be}), do: :u16be
def from_tuple({:s, 24, :le}), do: :s24le
def from_tuple({:u, 24, :le}), do: :u24le
def from_tuple({:s, 24, :be}), do: :s24be
def from_tuple({:u, 24, :be}), do: :u24be
def from_tuple({:s, 32, :le}), do: :s32le
def from_tuple({:u, 32, :le}), do: :u32le
def from_tuple({:s, 32, :be}), do: :s32be
def from_tuple({:u, 32, :be}), do: :u32be
def from_tuple({:f, 32, :le}), do: :f32le
def from_tuple({:f, 32, :be}), do: :f32be
def from_tuple({:f, 64, :le}), do: :f64le
def from_tuple({:f, 64, :be}), do: :f64be
# Serialization constants
@sample_types BiMap.new(s: 0b01 <<< 30, u: 0b00 <<< 30, f: 0b11 <<< 30)
@endianness_mapping BiMap.new(le: 0b0 <<< 29, be: 0b1 <<< 29)
@sample_type 0b11 <<< 30
@endianness_bitmask 0b1 <<< 29
@sample_size (0b1 <<< 8) - 1
@doc """
converts audio format to 32-bit unsigned integer consisting of (from oldest bit):
* first bit for type (int/float)
* then bit for encoding (unsigned/signed)
* then bit for endianity (little/big)
* then sequence of zeroes
* last 8 bits for size (in bits)
expects atom format
returns format encoded as integer
"""
@spec serialize(t) :: pos_integer
def serialize(format) do
{type, size, endianness} = format |> to_tuple
0 ||| @sample_types[type] ||| (@endianness_mapping[endianness] || @endianness_mapping[:le]) |||
size
end
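# Hedged worked example of the layout above:
#
#     serialize(:s16le) #=> (0b01 <<< 30) ||| 16 = 1_073_741_840
#     serialize(:u8)    #=> 8 (zero type bits; :any endianness maps to :le)
#     deserialize(1_073_741_840) #=> :s16le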
# Workaround for dialyzer not handling opaque term creation at compile time
# See: https://github.com/elixir-lang/elixir/issues/8463
@dialyzer [{:no_opaque, deserialize: 1}]
@doc """
Converts positive integer containing serialized format to atom.
expects serialized format
returns format atom (See `t:t/0`)
"""
@spec deserialize(pos_integer) :: t
def deserialize(serialized_format) do
type = @sample_types |> BiMap.get_key(serialized_format &&& @sample_type)
size = serialized_format &&& @sample_size
endianness =
case size do
8 ->
:any
_otherwise ->
@endianness_mapping |> BiMap.get_key(serialized_format &&& @endianness_bitmask)
end
{type, size, endianness} |> from_tuple
end
end
|
lib/membrane_raw_audio/sample_format.ex
| 0.842199 | 0.561004 |
sample_format.ex
|
starcoder
|
defmodule ExampleWeb.Email do
@moduledoc """
A module for sending emails to the user.
This module provides functions to be used with the Phauxth authentication
library when confirming users or handling password resets.
This example uses Bamboo to email users. If you do not want to use Bamboo,
see the `Using another email library` for instructions on how to adapt this
example.
For more information about Bamboo, see the [Bamboo README](https://github.com/thoughtbot/bamboo).
## Bamboo adapters
Bamboo provides adapters for many popular emailing services, and you
can also create custom adapters by implementing the Bamboo.Adapter behaviour.
This example is configured to use the MandrillAdapter in production, the
LocalAdapter in development, and the TestAdapter for tests. To use a
different adapter, edit the relevant config file.
## Email delivery
All emails in this module use the `deliver_later` function, which sends the
email straight away, but in the background. The behavior of this function
can be customized by implementing your own `Bamboo.DeliverLaterStrategy`
behaviour.
## Viewing sent emails
The `Bamboo.SentEmailViewerPlug` has been added to the `router.ex` file. With this,
you can view the sent emails in your browser.
## Using another email library
If you do not want to use Bamboo, follow the instructions below:
1. Edit this file, using the email library of your choice
2. Remove the lib/forks_the_egg_sample/mailer.ex file
3. Remove the Bamboo entries in the config/config.exs and config/test.exs files
4. Remove bamboo from the deps section in the mix.exs file
"""
import Bamboo.Email
alias ExampleWeb.Mailer
@doc """
An email with a confirmation link in it.
"""
def confirm_request(address, link) do
address
|> base_email()
|> subject("Confirm email address")
|> html_body(
"<h3>Click on the link below to confirm this email address</h3><p><a href=#{link}>Confirm email</a></p>"
)
|> Mailer.deliver_later()
end
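# Hedged usage sketch (address and link are hypothetical):
#
#     ExampleWeb.Email.confirm_request(
#       "person@example.com",
#       "https://www.example.com/confirm?key=abc123"
#     )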
@doc """
An email with a link to reset the password.
"""
def reset_request(address, nil) do
address
|> base_email()
|> subject("Reset your password")
|> text_body(
"You requested a password reset, but no user is associated with the email you provided."
)
|> Mailer.deliver_later()
end
def reset_request(address, link) do
address
|> base_email()
|> subject("Reset your password")
|> html_body(
"<h3>Click on the link below to reset your password</h3><p><a href=#{link}>Password reset</a></p>"
)
|> Mailer.deliver_later()
end
@doc """
An email acknowledging that the account has been successfully confirmed.
"""
def confirm_success(address) do
address
|> base_email()
|> subject("Confirmed account")
|> text_body("Your account has been confirmed.")
|> Mailer.deliver_later()
end
@doc """
An email acknowledging that the password has been successfully reset.
"""
def reset_success(address) do
address
|> base_email()
|> subject("Password reset")
|> text_body("Your password has been reset.")
|> Mailer.deliver_later()
end
defp base_email(address) do
new_email()
|> to(address)
|> from("<EMAIL>")
end
end
|
lib/example_web/email.ex
| 0.871146 | 0.640031 |
email.ex
|
starcoder
|
defmodule ExMpesa.AccountBalance do
@moduledoc """
The Account Balance API requests for the account balance of a shortcode.
"""
import ExMpesa.MpesaBase
import ExMpesa.Util
@doc """
Initiates an account balance request.
## Configuration
Add the config below to your dev.exs / prod.exs files.
This assumes a clear understanding of how the Daraja API works. See the docs here https://developer.safaricom.co.ke/docs#account-balance-api
`config.exs`
```elixir
config :ex_mpesa,
cert: "",
balance: [
short_code: "",
initiator_name: "Safaricom1",
password: "<PASSWORD>",
timeout_url: "",
result_url: "",
security_credential: ""
]
```
To generate the security_credential, head over to https://developer.safaricom.co.ke/test_credentials, then use the Initiator Security Password for your environment.
Alternatively, generate security credential using certificate
`cert` - This is the M-Pesa public key certificate used to encrypt your plain password.
There are 2 types of certificates.
- sandox - https://developer.safaricom.co.ke/sites/default/files/cert/cert_sandbox/cert.cer .
- production - https://developer.safaricom.co.ke/sites/default/files/cert/cert_prod/cert.cer .
`password` - This is a plain unencrypted password.
Environment
- production - set password from the organization portal.
- sandbox - use your own custom password
## Example
iex> ExMpesa.AccountBalance.request()
{:ok,
%{
"ConversationID" => "AG_20201010_00007d6021022d396df6",
"OriginatorConversationID" => "28290-142922216-2",
"ResponseCode" => "0",
"ResponseDescription" => "Accept the service request successfully."
}}
"""
def request() do
case get_security_credential_for(:balance) do
nil -> {:error, "cannot generate security_credential due to missing configuration fields"}
security_credential -> account_balance(security_credential)
end
end
defp account_balance(security_credential) do
payload = %{
"Initiator" => Application.get_env(:ex_mpesa, :balance)[:initiator_name],
"SecurityCredential" => security_credential,
"CommandID" => "AccountBalance",
"PartyA" => Application.get_env(:ex_mpesa, :balance)[:short_code],
"IdentifierType" => 4,
"Remarks" => "Checking account balance",
"QueueTimeOutURL" => Application.get_env(:ex_mpesa, :balance)[:timeout_url],
"ResultURL" => Application.get_env(:ex_mpesa, :balance)[:result_url]
}
make_request("/mpesa/accountbalance/v1/query", payload)
end
end
|
lib/ex_mpesa/account_balance.ex
| 0.776072 | 0.623291 |
account_balance.ex
|
starcoder
|
defmodule Callisto.GraphDB.Queryable do
alias Callisto.{Edge, Query, Triple, Vertex}
def query(adapter, cypher, parser \\ nil) do
do_query(adapter, cypher, parser)
end
# Straight up cypher string, no parser...
defp do_query(adapter, cypher, parser)
when is_binary(cypher) and is_nil(parser) do
adapter.query(cypher)
end
# Straight up cypher, but with a parsing function
defp do_query(adapter, cypher, parser)
when is_binary(cypher) and is_function(parser) do
case do_query(adapter, cypher, nil) do
{:ok, result} -> {:ok, parser.(result) }
result -> result
end
end
# Cypher struct, possible parser.
defp do_query(adapter, cypher=%Query{}, parser) do
do_query(adapter, to_string(cypher), fn(r) ->
result = Callisto.GraphDB.handle_return(r, cypher)
(parser||&(&1)).(result)
end)
end
def query!(adapter, cypher, parser \\ nil) do
{:ok, response} = query(adapter, cypher, parser)
response
end
def count(adapter, matcher) do
cypher = %Query{}
|> Query.match(matcher)
|> Query.returning("count(x)")
query(adapter, cypher, &(hd(&1)["count(x)"]))
end
def count!(adapter, matcher) do
with {:ok, c} <- count(adapter, matcher),
do: c
end
def exists?(adapter, matcher) do
cypher = %Query{}
|> Query.match(matcher)
|> Query.returning(:x)
|> Query.limit(1)
{:ok, c} = query(adapter, cypher)
Enum.count(c) > 0
end
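# Hedged usage sketch (MyAdapter is a hypothetical adapter module exposing
# query/1; matchers follow the [x: ...] keyword shape used by do_get/4 below):
#
#     Queryable.count!(MyAdapter, x: Vertex.cast(["Person"], %{}))
#     Queryable.exists?(MyAdapter, x: Vertex.cast(["Person"], %{name: "Ada"}))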
# Convert properties to map.
def get(adapter, kind, labels, props) when is_list(props) do
get(adapter, kind, labels, Map.new(props))
end
# If passed a non-blank ID in the hash...
def get(adapter, kind, labels, %{id: id}) when not is_nil(id) do
get_by_id(adapter, kind, labels, id)
end
# ...or a straight up integer or string value, get by ID.
def get(adapter, kind, labels, id) when is_binary(id) or is_integer(id) do
get_by_id(adapter, kind, labels, id)
end
# Otherwise, get based on the given properties.
def get(adapter, kind, labels, props), do: do_get(adapter, kind, labels, props)
def do_get(adapter, kind=Vertex, labels, props) do
cypher = %Query{}
|> Query.match(x: kind.cast(labels, props))
|> Query.returning(x: kind, "labels(x)": nil)
query(adapter, cypher, &deref_all/1)
end
def do_get(adapter, kind=Edge, labels, props) do
cypher = %Query{}
|> Query.match(x: kind.cast(labels, props))
|> Query.returning(x: kind, "type(x)": nil)
query(adapter, cypher, &deref_all/1)
end
def get_by_id(adapter, kind, labels, id) do
do_get(adapter, kind, labels, %{id: id})
end
def get!(adapter, kind, labels, props) do
with {:ok, rows} <- get(adapter, kind, labels, props),
do: rows
end
def get_path(adapter, from=%Vertex{}, to=%Vertex{}, edge=%Edge{}) do
cypher = %Query{}
|> Query.match("#{Vertex.to_cypher(from, "from")}-#{Edge.to_cypher(edge, "x")}->#{Vertex.to_cypher(to, "to")}")
|> Query.returning(x: Edge, "type(x)": nil)
query(adapter, cypher, &deref_all/1)
end
def create(adapter, vertex=%Vertex{}) do
cypher = %Query{}
|> Query.create(vertex)
|> Query.returning(x: Vertex, "labels(x)": nil)
query(adapter, cypher, &deref_all/1)
end
def create(adapter, triple=%Triple{}) do
cypher = %Query{}
|> Query.match(from: triple.from, to: triple.to)
|> Query.create("(from)-#{Edge.to_cypher(triple.edge,"r")}->(to)")
|> Query.returning(from: Vertex, "labels(from)": nil,
r: Edge, "type(r)": nil,
to: Vertex, "labels(to)": nil)
query(adapter, cypher, fn(rows) ->
Enum.map(rows, fn(r) ->
Triple.new(from: r["from"], edge: r["r"], to: r["to"])
end)
end)
end
def get_or_create(adapter, vertex=%Vertex{}, props) do
cypher = %Query{}
|> Query.merge(x: vertex)
|> Query.set(on_create: [x: props || vertex.props])
|> Query.returning(x: Vertex, "labels(x)": nil)
query(adapter, cypher, &deref_all/1)
end
defp deref_all(rows, key \\ "x") do
Enum.map(rows, &(&1[key]))
end
def delete(adapter, vertex=%Vertex{}, opts) do
cypher = %Query{}
|> Query.match(x: vertex)
|> Query.delete(if(Keyword.get(opts, :detach),
do: [detach: :x],
else: :x))
query(adapter, cypher)
end
def update(adapter, vertex=%Vertex{}, opts) do
cypher = %Query{}
|> Query.match(x: vertex)
|> Query.set(x: opts || vertex.props)
|> Query.returning(x: Vertex, "labels(x)": nil)
query(adapter, cypher)
end
end
|
lib/callisto/graph_db/queryable.ex
| 0.720958 | 0.446253 |
queryable.ex
|
starcoder
|
defprotocol Gyx.Core.Spaces do
@moduledoc """
This protocol defines basic functions to interact with
action and observation spaces.
"""
alias Gyx.Core.Spaces.{Discrete, Box, Tuple}
@type space :: Discrete.t() | Box.t() | Tuple.t()
@type discrete_point :: integer
@type box_point :: list(list(float))
@type tuple_point :: list(discrete_point | box_point())
@type point :: box_point | discrete_point | tuple_point
@doc """
Samples a random point from a space.
Note that sampled points are very different in nature
depending on the underlying space.
This sampling is important for an agent, as it is how the agent can
decide which actions to take from the action space defined by the
environment it is interacting with.
## Parameters
- space: Any module representing a space.
## Examples
iex> discrete_space = %Gyx.Core.Spaces.Discrete{n: 42}
%Gyx.Core.Spaces.Discrete{n: 42, random_algorithm: :exsplus, seed: {1,2,3}}
iex> Gyx.Core.Spaces.set_seed(discrete_space)
{%{
jump: #Function<16.10897371/1 in :rand.mk_alg/1>,
max: 288230376151711743,
next: #Function<15.10897371/1 in :rand.mk_alg/1>,
type: :exsplus
}, [72022415603679006 | 144185572652843231]}
iex> Gyx.Core.Spaces.sample(discrete_space)
{:ok, 35}
iex> Gyx.Core.Spaces.sample(%Gyx.Core.Spaces.Box{shape: {2}, high: 7})
{:ok, [[3.173570417347619, 0.286615818442874]]}
"""
@spec sample(space()) :: {atom(), point()}
def sample(space)
@doc """
Verifies if a particular action or observation point lies inside a given space.
## Examples
iex> box_space = %Box{shape: {1, 2}}
iex> {:ok, box_point} = Spaces.sample(box_space)
iex> Spaces.contains(box_space, box_point)
true
"""
@spec contains?(space(), point()) :: bool()
def contains?(space, point)
@doc """
Sets the random generator used by `sample/1` with the
space defined seed.
"""
defdelegate set_seed(space), to: Gyx.Core.Spaces.Shared
end
defimpl Gyx.Core.Spaces, for: Gyx.Core.Spaces.Discrete do
def sample(discrete_space) do
{:ok, :rand.uniform(discrete_space.n) - 1}
end
def contains?(discrete_space, discrete_point) do
discrete_point in 0..(discrete_space.n - 1)
end
end
defimpl Gyx.Core.Spaces, for: Gyx.Core.Spaces.Box do
def sample(box_space) do
random_action =
box_space.shape
|> Tuple.to_list()
|> Enum.map(&get_rands(&1, box_space))
{:ok, random_action}
end
def contains?(box_space, box_point) do
with shape_expected <- Tuple.to_list(box_space.shape),
zip <- Enum.zip(shape_expected, box_point),
{len, len} <- {length(shape_expected), length(box_point)} do
not Enum.any?(zip, fn {e, v} ->
e != length(v) or Enum.any?(v, &(not (box_space.low <= &1 and &1 <= box_space.high)))
end)
else
_ -> false
end
end
defp get_rands(n, box_space) do
Enum.map(1..n, fn _ -> :rand.uniform() * (box_space.high - box_space.low) + box_space.low end)
end
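# Hedged example of the Box contract (low/high values here are illustrative,
# not the struct's defaults):
#
#     box = %Gyx.Core.Spaces.Box{shape: {2}, low: 0.0, high: 1.0}
#     Gyx.Core.Spaces.contains?(box, [[0.25, 0.75]]) #=> true
#     Gyx.Core.Spaces.contains?(box, [[1.25, 0.75]]) #=> false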
end
defmodule Gyx.Core.Spaces.Shared do
@moduledoc """
This module contains functions to be shared
across all types considered by all Gyx.Core.Spaces protocols
"""
def set_seed(%{random_algorithm: algo, seed: seed}) do
:rand.seed(algo, seed)
end
end
|
lib/core/spaces/protocols.ex
| 0.914577 | 0.708326 |
protocols.ex
|
starcoder
|
defmodule AWS.AutoScaling do
@moduledoc """
Amazon EC2 Auto Scaling
Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2
instances based on user-defined scaling policies, scheduled actions, and health
checks.
Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load
Balancing.
For more information, including information about granting IAM users required
permissions for Amazon EC2 Auto Scaling actions, see the [Amazon EC2 Auto Scaling User
Guide](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2011-01-01",
content_type: "application/x-www-form-urlencoded",
credential_scope: nil,
endpoint_prefix: "autoscaling",
global?: false,
protocol: "query",
service_id: "Auto Scaling",
signature_version: "v4",
signing_name: "autoscaling",
target_prefix: nil
}
end
@doc """
Attaches one or more EC2 instances to the specified Auto Scaling group.
When you attach instances, Amazon EC2 Auto Scaling increases the desired
capacity of the group by the number of instances being attached. If the number
of instances being attached plus the desired capacity of the group exceeds the
maximum size of the group, the operation fails.
If there is a Classic Load Balancer attached to your Auto Scaling group, the
instances are also registered with the load balancer. If there are target groups
attached to your Auto Scaling group, the instances are also registered with the
target groups.
For more information, see [Attach EC2 instances to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-instance-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def attach_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AttachInstances", input, options)
end
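# Hedged usage sketch (assumes aws-elixir's AWS.Client; credentials, region,
# group name and instance ID are placeholders):
#
#     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#     AWS.AutoScaling.attach_instances(client, %{
#       "AutoScalingGroupName" => "my-asg",
#       "InstanceIds" => ["i-0123456789abcdef0"]
#     })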
@doc """
Attaches one or more target groups to the specified Auto Scaling group.
This operation is used with the following load balancer types:
* Application Load Balancer - Operates at the application layer
(layer 7) and supports HTTP and HTTPS.
* Network Load Balancer - Operates at the transport layer (layer 4)
and supports TCP, TLS, and UDP.
* Gateway Load Balancer - Operates at the network layer (layer 3).
To describe the target groups for an Auto Scaling group, call the
`DescribeLoadBalancerTargetGroups` API. To detach the target group from the Auto
Scaling group, call the `DetachLoadBalancerTargetGroups` API.
For more information, see [Elastic Load Balancing and Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def attach_load_balancer_target_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AttachLoadBalancerTargetGroups", input, options)
end
@doc """
To attach an Application Load Balancer, Network Load Balancer, or Gateway Load
Balancer, use the `AttachLoadBalancerTargetGroups` API operation instead.
Attaches one or more Classic Load Balancers to the specified Auto Scaling group.
Amazon EC2 Auto Scaling registers the running instances with these Classic Load
Balancers.
To describe the load balancers for an Auto Scaling group, call the
`DescribeLoadBalancers` API. To detach the load balancer from the Auto Scaling
group, call the `DetachLoadBalancers` API.
For more information, see [Elastic Load Balancing and Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def attach_load_balancers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AttachLoadBalancers", input, options)
end
@doc """
Deletes one or more scheduled actions for the specified Auto Scaling group.
"""
def batch_delete_scheduled_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDeleteScheduledAction", input, options)
end
@doc """
Creates or updates one or more scheduled scaling actions for an Auto Scaling
group.
"""
def batch_put_scheduled_update_group_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchPutScheduledUpdateGroupAction", input, options)
end
@doc """
Cancels an instance refresh operation in progress.
Cancellation does not roll back any replacements that have already been
completed, but it prevents new replacements from being started.
For more information, see [Replacing Auto Scaling instances based on an instance refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def cancel_instance_refresh(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelInstanceRefresh", input, options)
end
@doc """
Completes the lifecycle action for the specified token or instance with the
specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto
Scaling group:
1. (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling
launches or terminates instances.
2. (Optional) Create a notification target and an IAM role. The
target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows
Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
3. Create the lifecycle hook. Specify whether the hook is used when
the instances launch or terminate.
4. If you need more time, record the lifecycle action heartbeat to
keep the instance in a pending state.
5. If you finish before the timeout period ends, complete the
lifecycle action.
For more information, see [Amazon EC2 Auto Scaling lifecycle hooks](https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def complete_lifecycle_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CompleteLifecycleAction", input, options)
end
@doc """
## We strongly recommend using a launch template when calling this operation to
ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2.
Creates an Auto Scaling group with the specified name and attributes.
If you exceed your maximum limit of Auto Scaling groups, the call fails. To
query this limit, call the `DescribeAccountLimits` API. For information about
updating this limit, see [Amazon EC2 Auto Scaling service quotas](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html)
in the *Amazon EC2 Auto Scaling User Guide*.
For introductory exercises for creating an Auto Scaling group, see [Getting started with Amazon EC2 Auto
Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/GettingStartedTutorial.html)
and [Tutorial: Set up a scaled and load-balanced application](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-register-lbs-with-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*. For more information, see [Auto Scaling
groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html)
in the *Amazon EC2 Auto Scaling User Guide*.
Every Auto Scaling group has three size parameters (`DesiredCapacity`,
`MaxSize`, and `MinSize`). Usually, you set these sizes based on a specific
number of instances. However, if you configure a mixed instances policy that
defines weights for the instance types, you must specify these sizes with the
same units that you use for weighting instances.
"""
def create_auto_scaling_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAutoScalingGroup", input, options)
end
@doc """
Creates a launch configuration.
If you exceed your maximum limit of launch configurations, the call fails. To
query this limit, call the `DescribeAccountLimits` API. For information about
updating this limit, see [Amazon EC2 Auto Scaling service quotas](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html)
in the *Amazon EC2 Auto Scaling User Guide*.
For more information, see [Launch configurations](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def create_launch_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLaunchConfiguration", input, options)
end
@doc """
Creates or updates tags for the specified Auto Scaling group.
When you specify a tag with a key that already exists, the operation overwrites
the previous tag definition, and you do not get an error message.
For more information, see [Tagging Auto Scaling groups and instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def create_or_update_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOrUpdateTags", input, options)
end
@doc """
Deletes the specified Auto Scaling group.
If the group has instances or scaling activities in progress, you must specify
the option to force the deletion in order for it to succeed.
If the group has policies, deleting the group deletes the policies, the
underlying alarm actions, and any alarm that no longer has an associated action.
To remove instances from the Auto Scaling group before deleting it, call the
`DetachInstances` API with the list of instances and the option to decrement the
desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch
replacement instances.
To terminate all instances before deleting the Auto Scaling group, call the
`UpdateAutoScalingGroup` API and set the minimum size and desired capacity of
the Auto Scaling group to zero.
"""
def delete_auto_scaling_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteAutoScalingGroup", input, options)
end
@doc """
Deletes the specified launch configuration.
The launch configuration must not be attached to an Auto Scaling group. When
this call completes, the launch configuration is no longer available for use.
"""
def delete_launch_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLaunchConfiguration", input, options)
end
@doc """
Deletes the specified lifecycle hook.
If there are any outstanding lifecycle actions, they are completed first
(`ABANDON` for launching instances, `CONTINUE` for terminating instances).
"""
def delete_lifecycle_hook(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLifecycleHook", input, options)
end
@doc """
Deletes the specified notification.
"""
def delete_notification_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteNotificationConfiguration", input, options)
end
@doc """
Deletes the specified scaling policy.
Deleting either a step scaling policy or a simple scaling policy deletes the
underlying alarm action, but does not delete the alarm, even if it no longer has
an associated action.
For more information, see [Deleting a scaling policy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def delete_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePolicy", input, options)
end
@doc """
Deletes the specified scheduled action.
"""
def delete_scheduled_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteScheduledAction", input, options)
end
@doc """
Deletes the specified tags.
"""
def delete_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTags", input, options)
end
@doc """
Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS
account.
For information about requesting an increase, see [Amazon EC2 Auto Scaling service
quotas](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_account_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccountLimits", input, options)
end
@doc """
Describes the available adjustment types for Amazon EC2 Auto Scaling scaling
policies.
These settings apply to step scaling policies and simple scaling policies; they
do not apply to target tracking scaling policies.
The following adjustment types are supported:
* ChangeInCapacity
* ExactCapacity
* PercentChangeInCapacity
"""
def describe_adjustment_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAdjustmentTypes", input, options)
end
@doc """
Describes one or more Auto Scaling groups.
"""
def describe_auto_scaling_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAutoScalingGroups", input, options)
end
@doc """
Describes one or more Auto Scaling instances.
"""
def describe_auto_scaling_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAutoScalingInstances", input, options)
end
@doc """
Describes the notification types that are supported by Amazon EC2 Auto Scaling.
"""
def describe_auto_scaling_notification_types(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeAutoScalingNotificationTypes",
input,
options
)
end
@doc """
Describes one or more instance refreshes.
You can determine the status of a request by looking at the `Status` parameter.
The following are the possible statuses:
* `Pending` - The request was created, but the operation has not
started.
* `InProgress` - The operation is in progress.
* `Successful` - The operation completed successfully.
* `Failed` - The operation failed to complete. You can troubleshoot
using the status reason and the scaling activities.
* `Cancelling` - An ongoing operation is being cancelled.
Cancellation does not roll back any replacements that have already been
completed, but it prevents new replacements from being started.
* `Cancelled` - The operation is cancelled.
For more information, see [Replacing Auto Scaling instances based on an instance refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_instance_refreshes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeInstanceRefreshes", input, options)
end
@doc """
Describes one or more launch configurations.
"""
def describe_launch_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLaunchConfigurations", input, options)
end
@doc """
Describes the available types of lifecycle hooks.
The following hook types are supported:
* autoscaling:EC2_INSTANCE_LAUNCHING
* autoscaling:EC2_INSTANCE_TERMINATING
"""
def describe_lifecycle_hook_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLifecycleHookTypes", input, options)
end
@doc """
Describes the lifecycle hooks for the specified Auto Scaling group.
"""
def describe_lifecycle_hooks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLifecycleHooks", input, options)
end
@doc """
Describes the target groups for the specified Auto Scaling group.
"""
def describe_load_balancer_target_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLoadBalancerTargetGroups", input, options)
end
@doc """
Describes the load balancers for the specified Auto Scaling group.
This operation describes only Classic Load Balancers. If you have Application
Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the
`DescribeLoadBalancerTargetGroups` API instead.
"""
def describe_load_balancers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLoadBalancers", input, options)
end
@doc """
Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.
The `GroupStandbyInstances` metric is not returned by default. You must
explicitly request this metric when calling the `EnableMetricsCollection` API.
"""
def describe_metric_collection_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMetricCollectionTypes", input, options)
end
@doc """
Describes the notification actions associated with the specified Auto Scaling
group.
"""
def describe_notification_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeNotificationConfigurations", input, options)
end
@doc """
Describes the policies for the specified Auto Scaling group.
"""
def describe_policies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePolicies", input, options)
end
@doc """
Describes one or more scaling activities for the specified Auto Scaling group.
To view the scaling activities from the Amazon EC2 Auto Scaling console, choose
the **Activity** tab of the Auto Scaling group. When scaling events occur, you
see scaling activity messages in the **Activity history**. For more information,
see [Verifying a scaling activity for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-verify-scaling-activity.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_scaling_activities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeScalingActivities", input, options)
end
@doc """
Describes the scaling process types for use with the `ResumeProcesses` and
`SuspendProcesses` APIs.
"""
def describe_scaling_process_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeScalingProcessTypes", input, options)
end
@doc """
Describes the actions scheduled for your Auto Scaling group that haven't run or
that have not reached their end time.
To describe the actions that have already run, call the
`DescribeScalingActivities` API.
"""
def describe_scheduled_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeScheduledActions", input, options)
end
@doc """
Describes the specified tags.
You can use filters to limit the results. For example, you can query for the
tags for a specific Auto Scaling group. You can specify multiple values for a
filter. A tag must match at least one of the specified values for it to be
included in the results.
You can also specify multiple filters. The result includes information for a
particular tag only if it matches all the filters. If there's no match, no
special message is returned.
For more information, see [Tagging Auto Scaling groups and instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTags", input, options)
end
@doc """
Describes the termination policies supported by Amazon EC2 Auto Scaling.
For more information, see [Controlling which Auto Scaling instances terminate during scale
in](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_termination_policy_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTerminationPolicyTypes", input, options)
end
@doc """
Removes one or more instances from the specified Auto Scaling group.
After the instances are detached, you can manage them independent of the Auto
Scaling group.
If you do not specify the option to decrement the desired capacity, Amazon EC2
Auto Scaling launches instances to replace the ones that are detached.
If there is a Classic Load Balancer attached to the Auto Scaling group, the
instances are deregistered from the load balancer. If there are target groups
attached to the Auto Scaling group, the instances are deregistered from the
target groups.
For more information, see [Detach EC2 instances from your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/detach-instance-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def detach_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetachInstances", input, options)
end
@doc """
Detaches one or more target groups from the specified Auto Scaling group.
"""
def detach_load_balancer_target_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetachLoadBalancerTargetGroups", input, options)
end
@doc """
Detaches one or more Classic Load Balancers from the specified Auto Scaling
group.
This operation detaches only Classic Load Balancers. If you have Application
Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the
`DetachLoadBalancerTargetGroups` API instead.
When you detach a load balancer, it enters the `Removing` state while
deregistering the instances in the group. When all instances are deregistered,
then you can no longer describe the load balancer using the
`DescribeLoadBalancers` API call. The instances remain running.
"""
def detach_load_balancers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetachLoadBalancers", input, options)
end
@doc """
Disables group metrics for the specified Auto Scaling group.
"""
def disable_metrics_collection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableMetricsCollection", input, options)
end
@doc """
Enables group metrics for the specified Auto Scaling group.
For more information, see [Monitoring CloudWatch metrics for your Auto Scaling groups and
instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-monitoring.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def enable_metrics_collection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableMetricsCollection", input, options)
end
@doc """
Moves the specified instances into the standby state.
If you choose to decrement the desired capacity of the Auto Scaling group, the
instances can enter standby as long as the desired capacity of the Auto Scaling
group after the instances are placed into standby is equal to or greater than
the minimum capacity of the group.
If you choose not to decrement the desired capacity of the Auto Scaling group,
the Auto Scaling group launches new instances to replace the instances on
standby.
For more information, see [Temporarily removing instances from your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def enter_standby(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnterStandby", input, options)
end
@doc """
Executes the specified policy.
This can be useful for testing the design of your scaling policy.
"""
def execute_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExecutePolicy", input, options)
end
@doc """
Moves the specified instances out of the standby state.
After you put the instances back in service, the desired capacity is
incremented.
For more information, see [Temporarily removing instances from your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def exit_standby(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExitStandby", input, options)
end
@doc """
Creates or updates a lifecycle hook for the specified Auto Scaling group.
A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an
instance when the instance launches (before it is put into service) or as the
instance terminates (before it is fully terminated).
This step is a part of the procedure for adding a lifecycle hook to an Auto
Scaling group:
1. (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling
launches or terminates instances.
2. (Optional) Create a notification target and an IAM role. The
target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows
Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
3. Create the lifecycle hook. Specify whether the hook is used
when the instances launch or terminate.
4. If you need more time, record the lifecycle action heartbeat to
keep the instance in a pending state using the `RecordLifecycleActionHeartbeat`
API call.
5. If you finish before the timeout period ends, complete the
lifecycle action using the `CompleteLifecycleAction` API call.
For more information, see [Amazon EC2 Auto Scaling lifecycle hooks](https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)
in the *Amazon EC2 Auto Scaling User Guide*.
If you exceed your maximum limit of lifecycle hooks, which by default is 50 per
Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using the
`DescribeLifecycleHooks` API call. If you are no longer using a lifecycle hook,
you can delete it by calling the `DeleteLifecycleHook` API.
"""
def put_lifecycle_hook(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutLifecycleHook", input, options)
end
@doc """
Configures an Auto Scaling group to send notifications when specified events
take place.
Subscribers to the specified topic can have messages delivered to an endpoint
such as a web server or an email address.
This configuration overwrites any existing configuration.
For more information, see [Getting Amazon SNS notifications when your Auto Scaling group
scales](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ASGettingNotifications.html)
in the *Amazon EC2 Auto Scaling User Guide*.
If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling
group, the call fails.
"""
def put_notification_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutNotificationConfiguration", input, options)
end
@doc """
Creates or updates a scaling policy for an Auto Scaling group.
For more information about using scaling policies to scale your Auto Scaling
group, see [Target tracking scaling policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html)
and [Step and simple scaling policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def put_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutScalingPolicy", input, options)
end
@doc """
Creates or updates a scheduled scaling action for an Auto Scaling group.
For more information, see [Scheduled scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/schedule_time.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def put_scheduled_update_group_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutScheduledUpdateGroupAction", input, options)
end
@doc """
Records a heartbeat for the lifecycle action associated with the specified token
or instance.
This extends the timeout by the length of time defined using the
`PutLifecycleHook` API call.
This step is a part of the procedure for adding a lifecycle hook to an Auto
Scaling group:
1. (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling
launches or terminates instances.
2. (Optional) Create a notification target and an IAM role. The
target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows
Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
3. Create the lifecycle hook. Specify whether the hook is used when
the instances launch or terminate.
4. If you need more time, record the lifecycle action heartbeat to
keep the instance in a pending state.
5. If you finish before the timeout period ends, complete the
lifecycle action.
For more information, see [Auto Scaling lifecycle](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def record_lifecycle_action_heartbeat(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RecordLifecycleActionHeartbeat", input, options)
end
@doc """
Resumes the specified suspended auto scaling processes, or all suspended
processes, for the specified Auto Scaling group.
For more information, see [Suspending and resuming scaling processes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def resume_processes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResumeProcesses", input, options)
end
@doc """
Sets the size of the specified Auto Scaling group.
If a scale-in activity occurs as a result of a new `DesiredCapacity` value that
is lower than the current size of the group, the Auto Scaling group uses its
termination policy to determine which instances to terminate.
For more information, see [Manual scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-manual-scaling.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def set_desired_capacity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetDesiredCapacity", input, options)
end
@doc """
Sets the health status of the specified instance.
For more information, see [Health checks for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def set_instance_health(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetInstanceHealth", input, options)
end
@doc """
Updates the instance protection settings of the specified instances.
For more information about preventing instances that are part of an Auto Scaling
group from terminating on scale in, see [Instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection)
in the *Amazon EC2 Auto Scaling User Guide*.
If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling
group, the call fails.
"""
def set_instance_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetInstanceProtection", input, options)
end
@doc """
Starts a new instance refresh operation, which triggers a rolling replacement of
all previously launched instances in the Auto Scaling group with a new group of
instances.
If successful, this call creates a new instance refresh request with a unique ID
that you can use to track its progress. To query its status, call the
`DescribeInstanceRefreshes` API. To describe the instance refreshes that have
already run, call the `DescribeInstanceRefreshes` API. To cancel an instance
refresh operation in progress, use the `CancelInstanceRefresh` API.
For more information, see [Replacing Auto Scaling instances based on an instance refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def start_instance_refresh(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartInstanceRefresh", input, options)
end
@doc """
Suspends the specified auto scaling processes, or all processes, for the
specified Auto Scaling group.
If you suspend either the `Launch` or `Terminate` process types, it can prevent
other process types from functioning properly. For more information, see
[Suspending and resuming scaling processes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
in the *Amazon EC2 Auto Scaling User Guide*.
To resume processes that have been suspended, call the `ResumeProcesses` API.
"""
def suspend_processes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SuspendProcesses", input, options)
end
@doc """
Terminates the specified instance and optionally adjusts the desired group size.
This call simply makes a termination request. The instance is not terminated
immediately. When an instance is terminated, the instance status changes to
`terminated`. You can't connect to or start an instance after you've terminated
it.
If you do not specify the option to decrement the desired capacity, Amazon EC2
Auto Scaling launches instances to replace the ones that are terminated.
By default, Amazon EC2 Auto Scaling balances instances across all Availability
Zones. If you decrement the desired capacity, your Auto Scaling group can become
unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to
rebalance the group, and rebalancing might terminate instances in other zones.
For more information, see [Rebalancing activities](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-benefits.html#AutoScalingBehavior.InstanceUsage)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def terminate_instance_in_auto_scaling_group(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"TerminateInstanceInAutoScalingGroup",
input,
options
)
end
@doc """
## We strongly recommend that all Auto Scaling groups use launch templates to
ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2.
Updates the configuration for the specified Auto Scaling group.
To update an Auto Scaling group, specify the name of the group and the parameter
that you want to change. Any parameters that you don't specify are not changed
by this update request. The new settings take effect on any scaling activities
after this call returns.
If you associate a new launch configuration or template with an Auto Scaling
group, all new instances will get the updated configuration. Existing instances
continue to run with the configuration that they were originally launched with.
When you update a group to specify a mixed instances policy instead of a launch
configuration or template, existing instances may be replaced to match the new
purchasing options that you specified in the policy. For example, if the group
currently has 100% On-Demand capacity and the policy specifies 50% Spot
capacity, this means that half of your instances will be gradually terminated
and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto
Scaling launches new instances before terminating the old ones, so that updating
your group does not compromise the performance or availability of your
application.
Note the following about changing `DesiredCapacity`, `MaxSize`, or `MinSize`:
* If a scale-in activity occurs as a result of a new
`DesiredCapacity` value that is lower than the current size of the group, the
Auto Scaling group uses its termination policy to determine which instances to
terminate.
* If you specify a new value for `MinSize` without specifying a
value for `DesiredCapacity`, and the new `MinSize` is larger than the current
size of the group, this sets the group's `DesiredCapacity` to the new `MinSize`
value.
* If you specify a new value for `MaxSize` without specifying a
value for `DesiredCapacity`, and the new `MaxSize` is smaller than the current
size of the group, this sets the group's `DesiredCapacity` to the new `MaxSize`
value.
To see which parameters have been set, call the `DescribeAutoScalingGroups` API.
To view the scaling policies for an Auto Scaling group, call the
`DescribePolicies` API. If the group has scaling policies, you can update them
by calling the `PutScalingPolicy` API.
"""
def update_auto_scaling_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAutoScalingGroup", input, options)
end
end
|
lib/aws/generated/auto_scaling.ex
| 0.917036 | 0.617138 |
auto_scaling.ex
|
starcoder
|