code (string, 114–1.05M chars) | path (string, 3–312 chars) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, 3–168 chars) | kind (1 class)
---|---|---|---|---|---
defmodule Raxx.Context do
@type section_name :: term()
@typedoc """
An opaque type for the context snapshot data.
"""
@opaque snapshot :: map()
@moduledoc """
`Raxx.Context` is a mechanism for simple sharing of state/information between
`Raxx.Middleware`s and `Raxx.Server`s.
It is designed to be flexible and to enable different middlewares to operate
on it without conflicts. Each separate functionality using the context
can be in a different "section", containing arbitrary data.
Context is implicitly shared using the process dictionary and persists for the
duration of a single request/response cycle. If you want to pass the context
to a different process, you need to take its snapshot, pass it explicitly and
"restore" it in the other process. See `Raxx.Context.get_snapshot/0` and
`Raxx.Context.restore_snapshot/1` for details.
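For example (a minimal sketch; the `:user` section name is purely illustrative):

    Raxx.Context.set(:user, %{id: 42})
    Raxx.Context.retrieve(:user)
    # => %{id: 42}

    snapshot = Raxx.Context.get_snapshot()

    Task.async(fn ->
      Raxx.Context.restore_snapshot(snapshot)
      Raxx.Context.retrieve(:user)
      # => %{id: 42}
    end)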
"""
@doc """
Sets the value of a context section.
Returns the previous value of the section or `nil` if one was
not set.
"""
@spec set(section_name, term) :: term | nil
def set(section_name, value) do
Process.put(tag(section_name), value)
end
@doc """
Deletes the section from the context.
Returns the previous value of the section or `nil` if one was
not set.
"""
@spec delete(section_name) :: term | nil
def delete(section_name) do
Process.delete(tag(section_name))
end
@doc """
Retrieves the value of the context section.
If the section hasn't been set, the given `default` is returned (`nil` unless a default is provided).
"""
@spec retrieve(section_name, default :: term) :: term
def retrieve(section_name, default \\ nil) do
Process.get(tag(section_name), default)
end
@doc """
Restores a previously created context snapshot.
It will restore the implicit state of the context for the current
process to what it was when the snapshot was created using
`Raxx.Context.get_snapshot/0`. The current context values won't
be persisted in any way.
"""
@spec restore_snapshot(snapshot()) :: :ok
def restore_snapshot(context) when is_map(context) do
new_context_tuples =
context
|> Enum.map(fn {k, v} -> {tag(k), v} end)
current_context_keys =
Process.get_keys()
|> Enum.filter(&tagged_key?/1)
new_keys = Enum.map(new_context_tuples, fn {k, _v} -> k end)
keys_to_remove = current_context_keys -- new_keys
Enum.each(keys_to_remove, &Process.delete/1)
Enum.each(new_context_tuples, fn {k, v} -> Process.put(k, v) end)
end
@doc """
Creates a snapshot of the current process' context.
The returned context data can be passed between processes and restored
using `Raxx.Context.restore_snapshot/1`
"""
@spec get_snapshot() :: snapshot()
def get_snapshot() do
Process.get()
|> Enum.filter(fn {k, _v} -> tagged_key?(k) end)
|> Enum.map(fn {k, v} -> {strip_tag(k), v} end)
|> Map.new()
end
defp tagged_key?({__MODULE__, _}) do
true
end
defp tagged_key?(_) do
false
end
defp strip_tag({__MODULE__, key}) do
key
end
defp tag(key) do
{__MODULE__, key}
end
end
| lib/raxx/context.ex | 0.885681 | 0.553324 | context.ex | starcoder |
defmodule Membrane.Element.Base do
@moduledoc """
Modules in this namespace contain behaviours, default callback implementations
and other utilities useful when creating elements.
Elements are units that produce, process or consume data. They can be linked
with `Membrane.Pipeline`, and thus form a pipeline able to perform complex data
processing. Each element defines a set of pads, through which it can be linked
with other elements. During playback, pads can either send (output pads) or
receive (input pads) data. For more information on pads, see
`Membrane.Element.Pad`.
To implement an element, one of base modules (`Membrane.Element.Base.Source`,
`Membrane.Element.Base.Filter`, `Membrane.Element.Base.Sink`)
has to be `use`d, depending on the element type:
- source, producing buffers (contain only output pads),
- filter, processing buffers (contain both input and output pads),
- sink, consuming buffers (contain only input pads).
For more information on each element type, check documentation for appropriate
base module.
## Behaviours
Element-specific behaviours are specified in modules:
- `Membrane.Element.Base.Mixin.CommonBehaviour` - behaviour common to all
elements,
- `Membrane.Element.Base.Mixin.SourceBehaviour` - behaviour common to sources
and filters,
- `Membrane.Element.Base.Mixin.SinkBehaviour` - behaviour common to sinks and
filters,
- Base modules (`Membrane.Element.Base.Source`, `Membrane.Element.Base.Filter`,
`Membrane.Element.Base.Sink`) - behaviours specific to each element type.
## Callbacks
The modules listed above provide specifications of callbacks that define the
element's lifecycle. All of these callbacks have names with the `handle_` prefix.
They are used to define reactions to certain events that happen during runtime,
and indicate what actions the framework should undertake as a result, besides
executing element-specific code.
For actions that can be returned by each callback, see `Membrane.Element.Action`
module.
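## Example
A minimal element skeleton, shown only as a sketch (the module name and the
`handle_init/1` body are illustrative and do not form a complete, working element):

    defmodule My.Filter do
      use Membrane.Element.Base.Filter

      def handle_init(options) do
        # Build the initial element state from the passed options.
        {:ok, %{options: options}}
      end
    end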
"""
end
| lib/membrane/element/base.ex | 0.847369 | 0.467514 | base.ex | starcoder |
defmodule Scenic.Primitive.Ellipse do
@moduledoc """
Draw an ellipse on the screen.
## Data
`{radius_1, radius_2}`
The data for an ellipse is a tuple of two radii.
* `radius_1` - the radius of the ellipse in one direction
* `radius_2` - the radius of the ellipse in the other direction
## Styles
This primitive recognizes the following styles
* [`hidden`](Scenic.Primitive.Style.Hidden.html) - show or hide the primitive
* [`fill`](Scenic.Primitive.Style.Fill.html) - fill in the area of the primitive
* [`stroke`](Scenic.Primitive.Style.Stroke.html) - stroke the outline of the primitive. In this case, only the curvy part.
Note: you can achieve the same effect with a Circle primitive
by applying a :size transform to it with unequal values on the axes
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#ellipse/3)
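For example, a minimal sketch (the `graph` variable and the style values are illustrative):

    graph
    |> ellipse({100, 60}, fill: :yellow, stroke: {2, :blue})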
"""
use Scenic.Primitive
@styles [:hidden, :fill, :stroke]
# ============================================================================
# data verification and serialization
# --------------------------------------------------------
@doc false
def info(data),
do: """
#{IO.ANSI.red()}#{__MODULE__} data must be: {radius_1, radius_2}
#{IO.ANSI.yellow()}Received: #{inspect(data)}
#{IO.ANSI.default_color()}
"""
# --------------------------------------------------------
@doc false
def verify(data) do
normalize(data)
{:ok, data}
rescue
_ -> :invalid_data
end
# --------------------------------------------------------
@doc false
@spec normalize({number(), number()}) :: {number(), number()}
def normalize({r1, r2} = data) when is_number(r1) and is_number(r2) do
data
end
# ============================================================================
@doc """
Returns a list of styles recognized by this primitive.
"""
@spec valid_styles() :: [:fill | :hidden | :stroke, ...]
def valid_styles(), do: @styles
# --------------------------------------------------------
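# Worked example (illustrative numbers): for radii {100, 50}, the point {50, 25}
# gives 50*50/(100*100) + 25*25/(50*50) = 0.25 + 0.25 = 0.5, which is <= 1,
# so the point is inside the ellipse.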
def contains_point?({r1, r2}, {xp, yp}) do
dx = xp * xp / (r1 * r1)
dy = yp * yp / (r2 * r2)
# test if less or equal to 1
dx + dy <= 1
end
end
| lib/scenic/primitive/ellipse.ex | 0.927486 | 0.725114 | ellipse.ex | starcoder |
defmodule OpenTelemetry.Honeycomb.Event do
@moduledoc """
Event structure.
Honeycomb events bind a timestamp to data as described in `t:t/0` below. The `data` corresponds
to OpenTelemetry span attributes, with limitations dictated by the intersection of the two
data models. For information on how we clean and flatten span attributes before sending them,
see `OpenTelemetry.Honeycomb.Attributes`.
Honeycomb event attributes for trace handling can collide with other attributes. For information
on the defaults and how to change them, see `OpenTelemetry.Honeycomb.Config.AttributeMap`.
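For example, building an event by hand (the attribute keys below are illustrative):

    %OpenTelemetry.Honeycomb.Event{
      time: OpenTelemetry.Honeycomb.Event.now(),
      data: %{"name" => "checkout", "duration_ms" => 12}
    }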
"""
alias OpenTelemetry.Honeycomb.Attributes
alias OpenTelemetry.Honeycomb.Config.AttributeMap
alias OpenTelemetry.Records.Span
@enforce_keys [:time, :data]
defstruct [:time, :data, samplerate: 1]
@typedoc """
Span attributes after flattening.
"""
@type event_data :: %{optional(String.t()) => OpenTelemetry.attribute_value()}
@typedoc """
Honeycomb event suitable for POSTing to their batch API.
* `time`: the event timestamp; [MUST] be in ISO 8601 format, e.g. `"2019-05-17T09:55:12.622658Z"`
* `data`: `t:event_data/0` after flattening.
* `samplerate`: the sample rate, as a positive integer; `1_000` describes a `1:1000` ratio.
[MUST]: https://tools.ietf.org/html/rfc2119#section-1
"""
@type t :: %__MODULE__{
time: String.t(),
samplerate: pos_integer(),
data: event_data()
}
@doc """
The current UTC time in ISO 8601 format, e.g. `"2019-05-17T09:55:12.622658Z"`
Useful when creating events manually.
"""
@spec now() :: String.t()
def now do
DateTime.utc_now() |> DateTime.to_iso8601()
end
@doc """
Convert one OpenTelemetry span to an event suitable for POSTing to the
[Honeycomb Events API][HCevents].
[HCevents]: https://docs.honeycomb.io/api/events/
"""
@spec from_otel_span(
:opentelemetry.span(),
resource_attributes :: OpenTelemetry.attributes(),
attribute_map :: AttributeMap.t()
) :: [t()]
def from_otel_span(otel_span, resource_attributes, attribute_map) do
span = Span.from(otel_span)
data =
span
|> Map.get(:attributes)
|> Attributes.clean()
|> Attributes.merge(resource_attributes)
|> Attributes.merge(extracted_attributes(span, attribute_map))
|> Enum.into(%{}, &Attributes.trim_long_strings/1)
time =
span.start_time
|> :opentelemetry.convert_timestamp(:microsecond)
|> DateTime.from_unix!(:microsecond)
|> DateTime.to_iso8601()
[%__MODULE__{time: time, data: data}]
end
# span attribute extractors
@spec extracted_attributes(Span.t(), AttributeMap.t()) :: OpenTelemetry.attributes()
defp extracted_attributes(%Span{} = span, attribute_map) do
attribute_mapper = get_attribute_mapper(attribute_map)
[
duration_ms: ms(span.end_time) - ms(span.start_time),
name: span.name,
parent_span_id: hexify_span_id(span.parent_span_id),
span_id: hexify_span_id(span.span_id),
trace_id: hexify_trace_id(span.trace_id)
]
|> Enum.map(attribute_mapper)
|> Enum.filter(&has_binary_key?/1)
|> Attributes.sort()
end
@spec get_attribute_mapper(attribute_map :: AttributeMap.t()) ::
({atom(), OpenTelemetry.attribute_value()} ->
{OpenTelemetry.attribute_key(), OpenTelemetry.attribute_value()})
defp get_attribute_mapper(map) do
fn {k, v} -> {Map.get(map, k), v} end
end
defp has_binary_key?({k, _}) when is_binary(k), do: true
defp has_binary_key?(_), do: false
defp hexify_trace_id(:undefined), do: nil
defp hexify_trace_id(n), do: :io_lib.format("~32.16.0b", [n]) |> to_string()
defp hexify_span_id(:undefined), do: nil
defp hexify_span_id(n), do: :io_lib.format("~16.16.0b", [n]) |> to_string()
defp ms(t), do: :opentelemetry.convert_timestamp(t, :microsecond) / 1000
end
| lib/open_telemetry/honeycomb/event.ex | 0.898443 | 0.525186 | event.ex | starcoder |
defmodule Sptfy.Client do
@moduledoc false
alias Sptfy.Client.{Document, HTTP, Parameter, Placeholder, ResponseHandler, ReturnType}
defmacro __using__(_) do
quote location: :keep do
import Sptfy.Client
import Sptfy.Client.BodyMapper
end
end
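# Usage sketch (the endpoint, function name and options below are hypothetical; the
# real call sites live in the API modules that `use Sptfy.Client`):
#
#     defmodule Sptfy.Profile do
#       use Sptfy.Client
#
#       get "/v1/me", as: :get_my_profile, query: [], mapping: ...
#     end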
defmacro get(path, opts) do
[as: fun, query: query, mapping: mapping] = Keyword.take(opts, [:as, :query, :mapping])
placeholders = Placeholder.extract(path)
placeholder_keys = Keyword.keys(placeholders)
type_ast = (ReturnType.ast(mapping) || Keyword.get(opts, :return_type)) |> ReturnType.or_error()
quote location: :keep do
@doc Document.build("GET", unquote(path), unquote(placeholders) ++ unquote(query))
@spec unquote(fun)(token :: String.t(), params :: map() | Keyword.t()) :: unquote(type_ast)
def unquote(fun)(token, params \\ %{})
def unquote(fun)(token, params) when is_list(params) do
unquote(fun)(token, Enum.into(params, %{}))
end
def unquote(fun)(token, params) when is_map(params) do
query_params = Parameter.prepare(params, unquote(query))
path_params = Parameter.prepare(params, unquote(placeholder_keys))
filled_path = Placeholder.fill(unquote(path), path_params)
Parameter.check_required!(params, unquote(placeholders) ++ unquote(query))
case HTTP.get(token, filled_path, query_params) do
{:ok, response} -> ResponseHandler.handle(response, unquote(mapping))
error -> error
end
end
end
end
defmacro post(path, opts) do
[as: fun, query: query, body: body, mapping: mapping] = Keyword.take(opts, [:as, :query, :body, :mapping])
placeholders = Placeholder.extract(path)
placeholder_keys = Keyword.keys(placeholders)
type_ast = (ReturnType.ast(mapping) || Keyword.get(opts, :return_type)) |> ReturnType.or_error()
quote location: :keep do
@doc Document.build("POST", unquote(path), unquote(placeholders) ++ unquote(query) ++ unquote(body))
@spec unquote(fun)(token :: String.t(), params :: map() | Keyword.t()) :: unquote(type_ast)
def unquote(fun)(token, params \\ %{})
def unquote(fun)(token, params) when is_list(params) do
unquote(fun)(token, Enum.into(params, %{}))
end
def unquote(fun)(token, params) when is_map(params) do
query_params = Parameter.prepare(params, unquote(query))
body_params = Parameter.prepare(params, unquote(body))
path_params = Parameter.prepare(params, unquote(placeholder_keys))
filled_path = Placeholder.fill(unquote(path), path_params)
Parameter.check_required!(params, unquote(placeholders) ++ unquote(query) ++ unquote(body))
case HTTP.post(token, filled_path, query_params, body_params) do
{:ok, response} -> ResponseHandler.handle(response, unquote(mapping))
error -> error
end
end
end
end
defmacro put(path, opts) do
[as: fun, query: query, body: body, mapping: mapping] = Keyword.take(opts, [:as, :query, :body, :mapping])
placeholders = Placeholder.extract(path)
placeholder_keys = Keyword.keys(placeholders)
type_ast = (ReturnType.ast(mapping) || Keyword.get(opts, :return_type)) |> ReturnType.or_error()
quote location: :keep do
@doc Document.build("PUT", unquote(path), unquote(placeholders) ++ unquote(query) ++ unquote(body))
@spec unquote(fun)(token :: String.t(), params :: map() | Keyword.t()) :: unquote(type_ast)
def unquote(fun)(token, params \\ %{})
def unquote(fun)(token, params) when is_list(params) do
unquote(fun)(token, Enum.into(params, %{}))
end
def unquote(fun)(token, params) when is_map(params) do
query_params = Parameter.prepare(params, unquote(query))
body_params = Parameter.prepare(params, unquote(body))
path_params = Parameter.prepare(params, unquote(placeholder_keys))
filled_path = Placeholder.fill(unquote(path), path_params)
Parameter.check_required!(params, unquote(placeholders) ++ unquote(query) ++ unquote(body))
case HTTP.put(token, filled_path, query_params, body_params) do
{:ok, response} -> ResponseHandler.handle(response, unquote(mapping))
error -> error
end
end
end
end
defmacro put_jpeg(path, opts) do
[as: fun, query: query, mapping: mapping] = Keyword.take(opts, [:as, :query, :mapping])
placeholders = Placeholder.extract(path)
placeholder_keys = Keyword.keys(placeholders)
type_ast = (ReturnType.ast(mapping) || Keyword.get(opts, :return_type)) |> ReturnType.or_error()
quote location: :keep do
@doc Document.build("PUT", unquote(path), unquote(placeholders) ++ unquote(query))
@spec unquote(fun)(token :: String.t(), base64_body :: String.t(), params :: map() | Keyword.t()) :: unquote(type_ast)
def unquote(fun)(token, base64_body, params \\ %{})
def unquote(fun)(token, base64_body, params) when is_list(params) do
unquote(fun)(token, base64_body, Enum.into(params, %{}))
end
def unquote(fun)(token, base64_body, params) when is_map(params) do
query_params = Parameter.prepare(params, unquote(query))
path_params = Parameter.prepare(params, unquote(placeholder_keys))
filled_path = Placeholder.fill(unquote(path), path_params)
Parameter.check_required!(params, unquote(placeholders) ++ unquote(query))
case HTTP.put_jpeg(token, filled_path, query_params, base64_body) do
{:ok, response} -> ResponseHandler.handle(response, unquote(mapping))
error -> error
end
end
end
end
defmacro delete(path, opts) do
[as: fun, query: query, body: body, mapping: mapping] = Keyword.take(opts, [:as, :query, :body, :mapping])
placeholders = Placeholder.extract(path)
placeholder_keys = Keyword.keys(placeholders)
type_ast = (ReturnType.ast(mapping) || Keyword.get(opts, :return_type)) |> ReturnType.or_error()
quote location: :keep do
@doc Document.build("DELETE", unquote(path), unquote(placeholders) ++ unquote(query) ++ unquote(body))
@spec unquote(fun)(token :: String.t(), params :: map() | Keyword.t()) :: unquote(type_ast)
def unquote(fun)(token, params \\ %{})
def unquote(fun)(token, params) when is_list(params) do
unquote(fun)(token, Enum.into(params, %{}))
end
def unquote(fun)(token, params) when is_map(params) do
query_params = Parameter.prepare(params, unquote(query))
body_params = Parameter.prepare(params, unquote(body))
path_params = Parameter.prepare(params, unquote(placeholder_keys))
filled_path = Placeholder.fill(unquote(path), path_params)
Parameter.check_required!(params, unquote(placeholders) ++ unquote(query) ++ unquote(body))
case HTTP.delete(token, filled_path, query_params, body_params) do
{:ok, response} -> ResponseHandler.handle(response, unquote(mapping))
error -> error
end
end
end
end
end
| lib/sptfy/client.ex | 0.829112 | 0.439507 | client.ex | starcoder |
defmodule GitHubActions.Config do
@moduledoc """
A simple keyword-based configuration API.
## Examples
This module is used to define the configuration for `GitHubActions`.
```elixir
import GitHubActions.Config
config :linux,
name: "Ubuntu",
runs_on: "ubuntu-latest"
config key: "value"
```
"""
alias GitHubActions.Access
@config_key __MODULE__
@type key :: atom()
@type keys :: [atom()]
@type value :: any()
@type config :: keyword()
@doc """
Reads the configuration from the given `path`.
"""
@spec read(Path.t()) :: :ok | {:error, :enoent}
def read(path) do
path |> File.read!() |> Code.eval_string()
:ok
end
@doc """
Returns the configuration.
"""
@spec config :: config()
def config, do: Process.get(@config_key) || []
@doc """
Adds the given `value` to the configuration under the given `key`.
Returns the configuration that was previously stored.
"""
@spec config(key(), value()) :: config() | nil
def config(key, value) when is_atom(key), do: add([{key, value}])
@doc """
Adds the given data to the configuration.
Returns the configuration that was previously stored.
"""
@spec config(config()) :: config() | nil
def config(data) when is_list(data) do
unless Keyword.keyword?(data) do
raise ArgumentError, "config/1 expected a keyword list, got: #{inspect(data)}"
end
add(data)
end
@doc """
Returns the value for `key` or `keys`.
If the configuration parameter does not exist, the function returns the
default value.
## Examples
iex> Config.get(:jobs)
[:linux]
iex> Config.get(:foo, :bar)
:bar
iex> Config.get([:linux, :runs_on])
"ubuntu-latest"
iex> Config.get(:foo)
nil
"""
@spec get(key() | keys(), value()) :: value()
def get(keys, default \\ nil), do: Access.get(config!(), keys, default)
@doc """
Returns the value for `key` or `keys` in a tuple.
If the configuration parameter does not exist, the function returns `error`.
## Examples
iex> Config.fetch(:jobs)
{:ok, [:linux]}
iex> Config.fetch(:foo)
:error
iex> Config.fetch([:linux, :name])
{:ok, "Ubuntu"}
"""
@spec fetch(key() | keys()) :: {:ok, value()} | :error
def fetch(keys), do: Access.fetch(config!(), keys)
@doc """
Returns the value for `key` or `keys`.
## Examples
iex> Config.fetch!(:jobs)
[:linux]
iex> Config.fetch!([:linux, :runs_on])
"ubuntu-latest"
iex> Config.fetch!([:linux, :foo])
** (KeyError) key :foo not found in: [name: \"Ubuntu\", runs_on: \"ubuntu-latest\"]
"""
def fetch!(keys), do: Access.fetch!(config!(), keys)
defp add([{key, value}] = data) when is_atom(key) and is_list(value) do
case Keyword.keyword?(value) do
true ->
merge(key, config(), value) |> put()
false ->
config() |> Keyword.merge(data) |> put()
end
end
defp add(data), do: config() |> Keyword.merge(data) |> put
defp merge(key, config, data) do
new = config |> Keyword.get(key, []) |> Keyword.merge(data)
Keyword.put(config, key, new)
end
defp put(value), do: Process.put(@config_key, value)
defp config!, do: Process.get(@config_key) || raise("No configuration available")
end
| lib/git_hub_actions/config.ex | 0.856122 | 0.676132 | config.ex | starcoder |
defmodule Grizzly.CommandClass.Mappings do
@type command_class_name :: atom
@type command_class_type :: :raw | :network | :application | :management
@type basic_class_name :: :controller | :static_controller | :routing_slave | :slave
@type specific_type_name :: atom()
@type generic_type_name :: atom()
@type command_class_byte :: byte()
@type command_class_unk :: {:unk, byte | command_class_name()}
@type specific_cmd_class_unk ::
{:unk, byte() | generic_type_name(), byte | specific_type_name()}
@known_network_command_classes [0x34]
require Logger
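# Example (round-tripping a known command class through the mappings below):
#
#     iex> Grizzly.CommandClass.Mappings.from_byte(0x25)
#     :switch_binary
#     iex> Grizzly.CommandClass.Mappings.to_byte(:switch_binary)
#     0x25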
@spec from_byte(byte) :: command_class_name | command_class_unk
def from_byte(0x02), do: :zensor_net
def from_byte(0x20), do: :basic
def from_byte(0x21), do: :controller_replication
def from_byte(0x22), do: :application_status
def from_byte(0x23), do: :zip
def from_byte(0x24), do: :security_panel_mode
def from_byte(0x25), do: :switch_binary
def from_byte(0x26), do: :switch_multilevel
def from_byte(0x27), do: :switch_all
def from_byte(0x28), do: :switch_toggle_binary
def from_byte(0x2A), do: :chimney_fan
def from_byte(0x2B), do: :scene_activation
def from_byte(0x2C), do: :scene_actuator_conf
def from_byte(0x2D), do: :scene_controller_conf
def from_byte(0x2E), do: :security_panel_zone
def from_byte(0x2F), do: :security_panel_zone_sensor
def from_byte(0x30), do: :sensor_binary
def from_byte(0x31), do: :sensor_multilevel
def from_byte(0x32), do: :meter
def from_byte(0x33), do: :switch_color
def from_byte(0x34), do: :network_management_inclusion
def from_byte(0x35), do: :meter_pulse
def from_byte(0x36), do: :basic_tariff_info
def from_byte(0x37), do: :hrv_status
def from_byte(0x38), do: :thermostat_heating
def from_byte(0x39), do: :hrv_control
def from_byte(0x3A), do: :dcp_config
def from_byte(0x3B), do: :dcp_monitor
def from_byte(0x3C), do: :meter_tbl_config
def from_byte(0x3D), do: :meter_tbl_monitor
def from_byte(0x3E), do: :meter_tbl_push
def from_byte(0x3F), do: :prepayment
def from_byte(0x40), do: :thermostat_mode
def from_byte(0x41), do: :prepayment_encapsulation
def from_byte(0x42), do: :operating_state
def from_byte(0x43), do: :thermostat_setpoint
def from_byte(0x44), do: :thermostat_fan_mode
def from_byte(0x45), do: :thermostat_fan_state
def from_byte(0x46), do: :climate_control_schedule
def from_byte(0x47), do: :thermostat_setback
def from_byte(0x48), do: :rate_tbl_config
def from_byte(0x49), do: :rate_tbl_monitor
def from_byte(0x4A), do: :tariff_config
def from_byte(0x4B), do: :tariff_tbl_monitor
def from_byte(0x4C), do: :door_lock_logging
def from_byte(0x4D), do: :network_management_basic
def from_byte(0x4E), do: :schedule_entry_lock
def from_byte(0x4F), do: :zip_6lowpan
def from_byte(0x50), do: :basic_window_covering
def from_byte(0x51), do: :mtp_window_covering
def from_byte(0x52), do: :network_management_proxy
def from_byte(0x53), do: :schedule
def from_byte(0x54), do: :network_management_primary
def from_byte(0x55), do: :transport_service
def from_byte(0x56), do: :crc_16_encap
def from_byte(0x57), do: :application_capability
def from_byte(0x58), do: :zip_nd
def from_byte(0x59), do: :association_group_info
def from_byte(0x5A), do: :device_rest_locally
def from_byte(0x5B), do: :central_scene
def from_byte(0x5C), do: :ip_association
def from_byte(0x5D), do: :antitheft
def from_byte(0x5E), do: :zwaveplus_info
def from_byte(0x5F), do: :zip_gateway
def from_byte(0x61), do: :zip_portal
def from_byte(0x62), do: :door_lock
def from_byte(0x63), do: :user_code
def from_byte(0x64), do: :humidity_control_setpoint
def from_byte(0x65), do: :dmx
def from_byte(0x66), do: :barrier_operator
def from_byte(0x67), do: :network_management_installation_maintenance
def from_byte(0x68), do: :zip_naming
def from_byte(0x69), do: :mailbox
def from_byte(0x6A), do: :window_covering
def from_byte(0x6B), do: :irrigation
def from_byte(0x6C), do: :supervision
def from_byte(0x6D), do: :humidity_control_mode
def from_byte(0x6E), do: :humidity_control_operating_state
def from_byte(0x6F), do: :entry_control
def from_byte(0x70), do: :configuration
def from_byte(0x71), do: :alarm
def from_byte(0x72), do: :manufacturer_specific
def from_byte(0x73), do: :powerlevel
def from_byte(0x74), do: :inclusion_controller
def from_byte(0x75), do: :protection
def from_byte(0x76), do: :lock
def from_byte(0x77), do: :node_naming
def from_byte(0x78), do: :node_provisioning
def from_byte(0x7A), do: :firmware_update_md
def from_byte(0x7B), do: :grouping_name
def from_byte(0x7C), do: :remote_association_activate
def from_byte(0x7D), do: :remote_association
def from_byte(0x80), do: :battery
def from_byte(0x81), do: :clock
def from_byte(0x82), do: :hail
def from_byte(0x84), do: :wake_up
def from_byte(0x85), do: :association
def from_byte(0x86), do: :command_class_version
def from_byte(0x87), do: :indicator
def from_byte(0x88), do: :proprietary
def from_byte(0x89), do: :language
def from_byte(0x8A), do: :time
def from_byte(0x8B), do: :time_parameters
def from_byte(0x8C), do: :geographic_location
def from_byte(0x8E), do: :multi_channel_association
def from_byte(0x8F), do: :multi_cmd
def from_byte(0x90), do: :energy_production
def from_byte(0x91), do: :manufacturer_proprietary
def from_byte(0x92), do: :screen_md
def from_byte(0x93), do: :screen_attributes
def from_byte(0x94), do: :simple_av_control
def from_byte(0x95), do: :av_content_directory_md
def from_byte(0x96), do: :av_content_renderer_status
def from_byte(0x97), do: :av_content_search_md
def from_byte(0x98), do: :security
def from_byte(0x99), do: :av_tagging_md
def from_byte(0x9A), do: :ip_configuration
def from_byte(0x9B), do: :association_command_configuration
def from_byte(0x9C), do: :sensor_alarm
def from_byte(0x9D), do: :silence_alarm
def from_byte(0x9E), do: :sensor_configuration
def from_byte(0x9F), do: :security_2
def from_byte(0xEF), do: :mark
def from_byte(0xF0), do: :non_interoperable
def from_byte(byte) do
_ = Logger.warn("Unknown command class byte #{Integer.to_string(byte, 16)}")
{:unk, byte}
end
@spec to_byte(command_class_name) :: command_class_byte() | command_class_unk()
def to_byte(:zensor_net), do: 0x02
def to_byte(:basic), do: 0x20
def to_byte(:controller_replication), do: 0x21
def to_byte(:application_status), do: 0x22
def to_byte(:zip), do: 0x23
def to_byte(:security_panel_mode), do: 0x24
def to_byte(:switch_binary), do: 0x25
def to_byte(:switch_multilevel), do: 0x26
def to_byte(:switch_all), do: 0x27
def to_byte(:switch_toggle_binary), do: 0x28
def to_byte(:chimney_fan), do: 0x2A
def to_byte(:scene_activation), do: 0x2B
def to_byte(:scene_actuator_conf), do: 0x2C
def to_byte(:scene_controller_conf), do: 0x2D
def to_byte(:security_panel_zone), do: 0x2E
def to_byte(:security_panel_zone_sensor), do: 0x2F
def to_byte(:sensor_binary), do: 0x30
def to_byte(:sensor_multilevel), do: 0x31
def to_byte(:meter), do: 0x32
def to_byte(:switch_color), do: 0x33
def to_byte(:network_management_inclusion), do: 0x34
def to_byte(:meter_pulse), do: 0x35
def to_byte(:basic_tariff_info), do: 0x36
def to_byte(:hrv_status), do: 0x37
def to_byte(:thermostat_heating), do: 0x38
def to_byte(:hrv_control), do: 0x39
def to_byte(:dcp_config), do: 0x3A
def to_byte(:dcp_monitor), do: 0x3B
def to_byte(:meter_tbl_config), do: 0x3C
def to_byte(:meter_tbl_monitor), do: 0x3D
def to_byte(:meter_tbl_push), do: 0x3E
def to_byte(:prepayment), do: 0x3F
def to_byte(:thermostat_mode), do: 0x40
def to_byte(:prepayment_encapsulation), do: 0x41
def to_byte(:operating_state), do: 0x42
def to_byte(:thermostat_setpoint), do: 0x43
def to_byte(:thermostat_fan_mode), do: 0x44
def to_byte(:thermostat_fan_state), do: 0x45
def to_byte(:climate_control_schedule), do: 0x46
def to_byte(:thermostat_setback), do: 0x47
def to_byte(:rate_tbl_config), do: 0x48
def to_byte(:rate_tbl_monitor), do: 0x49
def to_byte(:tariff_config), do: 0x4A
def to_byte(:tariff_tbl_monitor), do: 0x4B
def to_byte(:door_lock_logging), do: 0x4C
def to_byte(:network_management_basic), do: 0x4D
def to_byte(:schedule_entry_lock), do: 0x4E
def to_byte(:zip_6lowpan), do: 0x4F
def to_byte(:basic_window_covering), do: 0x50
def to_byte(:mtp_window_covering), do: 0x51
def to_byte(:network_management_proxy), do: 0x52
def to_byte(:schedule), do: 0x53
def to_byte(:network_management_primary), do: 0x54
def to_byte(:transport_service), do: 0x55
def to_byte(:crc_16_encap), do: 0x56
def to_byte(:application_capability), do: 0x57
def to_byte(:zip_nd), do: 0x58
def to_byte(:association_group_info), do: 0x59
def to_byte(:device_rest_locally), do: 0x5A
def to_byte(:central_scene), do: 0x5B
def to_byte(:ip_association), do: 0x5C
def to_byte(:antitheft), do: 0x5D
def to_byte(:zwaveplus_info), do: 0x5E
def to_byte(:zip_gateway), do: 0x5F
def to_byte(:zip_portal), do: 0x61
def to_byte(:door_lock), do: 0x62
def to_byte(:user_code), do: 0x63
def to_byte(:humidity_control_setpoint), do: 0x64
def to_byte(:dmx), do: 0x65
def to_byte(:barrier_operator), do: 0x66
def to_byte(:network_management_installation_maintenance), do: 0x67
def to_byte(:zip_naming), do: 0x68
def to_byte(:mailbox), do: 0x69
def to_byte(:window_covering), do: 0x6A
def to_byte(:irrigation), do: 0x6B
def to_byte(:supervision), do: 0x6C
def to_byte(:humidity_control_mode), do: 0x6D
def to_byte(:humidity_control_operating_state), do: 0x6E
def to_byte(:entry_control), do: 0x6F
def to_byte(:configuration), do: 0x70
def to_byte(:alarm), do: 0x71
def to_byte(:manufacturer_specific), do: 0x72
def to_byte(:powerlevel), do: 0x73
def to_byte(:inclusion_controller), do: 0x74
def to_byte(:protection), do: 0x75
def to_byte(:lock), do: 0x76
def to_byte(:node_naming), do: 0x77
def to_byte(:node_provisioning), do: 0x78
def to_byte(:firmware_update_md), do: 0x7A
def to_byte(:grouping_name), do: 0x7B
def to_byte(:remote_association_activate), do: 0x7C
def to_byte(:remote_association), do: 0x7D
def to_byte(:battery), do: 0x80
def to_byte(:clock), do: 0x81
def to_byte(:hail), do: 0x82
def to_byte(:wake_up), do: 0x84
def to_byte(:association), do: 0x85
def to_byte(:command_class_version), do: 0x86
def to_byte(:indicator), do: 0x87
def to_byte(:proprietary), do: 0x88
def to_byte(:language), do: 0x89
def to_byte(:time), do: 0x8A
def to_byte(:time_parameters), do: 0x8B
def to_byte(:geographic_location), do: 0x8C
def to_byte(:multi_channel_association), do: 0x8E
def to_byte(:multi_cmd), do: 0x8F
def to_byte(:energy_production), do: 0x90
def to_byte(:manufacturer_proprietary), do: 0x91
def to_byte(:screen_md), do: 0x92
def to_byte(:screen_attributes), do: 0x93
def to_byte(:simple_av_control), do: 0x94
def to_byte(:av_content_directory_md), do: 0x95
def to_byte(:av_content_renderer_status), do: 0x96
def to_byte(:av_content_search_md), do: 0x97
def to_byte(:security), do: 0x98
def to_byte(:av_tagging_md), do: 0x99
def to_byte(:ip_configuration), do: 0x9A
def to_byte(:association_command_configuration), do: 0x9B
def to_byte(:sensor_alarm), do: 0x9C
def to_byte(:silence_alarm), do: 0x9D
def to_byte(:sensor_configuration), do: 0x9E
def to_byte(:security_2), do: 0x9F
def to_byte(:mark), do: 0xEF
def to_byte(:non_interoperable), do: 0xF0
def to_byte(command_class) do
_ = Logger.warn("Unknown command class name #{inspect(command_class)}")
{:unk, command_class}
end
@spec command_from_byte(command_class :: byte, command :: byte) ::
command_class_name() | command_class_unk()
def command_from_byte(0x25, 0x01), do: :set
def command_from_byte(0x25, 0x02), do: :get
def command_from_byte(0x25, 0x03), do: :switch_binary_report
def command_from_byte(0x31, 0x05), do: :sensor_multilevel_report
def command_from_byte(0x32, 0x02), do: :meter_report
def command_from_byte(0x34, 0x02), do: :node_add_status
def command_from_byte(0x34, 0x04), do: :node_remove_status
def command_from_byte(0x43, 0x03), do: :thermostat_setpoint_report
def command_from_byte(0x52, 0x01), do: :node_list_get
def command_from_byte(0x52, 0x03), do: :node_info_cache
def command_from_byte(0x5A, 0x01), do: :device_rest_locally_notification
def command_from_byte(0x69, 0x03), do: :mailbox_configuration_report
def command_from_byte(0x71, 0x05), do: :zwave_alarm_event
def command_from_byte(0x72, 0x04), do: :manufacturer_specific_get
def command_from_byte(0x72, 0x05), do: :manufacturer_specific_report
def command_from_byte(0x80, 0x02), do: :get
def command_from_byte(0x84, 0x05), do: :interval_get
def command_from_byte(0x84, 0x06), do: :interval_report
def command_from_byte(0x84, 0x0A), do: :interval_capabilities_report
def command_from_byte(0x85, 0x01), do: :set
def command_from_byte(0x85, 0x03), do: :report
def command_from_byte(command_class_byte, byte) do
_ =
Logger.warn(
"Unknown command from byte #{Integer.to_string(byte, 16)} for command class byte #{
Integer.to_string(command_class_byte, 16)
}"
)
{:unk, byte}
end
@spec byte_to_basic_class(byte()) :: basic_class_name() | command_class_unk()
def byte_to_basic_class(0x01), do: :controller
def byte_to_basic_class(0x02), do: :static_controller
def byte_to_basic_class(0x03), do: :slave
def byte_to_basic_class(0x04), do: :routing_slave
def byte_to_basic_class(byte) do
_ = Logger.warn("Unknown basic class #{Integer.to_string(byte, 16)}")
{:unk, byte}
end
@spec byte_to_generic_class(byte()) :: generic_type_name() | command_class_unk()
def byte_to_generic_class(0x01), do: :generic_controller
def byte_to_generic_class(0x02), do: :static_controller
def byte_to_generic_class(0x03), do: :av_control_point
def byte_to_generic_class(0x04), do: :display
def byte_to_generic_class(0x05), do: :network_extender
def byte_to_generic_class(0x06), do: :appliance
def byte_to_generic_class(0x07), do: :sensor_notification
def byte_to_generic_class(0x08), do: :thermostat
def byte_to_generic_class(0x09), do: :window_covering
def byte_to_generic_class(0x0F), do: :repeater_slave
def byte_to_generic_class(0x10), do: :switch_binary
def byte_to_generic_class(0x11), do: :switch_multilevel
def byte_to_generic_class(0x12), do: :switch_remote
def byte_to_generic_class(0x13), do: :switch_toggle
def byte_to_generic_class(0x15), do: :zip_node
def byte_to_generic_class(0x16), do: :ventilation
def byte_to_generic_class(0x17), do: :security_panel
def byte_to_generic_class(0x18), do: :wall_controller
def byte_to_generic_class(0x20), do: :sensor_binary
def byte_to_generic_class(0x21), do: :sensor_multilevel
def byte_to_generic_class(0x30), do: :meter_pulse
def byte_to_generic_class(0x31), do: :meter
def byte_to_generic_class(0x40), do: :entry_control
def byte_to_generic_class(0x50), do: :semi_interoperable
def byte_to_generic_class(0xA1), do: :sensor_alarm
def byte_to_generic_class(0xFF), do: :non_interoperable
def byte_to_generic_class(byte) do
_ = Logger.warn("Unknown generic class #{Integer.to_string(byte, 16)}")
{:unk, byte}
end
@spec generic_class_to_byte(generic_type_name()) :: byte() | command_class_unk()
def generic_class_to_byte(:generic_controller), do: 0x01
def generic_class_to_byte(:static_controller), do: 0x02
def generic_class_to_byte(:av_control_point), do: 0x03
def generic_class_to_byte(:display), do: 0x04
def generic_class_to_byte(:network_extender), do: 0x05
def generic_class_to_byte(:appliance), do: 0x06
def generic_class_to_byte(:sensor_notification), do: 0x07
def generic_class_to_byte(:thermostat), do: 0x08
def generic_class_to_byte(:window_covering), do: 0x09
def generic_class_to_byte(:repeater_slave), do: 0x0F
def generic_class_to_byte(:switch_binary), do: 0x10
def generic_class_to_byte(:switch_multilevel), do: 0x11
def generic_class_to_byte(:switch_remote), do: 0x12
def generic_class_to_byte(:switch_toggle), do: 0x13
def generic_class_to_byte(:zip_node), do: 0x15
def generic_class_to_byte(:ventilation), do: 0x16
def generic_class_to_byte(:security_panel), do: 0x17
def generic_class_to_byte(:wall_controller), do: 0x18
def generic_class_to_byte(:sensor_binary), do: 0x20
def generic_class_to_byte(:sensor_multilevel), do: 0x21
def generic_class_to_byte(:meter_pulse), do: 0x30
def generic_class_to_byte(:meter), do: 0x31
def generic_class_to_byte(:entry_control), do: 0x40
def generic_class_to_byte(:semi_interoperable), do: 0x50
def generic_class_to_byte(:sensor_alarm), do: 0xA1
def generic_class_to_byte(:non_interoperable), do: 0xFF
def generic_class_to_byte(generic_class) do
_ = Logger.warn("Unknown generic class #{inspect(generic_class)}")
{:unk, generic_class}
end
@spec byte_to_specific_class(byte(), byte()) :: specific_type_name() | specific_cmd_class_unk()
def byte_to_specific_class(0x01, 0x00), do: :not_used
def byte_to_specific_class(0x01, 0x01), do: :portable_remote_controller
def byte_to_specific_class(0x01, 0x02), do: :portable_scene_controller
def byte_to_specific_class(0x01, 0x03), do: :installer_tool
def byte_to_specific_class(0x01, 0x04), do: :remote_control_av
def byte_to_specific_class(0x01, 0x06), do: :remote_control_simple
def byte_to_specific_class(0x02, 0x00), do: :not_used
def byte_to_specific_class(0x02, 0x01), do: :pc_controller
def byte_to_specific_class(0x02, 0x02), do: :scene_controller
def byte_to_specific_class(0x02, 0x03), do: :static_installer_tool
def byte_to_specific_class(0x02, 0x04), do: :set_top_box
def byte_to_specific_class(0x02, 0x05), do: :sub_system_controller
def byte_to_specific_class(0x02, 0x06), do: :tv
def byte_to_specific_class(0x02, 0x07), do: :gateway
def byte_to_specific_class(0x03, 0x00), do: :not_used
def byte_to_specific_class(0x03, 0x04), do: :satellite_receiver
def byte_to_specific_class(0x03, 0x11), do: :satellite_receiver_v2
def byte_to_specific_class(0x03, 0x12), do: :doorbell
def byte_to_specific_class(0x04, 0x00), do: :not_used
def byte_to_specific_class(0x04, 0x01), do: :simple_display
def byte_to_specific_class(0x05, 0x00), do: :not_used
def byte_to_specific_class(0x05, 0x01), do: :secure_extender
def byte_to_specific_class(0x06, 0x00), do: :not_used
def byte_to_specific_class(0x06, 0x01), do: :general_appliance
def byte_to_specific_class(0x06, 0x02), do: :kitchen_appliance
def byte_to_specific_class(0x06, 0x03), do: :laundry_appliance
def byte_to_specific_class(0x07, 0x00), do: :not_used
def byte_to_specific_class(0x07, 0x01), do: :notification_sensor
def byte_to_specific_class(0x08, 0x00), do: :not_used
def byte_to_specific_class(0x08, 0x01), do: :thermostat_heating
def byte_to_specific_class(0x08, 0x02), do: :thermostat_general
def byte_to_specific_class(0x08, 0x03), do: :setback_schedule_thermostat
def byte_to_specific_class(0x08, 0x04), do: :setpoint_thermostat
def byte_to_specific_class(0x08, 0x05), do: :setback_thermostat
def byte_to_specific_class(0x08, 0x06), do: :thermostat_general_v2
def byte_to_specific_class(0x09, 0x00), do: :not_used
def byte_to_specific_class(0x09, 0x01), do: :simple_window_covering
def byte_to_specific_class(0x10, 0x00), do: :not_used
def byte_to_specific_class(0x10, 0x01), do: :power_switch_binary
def byte_to_specific_class(0x10, 0x02), do: :color_tunable_binary
def byte_to_specific_class(0x10, 0x03), do: :scene_switch_binary
def byte_to_specific_class(0x10, 0x04), do: :power_strip
def byte_to_specific_class(0x10, 0x05), do: :siren
def byte_to_specific_class(0x10, 0x06), do: :valve_open_close
def byte_to_specific_class(0x10, 0x07), do: :irrigation_controller
def byte_to_specific_class(0x11, 0x00), do: :not_used
def byte_to_specific_class(0x11, 0x01), do: :power_switch_multilevel
def byte_to_specific_class(0x11, 0x02), do: :color_tunable_multilevel
def byte_to_specific_class(0x11, 0x03), do: :motor_multipositions
def byte_to_specific_class(0x11, 0x04), do: :scene_switch_multilevel
def byte_to_specific_class(0x11, 0x05), do: :class_a_motor_control
def byte_to_specific_class(0x11, 0x06), do: :class_b_motor_control
def byte_to_specific_class(0x11, 0x07), do: :class_c_motor_control
def byte_to_specific_class(0x11, 0x08), do: :fan_switch
def byte_to_specific_class(0x12, 0x00), do: :not_used
def byte_to_specific_class(0x12, 0x01), do: :switch_remote_binary
def byte_to_specific_class(0x12, 0x02), do: :switch_remote_multilevel
def byte_to_specific_class(0x12, 0x03), do: :switch_remote_toggle_binary
def byte_to_specific_class(0x12, 0x04), do: :switch_remote_toggle_multilevel
def byte_to_specific_class(0x13, 0x00), do: :not_used
def byte_to_specific_class(0x13, 0x01), do: :switch_toggle_binary
def byte_to_specific_class(0x13, 0x02), do: :switch_toggle_multilevel
def byte_to_specific_class(0x15, 0x00), do: :not_used
def byte_to_specific_class(0x15, 0x01), do: :zip_adv_node
def byte_to_specific_class(0x15, 0x02), do: :zip_tun_node
def byte_to_specific_class(0x17, 0x00), do: :not_used
def byte_to_specific_class(0x17, 0x01), do: :zoned_security_panel
def byte_to_specific_class(0x18, 0x00), do: :not_used
def byte_to_specific_class(0x18, 0x01), do: :basic_wall_controller
def byte_to_specific_class(0x20, 0x00), do: :not_used
def byte_to_specific_class(0x20, 0x01), do: :routing_sensor_binary
def byte_to_specific_class(0x21, 0x00), do: :not_used
def byte_to_specific_class(0x21, 0x01), do: :routing_sensor_multilevel
def byte_to_specific_class(0x21, 0x02), do: :chimney_fan
def byte_to_specific_class(0x30, 0x00), do: :not_used
def byte_to_specific_class(0x31, 0x00), do: :not_used
def byte_to_specific_class(0x31, 0x01), do: :simple_meter
def byte_to_specific_class(0x31, 0x02), do: :adv_energy_control
def byte_to_specific_class(0x31, 0x03), do: :whole_home_meter_simple
def byte_to_specific_class(0x40, 0x00), do: :not_used
def byte_to_specific_class(0x40, 0x01), do: :door_lock
def byte_to_specific_class(0x40, 0x02), do: :advanced_door_lock
def byte_to_specific_class(0x40, 0x03), do: :secure_keypad_door_lock
def byte_to_specific_class(0x40, 0x04), do: :secure_keypad_door_lock_deadbolt
def byte_to_specific_class(0x40, 0x05), do: :secure_door
def byte_to_specific_class(0x40, 0x06), do: :secure_gate
def byte_to_specific_class(0x40, 0x07), do: :secure_barrier_addon
def byte_to_specific_class(0x40, 0x08), do: :secure_barrier_open_only
def byte_to_specific_class(0x40, 0x09), do: :secure_barrier_close_only
def byte_to_specific_class(0x40, 0x0A), do: :secure_lockbox
def byte_to_specific_class(0x40, 0x0B), do: :secure_keypad
def byte_to_specific_class(0x50, 0x00), do: :not_used
def byte_to_specific_class(0x50, 0x01), do: :energy_production
def byte_to_specific_class(0xA1, 0x00), do: :not_used
def byte_to_specific_class(0xA1, 0x01), do: :basic_routing_alarm_sensor
def byte_to_specific_class(0xA1, 0x02), do: :routing_alarm_sensor
def byte_to_specific_class(0xA1, 0x03), do: :basic_zensor_net_alarm_sensor
def byte_to_specific_class(0xA1, 0x04), do: :zensor_net_alarm_sensor
def byte_to_specific_class(0xA1, 0x05), do: :adv_zensor_net_alarm_sensor
def byte_to_specific_class(0xA1, 0x06), do: :basic_routing_smoke_sensor
def byte_to_specific_class(0xA1, 0x07), do: :routing_smoke_sensor
def byte_to_specific_class(0xA1, 0x08), do: :basic_zensor_net_smoke_sensor
def byte_to_specific_class(0xA1, 0x09), do: :zensor_net_smoke_sensor
def byte_to_specific_class(0xA1, 0x0A), do: :adv_zensor_net_smoke_sensor
def byte_to_specific_class(0xA1, 0x0B), do: :alarm_sensor
def byte_to_specific_class(0xFF, 0x00), do: :not_used
def byte_to_specific_class(gen_byte, spec_byte) do
_ =
Logger.warn(
"Unknown specific class #{Integer.to_string(spec_byte, 16)} for generic class #{
Integer.to_string(gen_byte, 16)
}"
)
{:unk, gen_byte, spec_byte}
end
@spec specific_class_to_byte(generic_type_name(), specific_type_name()) ::
byte() | specific_cmd_class_unk()
def specific_class_to_byte(:generic_controller, :not_used), do: 0x00
def specific_class_to_byte(:generic_controller, :portable_remote_controller), do: 0x01
def specific_class_to_byte(:generic_controller, :portable_scene_controller), do: 0x02
def specific_class_to_byte(:generic_controller, :installer_tool), do: 0x03
def specific_class_to_byte(:generic_controller, :remote_control_av), do: 0x04
def specific_class_to_byte(:generic_controller, :remote_control_simple), do: 0x06
def specific_class_to_byte(:static_controller, :not_used), do: 0x00
def specific_class_to_byte(:static_controller, :pc_controller), do: 0x01
def specific_class_to_byte(:static_controller, :scene_controller), do: 0x02
def specific_class_to_byte(:static_controller, :static_installer_tool), do: 0x03
def specific_class_to_byte(:static_controller, :set_top_box), do: 0x04
def specific_class_to_byte(:static_controller, :sub_system_controller), do: 0x05
def specific_class_to_byte(:static_controller, :tv), do: 0x06
def specific_class_to_byte(:static_controller, :gateway), do: 0x07
def specific_class_to_byte(:av_control_point, :not_used), do: 0x00
def specific_class_to_byte(:av_control_point, :satellite_receiver), do: 0x04
def specific_class_to_byte(:av_control_point, :satellite_receiver_v2), do: 0x11
def specific_class_to_byte(:av_control_point, :doorbell), do: 0x12
def specific_class_to_byte(:display, :not_used), do: 0x00
def specific_class_to_byte(:display, :simple_display), do: 0x01
def specific_class_to_byte(:network_extender, :not_used), do: 0x00
def specific_class_to_byte(:network_extender, :secure_extender), do: 0x01
def specific_class_to_byte(:appliance, :not_used), do: 0x00
def specific_class_to_byte(:appliance, :general_appliance), do: 0x01
def specific_class_to_byte(:appliance, :kitchen_appliance), do: 0x02
def specific_class_to_byte(:appliance, :laundry_appliance), do: 0x03
def specific_class_to_byte(:sensor_notification, :not_used), do: 0x00
def specific_class_to_byte(:sensor_notification, :notification_sensor), do: 0x01
def specific_class_to_byte(:thermostat, :not_used), do: 0x00
def specific_class_to_byte(:thermostat, :thermostat_heating), do: 0x01
def specific_class_to_byte(:thermostat, :thermostat_general), do: 0x02
def specific_class_to_byte(:thermostat, :setback_schedule_thermostat), do: 0x03
def specific_class_to_byte(:thermostat, :setpoint_thermostat), do: 0x04
def specific_class_to_byte(:thermostat, :setback_thermostat), do: 0x05
def specific_class_to_byte(:thermostat, :thermostat_general_v2), do: 0x06
def specific_class_to_byte(:window_covering, :not_used), do: 0x00
def specific_class_to_byte(:window_covering, :simple_window_covering), do: 0x01
def specific_class_to_byte(:switch_binary, :not_used), do: 0x00
def specific_class_to_byte(:switch_binary, :power_switch_binary), do: 0x01
def specific_class_to_byte(:switch_binary, :color_tunable_binary), do: 0x02
def specific_class_to_byte(:switch_binary, :scene_switch_binary), do: 0x03
def specific_class_to_byte(:switch_binary, :power_strip), do: 0x04
def specific_class_to_byte(:switch_binary, :siren), do: 0x05
def specific_class_to_byte(:switch_binary, :valve_open_close), do: 0x06
def specific_class_to_byte(:switch_binary, :irrigation_controller), do: 0x07
def specific_class_to_byte(:switch_multilevel, :not_used), do: 0x00
def specific_class_to_byte(:switch_multilevel, :power_switch_multilevel), do: 0x01
def specific_class_to_byte(:switch_multilevel, :color_tunable_multilevel), do: 0x02
def specific_class_to_byte(:switch_multilevel, :motor_multipositions), do: 0x03
def specific_class_to_byte(:switch_multilevel, :scene_switch_multilevel), do: 0x04
def specific_class_to_byte(:switch_multilevel, :class_a_motor_control), do: 0x05
def specific_class_to_byte(:switch_multilevel, :class_b_motor_control), do: 0x06
def specific_class_to_byte(:switch_multilevel, :class_c_motor_control), do: 0x07
def specific_class_to_byte(:switch_multilevel, :fan_switch), do: 0x08
def specific_class_to_byte(:switch_remote, :not_used), do: 0x00
def specific_class_to_byte(:switch_remote, :switch_remote_binary), do: 0x01
def specific_class_to_byte(:switch_remote, :switch_remote_multilevel), do: 0x02
def specific_class_to_byte(:switch_remote, :switch_remote_toggle_binary), do: 0x03
def specific_class_to_byte(:switch_remote, :switch_remote_toggle_multilevel), do: 0x04
def specific_class_to_byte(:switch_toggle, :not_used), do: 0x00
def specific_class_to_byte(:switch_toggle, :switch_toggle_binary), do: 0x01
def specific_class_to_byte(:switch_toggle, :switch_toggle_multilevel), do: 0x02
def specific_class_to_byte(:zip_node, :not_used), do: 0x00
def specific_class_to_byte(:zip_node, :zip_adv_node), do: 0x01
def specific_class_to_byte(:zip_node, :zip_tun_node), do: 0x02
def specific_class_to_byte(:security_panel, :not_used), do: 0x00
def specific_class_to_byte(:security_panel, :zoned_security_panel), do: 0x01
def specific_class_to_byte(:wall_controller, :not_used), do: 0x00
def specific_class_to_byte(:wall_controller, :basic_wall_controller), do: 0x01
def specific_class_to_byte(:sensor_binary, :not_used), do: 0x00
def specific_class_to_byte(:sensor_binary, :routing_sensor_binary), do: 0x01
def specific_class_to_byte(:sensor_multilevel, :not_used), do: 0x00
def specific_class_to_byte(:sensor_multilevel, :routing_sensor_multilevel), do: 0x01
def specific_class_to_byte(:sensor_multilevel, :chimney_fan), do: 0x02
def specific_class_to_byte(:meter_pulse, :not_used), do: 0x00
def specific_class_to_byte(:meter, :not_used), do: 0x00
def specific_class_to_byte(:meter, :simple_meter), do: 0x01
def specific_class_to_byte(:meter, :adv_energy_control), do: 0x02
def specific_class_to_byte(:meter, :whole_home_meter_simple), do: 0x03
def specific_class_to_byte(:entry_control, :not_used), do: 0x00
def specific_class_to_byte(:entry_control, :door_lock), do: 0x01
def specific_class_to_byte(:entry_control, :advanced_door_lock), do: 0x02
def specific_class_to_byte(:entry_control, :secure_keypad_door_lock), do: 0x03
def specific_class_to_byte(:entry_control, :secure_keypad_door_lock_deadbolt), do: 0x04
def specific_class_to_byte(:entry_control, :secure_door), do: 0x05
def specific_class_to_byte(:entry_control, :secure_gate), do: 0x06
def specific_class_to_byte(:entry_control, :secure_barrier_addon), do: 0x07
def specific_class_to_byte(:entry_control, :secure_barrier_open_only), do: 0x08
def specific_class_to_byte(:entry_control, :secure_barrier_close_only), do: 0x09
def specific_class_to_byte(:entry_control, :secure_lockbox), do: 0x0A
def specific_class_to_byte(:entry_control, :secure_keypad), do: 0x0B
def specific_class_to_byte(:semi_interoperable, :not_used), do: 0x00
def specific_class_to_byte(:semi_interoperable, :energy_production), do: 0x01
def specific_class_to_byte(:sensor_alarm, :not_used), do: 0x00
def specific_class_to_byte(:sensor_alarm, :basic_routing_alarm_sensor), do: 0x01
def specific_class_to_byte(:sensor_alarm, :routing_alarm_sensor), do: 0x02
def specific_class_to_byte(:sensor_alarm, :basic_zensor_net_alarm_sensor), do: 0x03
def specific_class_to_byte(:sensor_alarm, :zensor_net_alarm_sensor), do: 0x04
def specific_class_to_byte(:sensor_alarm, :adv_zensor_net_alarm_sensor), do: 0x05
def specific_class_to_byte(:sensor_alarm, :basic_routing_smoke_sensor), do: 0x06
def specific_class_to_byte(:sensor_alarm, :routing_smoke_sensor), do: 0x07
def specific_class_to_byte(:sensor_alarm, :basic_zensor_net_smoke_sensor), do: 0x08
def specific_class_to_byte(:sensor_alarm, :zensor_net_smoke_sensor), do: 0x09
def specific_class_to_byte(:sensor_alarm, :adv_zensor_net_smoke_sensor), do: 0x0A
def specific_class_to_byte(:sensor_alarm, :alarm_sensor), do: 0x0B
def specific_class_to_byte(:non_interoperable, :not_used), do: 0x00
def specific_class_to_byte(gen_name, spec_name) do
_ =
Logger.warn(
"Unknown specific class #{inspect(spec_name)} for generic class #{inspect(gen_name)}"
)
{:unk, gen_name, spec_name}
end
@spec is_network_command_class(byte) :: boolean
def is_network_command_class(byte) when byte in @known_network_command_classes, do: true
def is_network_command_class(_), do: false
end
| lib/grizzly/command_class/mappings.ex | 0.66888 | 0.441673 | mappings.ex | starcoder |
defmodule Phoenix.HTML.Link do
@moduledoc """
Conveniences for working with links and URLs in HTML.
"""
import Phoenix.HTML.Tag
@doc """
Generates a link to the given URL.
## Examples
link("hello", to: "/world")
#=> <a href="/world">hello</a>
link("<hello>", to: "/world")
#=> <a href="/world"><hello></a>
link("<hello>", to: "/world", class: "btn")
#=> <a class="btn" href="/world"><hello></a>
link("delete", to: "/the_world", data: [confirm: "Really?"])
#=> <a data-confirm="Really?" href="/the_world">delete</a>
# You can use a `do ... end` block too:
link to: "/hello" do
"world"
end
## Options
* `:to` - the page to link to. This option is required
* `:method` - the method to use with the link. In case the
method is not `:get`, the link is generated inside the form
which sets the proper information. In order to submit the
form, JavaScript must be enabled
* `:form` - customize the underlying form when the method
is not `:get`
All other options are forwarded to the underlying `<a>` tag.
## Data attributes
Data attributes are added as a keyword list passed to the
`data` key. The following data attributes are supported:
* `data-submit="parent"` - automatically used when the
`:method` is not `:get`, this module attribute says the
underlying link should submit the parent form on click
* `data-confirm` - shows a confirmation prompt before
submitting the parent when `:method` is not `:get`.
## JavaScript dependency
In order to support the data attributes above, `Phoenix.HTML`
relies on JavaScript. You can either load the ES5 version from
`priv/static/phoenix_html.js` or depend on the one at
`web/static/js/phoenix_html.js` written in ES6 directly from
your build tool.
"""
def link(text, opts)
def link(opts, do: contents) when is_list(opts) do
link(contents, opts)
end
def link(_text, opts) when not is_list(opts) do
raise ArgumentError, "link/2 requires a keyword list as second argument"
end
def link(text, opts) do
{to, opts} = Keyword.pop(opts, :to)
{method, opts} = Keyword.pop(opts, :method, :get)
unless to do
raise ArgumentError, "expected non-nil value for :to in link/2, got: #{inspect to}"
end
if method == :get do
content_tag(:a, text, [href: to] ++ opts)
else
opts = Keyword.put_new(opts, :rel, "nofollow")
{form, opts} = form_options(opts, method, "link")
form_tag(to, form) do
content_tag(:a, text, [href: "#", data: [submit: "parent"]] ++ opts)
end
end
end
@doc false
# No docs since this function is only called when a `do` block is passed as
# `do:` instead of `do...end` (and that case is documented in `link/2`).
def link(opts) when is_list(opts) do
{contents, opts} = Keyword.pop(opts, :do)
unless contents do
raise ArgumentError, "link/2 requires a text as first argument or contents in the :do block"
end
link(contents, opts)
end
@doc """
Generates a button that uses a regular HTML form to submit to the given URL.
Useful to ensure that links that change data are not triggered by
search engines and other spidering software.
## Examples
button("hello", to: "/world")
#=> <form action="/world" class="button" method="post">
<input name="_csrf_token" value="">
<button type="submit">hello</button>
</form>
button("hello", to: "/world", method: "get", class: "btn")
#=> <form action="/world" class="btn" method="post">
<button type="submit">hello</button>
</form>
## Options
* `:to` - the page to link to. This option is required
* `:method` - the method to use with the button. Defaults to :post.
* `:form` - the options for the form. Defaults to
`[class: "button", enforce_utf8: false]`
All other options are forwarded to the underlying button input.
"""
def button(text, opts) do
{to, opts} = Keyword.pop(opts, :to)
{method, opts} = Keyword.pop(opts, :method, :post)
{form, opts} = form_options(opts, method, "button")
unless to do
raise ArgumentError, "option :to is required in button/2"
end
form_tag(to, form) do
Phoenix.HTML.Form.submit(text, opts)
end
end
defp form_options(opts, method, class) do
{form, opts} = Keyword.pop(opts, :form, [])
form =
form
|> Keyword.put_new(:class, class)
|> Keyword.put_new(:method, method)
|> Keyword.put_new(:enforce_utf8, false)
{form, opts}
end
end
| data/web/deps/phoenix_html/lib/phoenix_html/link.ex | 0.702734 | 0.46557 | link.ex | starcoder |
defmodule Geolix.Adapter.MaxMindCSV do
@moduledoc """
Adapter for Geolix to work with MaxMind CSV databases.
## Adapter Configuration
To start using the adapter with a compatible database you need to add the
required configuration entry to your `:geolix` configuration:
config :geolix,
databases: [
%{
id: :my_csv_database,
adapter: Geolix.Adapter.MaxMindCSV,
repo: Geolix.Adapter.MaxMindCSV.Repo,
schema: Geolix.Adapter.MaxMindCSV.Schema.CityBlockDecimal
}
]
### Adapter Repo
With the configuration key `:repo` you can define which `Ecto.Repo` should
be used for the database access.
The module `Geolix.Adapter.MaxMindCSV.Repo` can be used if you define the
database connection for `otp_app: :geolix_adapter_maxmind_csv` and hook it
into your supervision tree.
### Adapter Schemas
The configuration key `:schema` defines the database result to
fetch for an IP lookup.
The following schemas are provided but should be replaced with your own if
you have custom requirements for the underlying database structure:
- `Geolix.Adapter.MaxMindCSV.Schema.ASNBlockDecimal`
- `Geolix.Adapter.MaxMindCSV.Schema.CityBlockDecimal`
- `Geolix.Adapter.MaxMindCSV.Schema.CountryBlockDecimal`
If you use a custom schema you need to implement the
`Geolix.Adapter.MaxMindCSV.Block` behaviour.
#### Adapter Schema Migrations
When using the packaged schemas you need to create the underlying tables
accordingly. Working migrations you can copy to your project can be found in
the directory `priv/repo/migrations` of the downloaded dependency.
## Adapter Database
The database lookups are done using an integer representation of the
requested IP.
To use the official MaxMind CSV databases you need to convert them to include
this network integer using the
[GeoIP2 CSV Format Converter](https://github.com/maxmind/geoip2-csv-converter)
with the parameter `-include-integer-range`.
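## Example Lookup

A minimal lookup sketch, assuming the `:my_csv_database` configuration above
has been loaded and its repo started; the `:where` option and the exact result
shape are illustrative assumptions rather than guarantees of this adapter:

    Geolix.lookup({8, 8, 8, 8}, where: :my_csv_database)
    # => a map built by the configured schema, or nil if no block matches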
"""
@typedoc """
Extended base database type.
"""
@type database :: %{
required(:id) => atom,
required(:adapter) => module,
required(:repo) => module,
required(:schema) => module
}
@behaviour Geolix.Adapter
@impl Geolix.Adapter
@spec lookup(ip :: :inet.ip_address(), opts :: Keyword.t(), database :: database) :: map | nil
def lookup(ip, _opts, %{repo: repo, schema: schema}), do: schema.find(ip, repo)
end
|
lib/maxmind_csv.ex
| 0.812682 | 0.505859 |
maxmind_csv.ex
|
starcoder
|
defmodule MastaniServer.Test.AssertHelper do
@moduledoc """
This module defines some helper function used by
tests that require check from graphql response
"""
import Phoenix.ConnTest
import Helper.Utils, only: [map_key_stringify: 1, get_config: 2]
@endpoint MastaniServerWeb.Endpoint
@page_size get_config(:general, :page_size)
@inner_page_size get_config(:general, :inner_page_size)
@doc """
used for a non-existent id
"""
def non_exsit_id, do: 15_982_398_614
def inner_page_size, do: @inner_page_size
def page_size, do: @page_size
def is_valid_kv?(obj, key, :list) when is_map(obj) do
obj = map_key_stringify(obj)
case Map.has_key?(obj, key) do
true -> obj |> Map.get(key) |> is_list
_ -> false
end
end
def is_valid_kv?(obj, key, :int) when is_map(obj) do
obj = map_key_stringify(obj)
case Map.has_key?(obj, key) do
true -> obj |> Map.get(key) |> is_integer
_ -> false
end
end
def is_valid_kv?(obj, key, :string) when is_map(obj) and is_binary(key) do
obj = map_key_stringify(obj)
case Map.has_key?(obj, key) do
true -> String.length(Map.get(obj, key)) != 0
_ -> false
end
end
def is_valid_pagination?(obj) when is_map(obj) do
is_valid_kv?(obj, "entries", :list) and is_valid_kv?(obj, "totalPages", :int) and
is_valid_kv?(obj, "totalCount", :int) and is_valid_kv?(obj, "pageSize", :int) and
is_valid_kv?(obj, "pageNumber", :int)
end
def is_valid_pagination?(obj, :empty) when is_map(obj) do
case is_valid_pagination?(obj) do
false ->
false
true ->
obj["entries"] |> Enum.empty?() and obj["totalCount"] == 0 and obj["pageNumber"] == 1 and
obj["totalPages"] == 1
end
end
def is_valid_pagination?(obj, :raw) when is_map(obj) do
is_valid_kv?(obj, "entries", :list) and is_valid_kv?(obj, "total_pages", :int) and
is_valid_kv?(obj, "total_count", :int) and is_valid_kv?(obj, "page_size", :int) and
is_valid_kv?(obj, "page_number", :int)
end
def is_valid_pagination?(obj, :raw, :empty) when is_map(obj) do
case is_valid_pagination?(obj, :raw) do
false ->
false
true ->
obj.entries |> Enum.empty?() and obj.total_count == 0 and obj.page_number == 1 and
obj.total_pages == 1
end
end
def has_boolen_value?(obj, key) do
obj |> Map.get(key) |> is_boolean
end
@doc """
simulate a GraphiQL mutation operation
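## Example

A rough sketch; the query, variables and result key below are hypothetical:

    query = "mutation($title: String!) { createPost(title: $title) { id title } }"
    mutation_result(conn, query, %{"title" => "hello"}, "createPost")
    # => %{"id" => ..., "title" => "hello"}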
"""
def mutation_result(conn, query, variables, key) do
conn
|> post("/graphiql", query: query, variables: variables)
|> json_response(200)
# |> IO.inspect(label: "debug")
|> Map.get("data")
|> Map.get(key)
end
@doc """
check if a GraphiQL mutation returns an error
"""
def mutation_get_error?(conn, query, variables) do
conn
|> post("/graphiql", query: query, variables: variables)
# |> IO.inspect(label: "debug status")
|> json_response(200)
# |> IO.inspect(label: "debug")
|> Map.has_key?("errors")
end
@doc """
check if a GraphiQL mutation returns an error with the given code
"""
def mutation_get_error?(conn, query, variables, code) when is_integer(code) do
resp =
conn
|> post("/graphiql", query: query, variables: variables)
|> json_response(200)
# |> IO.inspect(label: "debug")
case resp |> Map.has_key?("errors") do
true ->
code == resp["errors"] |> List.first() |> Map.get("code")
false ->
false
end
end
def query_result(conn, query, variables, key) do
conn
|> get("/graphiql", query: query, variables: variables)
|> json_response(200)
# |> IO.inspect(label: "debug")
|> Map.get("data")
|> Map.get(key)
end
def query_result(conn, query, key) do
conn
|> get("/graphiql", query: query, variables: %{})
|> json_response(200)
|> Map.get("data")
|> Map.get(key)
end
def query_get_error?(conn, query, variables) do
conn
|> get("/graphiql", query: query, variables: variables)
|> json_response(200)
|> Map.has_key?("errors")
end
@doc """
check if a GraphiQL query returns an error with the given code
"""
def query_get_error?(conn, query, variables, code) when is_integer(code) do
resp =
conn
|> get("/graphiql", query: query, variables: variables)
|> json_response(200)
case resp |> Map.has_key?("errors") do
true ->
code == resp["errors"] |> List.first() |> Map.get("code")
false ->
false
end
end
def firstn_and_last(values, 3) do
[value_1 | [value_2 | [value_3 | _]]] = values
value_x = values |> List.last()
[value_1, value_2, value_3, value_x]
end
end
|
test/support/assert_helper.ex
| 0.658088 | 0.403655 |
assert_helper.ex
|
starcoder
|
defmodule ExDns.Message.Question do
@moduledoc """
Manages the Question part of a DNS message
4.1.2. Question section format
The question section is used to carry the "question" in most queries,
i.e., the parameters that define what is being asked. The section
contains QDCOUNT (usually 1) entries, each of the following format:
1 1 1 1 1 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| |
/ QNAME /
/ /
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QTYPE |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QCLASS |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
where:
QNAME a domain name represented as a sequence of labels, where
each label consists of a length octet followed by that
number of octets. The domain name terminates with the
zero length octet for the null label of the root. Note
that this field may be an odd number of octets; no
padding is used.
QTYPE a two octet code which specifies the type of the query.
The values for this field include all codes valid for a
TYPE field, together with some more general codes which
can match more than one type of RR.
QCLASS a two octet code that specifies the class of the query.
For example, the QCLASS field is IN for the Internet.
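## Example

A decoding sketch for a single question (`example.com`, QTYPE 1, QCLASS 1).
It assumes a header struct can be built with only the `:qc` field set; how the
type and class integers are mapped depends on `ExDns.Resource`, so the exact
contents of the returned struct are illustrative:

    message = <<7, "example", 3, "com", 0, 1::16, 1::16>>
    {:ok, %ExDns.Message.Question{} = question, _rest} =
      ExDns.Message.Question.decode(%ExDns.Message.Header{qc: 1}, message)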
"""
alias ExDns.Resource
alias ExDns.Message
@keys [:host, :type, :class]
@enforce_keys [:host]
defstruct @keys
  @type t :: %__MODULE__{
          host: [binary],
          type: ExDns.Resource.type(),
          class: ExDns.Resource.class()
        }
# TODO: Note that there may be more than one question in a query but currently
  # we're assuming it's one question only
  @spec decode(ExDns.Message.Header.t(), binary()) :: {:ok, t() | nil, binary() | nil}
def decode(%Message.Header{qc: 1}, message) do
decode_question(message)
end
# For when there are no questions
def decode(%Message.Header{qc: 0}, message) do
{:ok, nil, message}
end
# It's the first part of the question.
defp decode_question(message) do
{:ok, name, rest} = Message.decode_name(message)
question = %Message.Question{host: name}
decode_question(question, rest)
end
# There are no more parts to this query - and its the end of the message
defp decode_question(question, <<qt::size(16), qc::size(16)>>) do
question = %Message.Question{question | type: qt, class: qc}
{:ok, question, nil}
end
# There are no more parts to this query - but its not the end of the message
defp decode_question(question, <<qt::size(16), qc::size(16), rest::binary>>) do
question = %Message.Question{question |
type: Resource.type_from(qt), class: Resource.class_from(qc)}
{:ok, question, rest}
end
end
|
lib/ex_dns/message/question.ex
| 0.566738 | 0.595434 |
question.ex
|
starcoder
|
defmodule EctoOrdered do
@moduledoc """
EctoOrdered provides changeset methods for updating ordering an ordering column
It should be added to your schema like so:
```
defmodule OrderedListItem do
use Ecto.Schema
import Ecto.Changeset
import EctoOrdered
schema "ordered_list_item" do
field :title, :string
field :position, :integer
end
def changeset(%__MODULE__{} = model, params) do
model
|> cast(params, [:position, :title])
|> set_order(:position)
end
def delete(%__MODULE__{} = model) do
model
|> cast(%{}, [])
|> Map.put(:action, :delete)
|> set_order(:position)
end
end
```
Note the `delete` function used to ensure that the remaining items are repositioned on
deletion.
"""
defstruct repo: nil,
module: nil,
field: :position,
new_position: nil,
old_position: nil,
move: :move_position,
scope: nil,
old_scope: nil,
new_scope: nil,
until: nil,
max: nil
defmodule InvalidMove do
defexception type: nil
def message(%__MODULE__{type: :too_large}), do: "too large"
def message(%__MODULE__{type: :too_small}), do: "too small"
end
import Ecto.Query
import Ecto.Changeset
alias EctoOrdered, as: Order
@doc """
Returns a changeset which will include updates to the other ordered rows
within the same transaction as the insertion, deletion or update of this row.
The arguments are as follows:
- `changeset` the changeset which is part of the ordered list
- `field` the field in which the order should be stored
- `scope` the field in which the scope for the order should be stored (optional)
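## Examples

Ordering can also be scoped by another column; the `:list_id` field below is
an illustrative example of such a scope column:

```
def changeset(%__MODULE__{} = model, params) do
  model
  |> cast(params, [:position, :title, :list_id])
  |> set_order(:position, :list_id)
end
```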
"""
def set_order(changeset, field, scope \\ nil) do
prepare_changes(changeset, fn changeset ->
case changeset.action do
:insert ->
EctoOrdered.before_insert(changeset, %EctoOrdered{
repo: changeset.repo,
field: field,
scope: scope
})
:update ->
EctoOrdered.before_update(changeset, %EctoOrdered{
repo: changeset.repo,
field: field,
scope: scope
})
:delete ->
EctoOrdered.before_delete(changeset, %EctoOrdered{
repo: changeset.repo,
field: field,
scope: scope
})
end
end)
end
@doc false
def before_insert(cs, %Order{field: field} = struct) do
struct = %{struct | module: cs.data.__struct__}
struct = %Order{max: max} = update_max(struct, cs)
position_assigned = get_field(cs, field)
if position_assigned do
struct =
struct
|> update_new_scope(cs)
|> update_new_position(cs)
increment_position(struct)
validate_position!(cs, struct)
else
put_change(cs, field, max + 1)
end
end
@doc false
def before_update(cs, struct) do
%{struct | module: cs.data.__struct__}
|> update_old_scope(cs)
|> update_new_scope(cs)
|> reorder_model(cs)
end
defp increment_position(
%Order{module: module, field: field, scope: nil, new_position: split_by} = struct
) do
query =
from(m in module,
where: field(m, ^field) >= ^split_by
)
execute_increment(struct, query)
end
defp increment_position(
%Order{
module: module,
field: field,
scope: scope,
new_position: split_by,
new_scope: new_scope
} = struct
)
when is_list(scope) do
query =
module
|> where([m], field(m, ^field) >= ^split_by)
|> multi_scope_query(scope, new_scope)
execute_increment(struct, query)
end
defp increment_position(
%Order{
module: module,
field: field,
scope: scope,
new_position: split_by,
new_scope: new_scope
} = struct
) do
query =
from(m in module,
where: field(m, ^field) >= ^split_by and field(m, ^scope) == ^new_scope
)
execute_increment(struct, query)
end
defp decrement_position(
%Order{module: module, field: field, old_position: split_by, until: until, scope: nil} =
struct
) do
query =
from(m in module,
where: field(m, ^field) > ^split_by and field(m, ^field) <= ^until
)
execute_decrement(struct, query)
end
defp decrement_position(
%Order{
module: module,
field: field,
old_position: split_by,
until: nil,
old_scope: old_scope,
scope: scope
} = struct
)
when is_list(scope) do
query =
module
|> where([m], field(m, ^field) > ^split_by)
|> multi_scope_query(scope, old_scope)
execute_decrement(struct, query)
end
defp decrement_position(
%Order{
module: module,
field: field,
old_position: split_by,
until: nil,
old_scope: old_scope,
scope: scope
} = struct
) do
query =
from(m in module,
where: field(m, ^field) > ^split_by and field(m, ^scope) == ^old_scope
)
execute_decrement(struct, query)
end
defp decrement_position(
%Order{
module: module,
field: field,
scope: scope,
old_position: split_by,
until: until,
old_scope: old_scope
} = struct
)
when is_list(scope) do
query =
module
|> where([m], field(m, ^field) > ^split_by and field(m, ^field) <= ^until)
|> multi_scope_query(scope, old_scope)
execute_decrement(struct, query)
end
defp decrement_position(
%Order{
module: module,
field: field,
scope: scope,
old_position: split_by,
until: until,
old_scope: old_scope
} = struct
) do
query =
from(m in module,
where:
field(m, ^field) > ^split_by and field(m, ^field) <= ^until and
field(m, ^scope) == ^old_scope
)
execute_decrement(struct, query)
end
defp validate_position!(cs, %Order{field: field, new_position: position, max: max})
when position > max + 1 do
raise EctoOrdered.InvalidMove, type: :too_large
%Ecto.Changeset{cs | valid?: false} |> add_error(field, :too_large)
end
defp validate_position!(cs, %Order{field: field, new_position: position}) when position < 1 do
raise EctoOrdered.InvalidMove, type: :too_small
%Ecto.Changeset{cs | valid?: false} |> add_error(field, :too_small)
end
defp validate_position!(cs, _), do: cs
defp update_old_scope(%Order{scope: scope} = struct, cs) when is_list(scope) do
%{struct | old_scope: Enum.map(scope, fn f -> Map.get(cs.data, f) end)}
end
defp update_old_scope(%Order{scope: scope} = struct, cs) do
%{struct | old_scope: Map.get(cs.data, scope)}
end
defp update_new_scope(%Order{scope: scope} = struct, cs) when is_list(scope) do
%{struct | new_scope: Enum.map(scope, fn f -> get_field(cs, f) end)}
end
defp update_new_scope(%Order{scope: scope} = struct, cs) do
%{struct | new_scope: get_field(cs, scope)}
end
defp update_new_position(%Order{field: field} = struct, cs) do
%{struct | new_position: get_field(cs, field)}
end
defp update_old_position(%Order{field: field} = struct, cs) do
%{struct | old_position: Map.get(cs.data, field)}
end
defp update_max(%Order{repo: repo} = struct, cs) do
rows = query(struct, cs) |> repo.all |> Enum.reject(&is_nil/1)
max = (rows == [] && 0) || Enum.max(rows)
%{struct | max: max}
end
defp reorder_model(
%Order{scope: scope, new_scope: new_scope, old_scope: old_scope} = struct,
cs
)
when not is_nil(old_scope) and is_list(scope) and new_scope != old_scope do
cs
|> change(Enum.zip(scope, new_scope))
|> before_delete(struct)
before_insert(cs, struct)
end
defp reorder_model(
%Order{scope: scope, old_scope: old_scope, new_scope: new_scope} = struct,
cs
)
when not is_nil(old_scope) and new_scope != old_scope do
cs
|> put_change(scope, new_scope)
|> before_delete(struct)
before_insert(cs, struct)
end
defp reorder_model(struct, cs) do
struct
|> update_max(cs)
|> update_new_position(cs)
|> update_old_position(cs)
|> adjust_position(cs)
end
defp adjust_position(
%Order{max: max, field: field, new_position: new_position, old_position: old_position} =
struct,
cs
)
when new_position > old_position do
struct = %{struct | until: new_position}
decrement_position(struct)
cs = if new_position == max + 1, do: put_change(cs, field, max), else: cs
validate_position!(cs, struct)
end
defp adjust_position(
%Order{max: max, new_position: new_position, old_position: old_position} = struct,
cs
)
when new_position < old_position do
struct = %{struct | until: max}
decrement_position(struct)
increment_position(struct)
validate_position!(cs, struct)
end
defp adjust_position(_struct, cs) do
cs
end
@doc false
def before_delete(cs, struct) do
struct =
%Order{max: max} =
%{struct | module: cs.data.__struct__}
|> update_max(cs)
|> update_old_position(cs)
|> update_old_scope(cs)
decrement_position(%{struct | until: max})
cs
end
defp query(%Order{module: module, field: field, scope: nil}, _cs) do
from(m in module) |> selector(field)
end
defp query(%Order{module: module, field: field, scope: scope}, cs) when is_list(scope) do
Enum.reduce(scope, module, fn s, q ->
new_scope = get_field(cs, s)
scope_query(q, s, new_scope)
end)
|> selector(field)
end
defp query(%Order{module: module, field: field, scope: scope}, cs) do
new_scope = get_field(cs, scope)
scope_query(module, scope, new_scope)
|> selector(field)
end
defp selector(q, field) do
Ecto.Query.select(q, [m], field(m, ^field))
end
defp execute_increment(%Order{repo: repo, field: field}, query) do
query
|> repo.update_all(inc: [{field, 1}])
end
defp execute_decrement(%Order{repo: repo, field: field}, query) do
query |> repo.update_all(inc: [{field, -1}])
end
defp multi_scope_query(query, scope, new_scope) do
Enum.zip(scope, new_scope)
|> Enum.reduce(query, fn s, q -> scope_query(q, elem(s, 0), elem(s, 1)) end)
end
defp scope_query(q, scope, nil) do
q |> where([m], is_nil(field(m, ^scope)))
end
defp scope_query(q, scope, new_scope) do
q |> where([m], field(m, ^scope) == ^new_scope)
end
end
|
lib/ecto_ordered.ex
| 0.85405 | 0.676683 |
ecto_ordered.ex
|
starcoder
|
defmodule Pushest.Socket.Data.Frame do
@moduledoc ~S"""
Structure representing a Frame being passed between Pushest and Pusher server.
Includes methods constructing Frame structure for various pusher events.
This module handles encode/decode actions for a Frame.
"""
alias Pushest.Socket.Data.SubscriptionData
defstruct [:channel, :event, :data]
@doc ~S"""
Returns Frame struct representing subscribe event being sent to the Pusher.
## Examples
iex> Pushest.Socket.Data.Frame.subscribe("private-chnl", "auth")
%Pushest.Socket.Data.Frame{
event: "pusher:subscribe",
data: %Pushest.Socket.Data.SubscriptionData{
auth: "auth",
channel: "private-chnl",
channel_data: %{}
},
channel: nil
}
iex> Pushest.Socket.Data.Frame.subscribe("private-chnl", "auth", %{user_id: 1})
%Pushest.Socket.Data.Frame{
event: "pusher:subscribe",
data: %Pushest.Socket.Data.SubscriptionData{
auth: "auth",
channel: "private-chnl",
channel_data: %{user_id: 1}
},
channel: nil
}
"""
@spec subscribe(String.t(), String.t() | nil, map) :: %__MODULE__{}
def subscribe(channel, auth, user_data \\ %{}) do
%__MODULE__{
event: "pusher:subscribe",
data: %SubscriptionData{
channel: channel,
auth: auth,
channel_data: user_data
}
}
end
@doc ~S"""
Returns Frame struct representing unsubscribe event being sent to the Pusher.
## Examples
iex> Pushest.Socket.Data.Frame.unsubscribe("private-chnl")
%Pushest.Socket.Data.Frame{
event: "pusher:unsubscribe",
data: %Pushest.Socket.Data.SubscriptionData{channel: "private-chnl"}
}
"""
@spec unsubscribe(String.t()) :: %__MODULE__{}
def unsubscribe(channel) do
%__MODULE__{
event: "pusher:unsubscribe",
data: %SubscriptionData{
channel: channel
}
}
end
@doc ~S"""
Returns Frame struct representing an event being sent to the Pusher.
## Examples
iex> Pushest.Socket.Data.Frame.event("private-chnl", "evnt", %{name: "stepnivlk"})
%Pushest.Socket.Data.Frame{
channel: "private-chnl",
data: %{name: "stepnivlk"},
event: "client-evnt"
}
"""
@spec event(String.t(), String.t(), term) :: %__MODULE__{}
def event(channel, event, data) do
%__MODULE__{
channel: channel,
event: "client-#{event}",
data: data
}
end
@doc ~S"""
Encodes Frame struct to stringified JSON.
## Examples
iex> Pushest.Socket.Data.Frame.encode!(%Pushest.Socket.Data.Frame{
...> channel: "public-channel", event: "first-event"
...> })
"{\"event\":\"first-event\",\"data\":null,\"channel\":\"public-channel\"}"
iex> Pushest.Socket.Data.Frame.encode!(%Pushest.Socket.Data.Frame{
...> channel: "public-channel",
...> event: "first-event",
...> data: %{name: "stepnivlk"}
...> })
"{\"event\":\"first-event\",\"data\":{\"name\":\"stepnivlk\"},\"channel\":\"public-channel\"}"
"""
@spec encode!(%__MODULE__{}) :: String.t()
def encode!(frame = %__MODULE__{data: %SubscriptionData{channel_data: channel_data}}) do
%{frame | data: %{frame.data | channel_data: Poison.encode!(channel_data)}}
|> Poison.encode!()
end
def encode!(frame = %__MODULE__{}) do
Poison.encode!(frame)
end
@doc ~S"""
Decodes frame from stringified JSON to Frame struct.
## Examples
iex> Pushest.Socket.Data.Frame.decode!("{\"event\":\"first-event\",\"data\":null,\"channel\":\"public-channel\"}")
%Pushest.Socket.Data.Frame{channel: "public-channel", event: "first-event"}
iex> Pushest.Socket.Data.Frame.decode!("{\"event\":\"first-event\",\"data\":{\"test\":1},\"channel\":\"public-channel\"}")
%Pushest.Socket.Data.Frame{channel: "public-channel", event: "first-event", data: %{"test" => 1}}
"""
@spec decode!(String.t()) :: %__MODULE__{}
def decode!(raw_frame) do
frame = Poison.decode!(raw_frame, as: %__MODULE__{})
%{frame | data: decode_data!(frame.data)}
end
def decode_data!(data) when is_map(data), do: data
def decode_data!(nil), do: nil
def decode_data!(data), do: Poison.decode!(data)
end
|
lib/pushest/socket/data/frame.ex
| 0.891696 | 0.497315 |
frame.ex
|
starcoder
|
defmodule Transmog.Parser do
@moduledoc """
`Parser` is a module which parses the two tuple dot notation strings into a
format that can be understood by the rest of the library. It is able to parse
values of the format "a.b.c" into the library format of ["a", "b", "c"],
for example.
It exposes two functions. `parse/1` is the main exported function from this
module which parses the dot notation strings into the special format.
`valid?/1` takes a parsed value and returns whether or not the parsed pairs
are valid.
"""
@typedoc """
`error` is the type for an error that occurs during parsing. During parsing if
an invalid pair is found then the value described in this type is returned.
"""
@type error :: {:error, :invalid_pair}
@doc """
`parse/1` converts a list of raw pairs into a list of pairs that can be
understood by the formatter. It performs the parse according to the following
rules:
1. Each pair must be a two tuple of strings.
2. Each string can use dot notation to represent nested values.
3. Atoms are represented literally, ie. ":name" for `:name`.
## Examples
iex> pairs = [{"a.b.c", "c.b.a"}]
iex> Transmog.Parser.parse(pairs)
[{["a", "b", "c"], ["c", "b", "a"]}]
iex> pairs = [{":a.b", ":a.:b"}]
iex> Transmog.Parser.parse(pairs)
[{[:a, "b"], [:a, :b]}]
"""
@spec parse(pairs :: [Transmog.raw_pair()]) :: {:ok, [Transmog.pair()]} | error
def parse(pairs) when is_list(pairs) do
case Enum.reduce_while(pairs, [], &parse_pair/2) do
{:error, _} = error -> error
pairs -> {:ok, pairs}
end
end
@doc """
`valid?/1` checks that a list of pairs is valid. A list of pairs is considered
valid if each pair consists of equal length lists of keys. A key is either an
atom or a string.
## Examples
iex> pairs = [{["a", "b"], [:a, :b]}]
iex> Transmog.Parser.valid?(pairs)
true
iex> pairs = [{["a"], []}]
iex> Transmog.Parser.valid?(pairs)
false
iex> pairs = [{[], [:a]}]
iex> Transmog.Parser.valid?(pairs)
false
"""
@spec valid?(pairs :: term) :: boolean
def valid?(pairs) when is_list(pairs), do: Enum.all?(pairs, &valid_pair?/1)
def valid?(_), do: false
# Converts a dot notation string into a path list. Atoms will be parsed from
# strings if applicable at this stage.
@spec from_string(path :: binary) :: [Transmog.key()]
defp from_string(path) when is_binary(path) do
path
|> String.split(".")
|> Enum.map(&parse_field/1)
end
# Parses a single field of the dot notation string. If the field begins with
# a colon, then it is parsed as an atom. Only existing atoms will be used to
# be safe.
@spec parse_field(field :: binary) :: Transmog.key()
defp parse_field(":" <> field) when is_binary(field), do: String.to_existing_atom(field)
defp parse_field(field) when is_binary(field), do: field
# Parses a pair for `parse/1`. Returns values that are used by
# `Enum.reduce_while/3` to stop execution early if an invalid value is
# encountered.
@spec parse_pair(pair :: Transmog.pair(), pairs :: [Transmog.pair()]) ::
{:cont, [Transmog.pair()]} | {:halt, error}
defp parse_pair({from, to}, pairs) when is_binary(from) and is_binary(to) do
{:cont, pairs ++ [{from_string(from), from_string(to)}]}
end
defp parse_pair(_, _), do: {:halt, {:error, :invalid_pair}}
# Determines if a single field in a path is valid. A single field is valid if
# it is either an atom or a string.
@spec valid_field?(field :: Transmog.key()) :: boolean
defp valid_field?(field) when is_atom(field) or is_binary(field), do: true
defp valid_field?(_), do: false
# Determines if a pair is valid. A pair is valid if both lists of keys in the
# pair are of the same length and if each key in the list is valid.
@spec valid_pair?(pair :: Transmog.pair()) :: boolean
defp valid_pair?({from, to}) when is_list(from) and is_list(to) do
length(from) == length(to) && Enum.all?(from ++ to, &valid_field?/1)
end
defp valid_pair?(_), do: false
end
|
lib/transmog/parser.ex
| 0.92501 | 0.792705 |
parser.ex
|
starcoder
|
defmodule VersionHelper do
@moduledoc """
Helper functions for dealing with `Version` values.
"""
@doc """
Bumps the specified `part` of the given `version`, zeroing out all smaller parts of the version.
## Examples
Bump the major version, which increments the major version number and zeroes out the minor and
patch version numbers.
iex> {:ok, version} = Version.parse("1.2.3")
iex> VersionHelper.bump(version, :major)
#Version<2.0.0>
Bump the minor version, which leaves the major version number unchanged, increments the minor
version number, and zeroes out the patch version number.
iex> {:ok, version} = Version.parse("1.2.3")
iex> VersionHelper.bump(version, :minor)
#Version<1.3.0>
Bump the patch version, which leaves the major and minor version numbers unchanged, and increments
the patch version number.
iex> {:ok, version} = Version.parse("1.2.3")
iex> VersionHelper.bump(version, :patch)
#Version<1.2.4>
Bump the patch version when there is build or prerelease information in the version number, which
strips the build and prerelease information and leaves the rest of the version unchanged.
iex> {:ok, version} = Version.parse("1.2.3-beta+20170611")
iex> VersionHelper.bump(version, :patch)
#Version<1.2.3>
"""
def bump(version, part)
def bump(version, :major) do
%Version{
build: nil,
major: version.major + 1,
minor: 0,
patch: 0,
pre: []
}
end
def bump(version, :minor) do
%Version{
build: nil,
major: version.major,
minor: version.minor + 1,
patch: 0,
pre: []
}
end
def bump(version = %Version{build: b, pre: p}, :patch) when not is_nil(b) or p != [] do
%Version{
build: nil,
major: version.major,
minor: version.minor,
patch: version.patch,
pre: []
}
end
def bump(version, :patch) do
%Version{
build: nil,
major: version.major,
minor: version.minor,
patch: version.patch + 1,
pre: []
}
end
end
|
lib/version_helper.ex
| 0.902926 | 0.549278 |
version_helper.ex
|
starcoder
|
defmodule Trie do
@behaviour Access
@moduledoc ~S"""
This module contains the type and functions to work with a [Trie (tree data
structure)](https://en.wikipedia.org/wiki/Trie). The difference from the
accepted data structure is that this one only keeps one character per node.
The data structure here also implements the Elixir's `Access` behaviour.
### Functions of interest
- `search/2`: searches for a prefix in the trie and returns a list of
complete words that match the prefix.
- `words/1`: returns a list of all complete words found in the trie.
No particular order is guaranteed.
- `word_count/1`: returns the count of all complete words in the trie.
### Fields
**IMPORTANT NOTE**: None of the fields should be relied on as a public API.
Anything you need from a `Trie` should be achievable by its functions.
This section is provided as an informative piece to demonstrate the internal
data structure.
1. `key`: an integer representing an Unicode character. A word is composed
by recursively adding (or looking up) its characters, one level at a
time. **EXAMPLE**: Only loading the word "hello" will return a `Trie`
which is 6 levels deep: one root node (see below for its field values)
and 5 nodes for each character.
2. `children`: a `Map` of `integer` keys (Unicode characters) and `Trie`
nodes.
3. `frequency`: amount of times the full word has been added to this `Trie`
node. A `Trie` node having a `frequency` greater than zero is considered
to be a word terminator (see the example below for clarification).
### Root node
The root node of a newly constructed `Trie` always has a `nil` key and zero
`frequency`. Its `children` are the first characters of the words.
### Example
Given the words `["ten", "tons", "tea"]` loaded with frequencies of
2, 3 and 4 respectively, a `Trie` will look like the following (L0 to L4
stand for Levels 0 to 4 in the tree):
|L0 |L1 |L2 |L3 |L4 |Frequency|
|------|---|---|---|---|--------:|
|_root_| | | | | 0|
| |`t`| | | | 0|
| | |`e`| | | 0|
| | | |`a`| | 4|
| | | |`n`| | 2|
| | |`o`| | | 0|
| | | |`n`| | 0|
| | | | |`s`| 3|
In the above example only the words `tea`, `ten` and `tons` are complete
while the words `t`, `te`, `to` and `ton` are not.
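### Usage sketch

Building the trie from the example above and querying it (the order of results
from `search/2` and `words/1` is not guaranteed):

    trie = Trie.put_words([{"ten", 2}, {"tons", 3}, {"tea", 4}])
    Trie.word_count(trie)
    # => 3
    Trie.search(trie, "te")
    # => ["tea", "ten"] (in no particular order)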
"""
@type key :: char
@type optional_key :: key | nil
@type children :: %{required(key) => t} | %{}
@type text :: charlist | String.t()
@type freq :: pos_integer
@opaque t :: %Trie{
key: optional_key,
children: children,
frequency: non_neg_integer
}
defstruct key: nil, children: %{}, frequency: 0
defguard is_key(x) when is_integer(x)
defguard is_freq(x) when is_integer(x) and x > 0
@doc ~S"""
Creates a `Trie` and invokes `add/3`.
Raises `ArgumentError` if there are non-printable characters in the word.
"""
@spec put_word(text, freq) :: t | no_return
def put_word(word, frequency \\ 1)
def put_word(word, frequency)
when is_list(word) and is_freq(frequency) do
put_word(List.to_string(word), frequency)
end
def put_word(word, frequency)
when is_binary(word) and is_freq(frequency) do
if not String.printable?(word) do
raise(ArgumentError, "the parameter must be printable")
end
add(%__MODULE__{}, word, frequency)
end
@doc ~S"""
Creates a `Trie` and invokes `add/3` on each word (or pairs of words and
frequencies) to it.
Note that any combination of words and words with frequencies is accepted, for
example `["one", {"word", 2}, {"another", 5}, "day"]` is a valid input and it
would add the words "one" and "day" with frequencies of one while the words
"word" and "another" will have frequencies of two and five, respectively.
Also see `put_word/2`.
"""
@spec put_words([String.t()] | [{String.t(), pos_integer}]) :: t
def put_words(texts)
when is_list(texts) do
Enum.reduce(texts, %__MODULE__{}, fn
{text, frequency}, acc when is_binary(text) and is_freq(frequency) ->
add(acc, text, frequency)
text, acc when is_binary(text) ->
add(acc, text)
end)
end
@spec get_or_create_node(t, key) :: t
defp get_or_create_node(%__MODULE__{} = t, key)
when is_key(key) do
t.children
|> Map.put_new(key, %__MODULE__{key: key})
|> Map.get(key)
end
@spec get_node(t, key) :: t | nil
defp get_node(%__MODULE__{} = t, key)
when is_key(key) do
Map.get(t.children, key)
end
@spec fetch_node(t, key) :: {:ok, t} | :error
defp fetch_node(%__MODULE__{} = t, key)
when is_key(key) do
Map.fetch(t.children, key)
end
@doc ~S"""
Adds the given word (binary or charlist) into the passed `Trie`.
The returned `Trie` will have _N_ levels in its tree structure (where _N_ is
the length of the binary / charlist), with each node along the way having a
key of the character at the current position.
It can then be polled via all available mechanisms of the `Access` behaviour
plus the `Kernel` functions like `Kernel.get_in/2`, `Kernel.put_in/3` and
friends. Consult the documentation of `Access` for more details.
"""
@spec add(t, text, integer) :: t
def add(t, word, frequency \\ 1)
def add(%__MODULE__{} = t, word, frequency)
when is_binary(word) and is_freq(frequency) do
add(t, to_charlist(word), frequency)
end
def add(%__MODULE__{} = t, [char | rest_chars], frequency)
when is_freq(frequency) do
child = get_or_create_node(t, char)
child = add(child, rest_chars, frequency)
children = Map.put(t.children, char, child)
%__MODULE__{t | children: children}
end
def add(%__MODULE__{} = t, [], frequency)
when is_freq(frequency) do
%__MODULE__{t | frequency: t.frequency + frequency}
end
@doc ~S"""
Implements the callback `c:Access.fetch/2`.
"""
@spec fetch(t, text) :: {:ok, t} | :error
def fetch(t, key)
def fetch(%__MODULE__{} = t, key)
when is_binary(key) do
fetch(t, to_charlist(key))
end
def fetch(%__MODULE__{} = t, [char | rest_chars]) do
case fetch(t, char) do
{:ok, child} -> fetch(child, rest_chars)
:error -> :error
end
end
def fetch(%__MODULE__{} = t, []), do: {:ok, t}
def fetch(%__MODULE__{} = t, key)
when is_key(key) do
fetch_node(t, key)
end
@doc ~S"""
Implements the callback `c:Access.get/3`.
"""
@spec get(t, text, t | nil) :: t | nil
def get(t, key, default \\ nil)
def get(%__MODULE__{} = t, key, default) do
case fetch(t, key) do
{:ok, val} -> val
:error -> default
end
end
@doc ~S"""
Implements the callback `c:Access.pop/2`.
"""
@spec pop(t, text) :: {nil | t, t}
def pop(%__MODULE__{} = t, key)
when is_binary(key) do
pop(t, to_charlist(key))
end
def pop(%__MODULE__{} = t, [char | rest_chars])
when is_key(char) and length(rest_chars) > 0 do
{popped_trie, modified_trie} = pop(get_node(t, char), rest_chars)
t = %__MODULE__{t | children: Map.put(t.children, char, modified_trie)}
{popped_trie, t}
end
def pop(%__MODULE__{} = t, [char | rest_chars])
when is_key(char) and length(rest_chars) == 0 do
{popped_trie, modified_children} = Map.pop(t.children, char)
t = %__MODULE__{t | children: modified_children}
{popped_trie, t}
end
def pop(%__MODULE__{} = _t, ""), do: {nil, %{}}
def pop(%__MODULE__{} = _t, []), do: {nil, %{}}
@doc ~S"""
Implements the callback `c:Access.get_and_update/3`.
"""
@spec get_and_update(t, text, (key -> {t, t} | :pop)) :: {t, t}
def get_and_update(t, key, fun)
def get_and_update(%__MODULE__{} = t, key, fun)
when is_binary(key) and is_function(fun, 1) do
get_and_update(t, to_charlist(key), fun)
end
def get_and_update(%__MODULE__{} = t, key, fun)
when is_list(key) and is_function(fun, 1) do
case fun.(key) do
{old_val, %__MODULE__{} = new_val} ->
get_and_update_without_pop(t, key, old_val, new_val)
:pop ->
pop(t, key)
end
end
@spec get_and_update_without_pop(t, charlist, t | nil, t) :: {t, t}
defp get_and_update_without_pop(
%__MODULE__{} = t,
[char],
old_val,
%__MODULE__{} = new_val
)
when is_key(char) do
modified_trie = %__MODULE__{t | children: Map.put(t.children, char, new_val)}
{old_val, modified_trie}
end
defp get_and_update_without_pop(
%__MODULE__{} = t,
[char | rest_chars],
old_val,
%__MODULE__{} = new_val
)
when is_key(char) do
{_, modified_child} =
get_and_update_without_pop(Map.get(t.children, char), rest_chars, old_val, new_val)
modified_trie = %__MODULE__{t | children: Map.put(t.children, char, modified_child)}
{old_val, modified_trie}
end
@spec get_words(t, charlist, [binary]) :: [binary]
defp get_words(%__MODULE__{} = t, word, words) do
{next_word, next_words} =
case t.frequency do
f when f > 0 ->
new_word =
[t.key | word]
|> Enum.reverse()
|> to_string()
{[t.key | word], [new_word | words]}
_ ->
if is_nil(t.key) do
{word, words}
else
{[t.key | word], words}
end
end
Enum.reduce(t.children, next_words, fn {_key, trie}, acc ->
get_words(trie, next_word, acc)
end)
end
@spec words(t) :: [binary]
@doc ~S"""
Returns a list of all words.
"""
def words(%__MODULE__{} = t) do
get_words(t, [], [])
end
@spec word_count_by_frequency(non_neg_integer) :: 0 | 1
defp word_count_by_frequency(freq) when freq > 0, do: 1
defp word_count_by_frequency(_), do: 0
@spec word_count(t) :: non_neg_integer
@doc ~S"""
Returns the count of all words (`Trie` nodes that have a non-zero frequency).
"""
def word_count(%__MODULE__{} = t) do
Enum.reduce(
t.children,
word_count_by_frequency(t.frequency),
fn {_key, child}, total ->
total + word_count(child)
end
)
end
@spec search(t, text) :: [binary]
@doc ~S"""
Searches for a prefix and returns a list of word matches.
"""
def search(%__MODULE__{} = t, prefix) when is_binary(prefix) do
cut_prefix = String.slice(prefix, 0..-2)
sub_trie = get(t, prefix) || %__MODULE__{}
sub_trie
|> words()
|> Enum.map(&(cut_prefix <> &1))
end
def search(%__MODULE__{} = t, prefix) when is_list(prefix) do
search(t, to_string(prefix))
end
end
|
lib/trie.ex
| 0.933195 | 0.892234 |
trie.ex
|
starcoder
|
defmodule Ecto.Model.OptimisticLock do
@moduledoc """
Facilities for using the optimistic-locking technique.
[Optimistic
locking](http://en.wikipedia.org/wiki/Optimistic_concurrency_control) (or
*optimistic concurrency control*) is a technique that allows concurrent edits
on a single record. While pessimistic locking works by locking a resource for
an entire transaction, optimistic locking only checks if the resource changed
before updating it.
This is done by regularly fetching the record from the database, then checking
whether another process has made changes to the record *only when updating the
record*. This behaviour is ideal in situations where the chances of concurrent
updates to the same record are low; if they're not, pessimistic locking or
other concurrency patterns may be more suited.
## Usage
Optimistic locking works by keeping a "version" counter for each record; this
counter gets incremented each time a modification is made to a record. Hence,
in order to use optimistic locking, a column must be added to a given model's
table and a field must be added to that model's schema.
## Examples
Assuming we have a `Post` model (stored in the `posts` table), the first step
is to add a version column to the `posts` table:
alter table(:posts) do
add :lock_version, :integer, default: 1
end
The column name is arbitrary and doesn't need to be `:lock_version`. However,
it **needs to be an integer**.
Now a field must be added to the schema and the `optimistic_lock/1` macro has
to be used in order to specify which column in the schema will be used as
the "version" column.
defmodule Post do
use Ecto.Model
schema "posts" do
field :title, :string
field :lock_version, :integer, default: 1
end
optimistic_lock :lock_version
end
Note that the `optimistic_lock/1` macro is defined in this module, which is
imported when `Ecto.Model` is used. To use the `optimistic_lock/1` macro
without using `Ecto.Model`, just use `Ecto.Model.OptimisticLock` but be sure
to use `Ecto.Model.Callbacks` as well since it's used by
`Ecto.Model.OptimisticLock` under the hood.
When a conflict happens (a record which has been previously fetched is being
updated, but that same record has been modified since it was fetched), an
`Ecto.StaleModelError` exception is raised.
iex> post = Repo.insert!(%Post{title: "foo"})
%Post{id: 1, title: "foo", lock_version: 1}
iex> valid_change = cast(%{title: "bar"}, post, ~w(title), ~w())
iex> stale_change = cast(%{title: "baz"}, post, ~w(title), ~w())
iex> Repo.update!(valid_change)
%Post{id: 1, title: "bar", lock_version: 2}
iex> Repo.update!(stale_change)
** (Ecto.StaleModelError) attempted to update a stale model:
%Post{id: 1, title: "baz", lock_version: 1}
Optimistic locking also works with delete operations: when trying to delete a
stale model, an `Ecto.StaleModelError` exception is raised as well.
"""
import Ecto.Changeset
@doc false
defmacro __using__(_) do
quote do
import Ecto.Model.OptimisticLock
end
end
@doc """
Specifies a field to use with optimistic locking.
This macro specifies a `field` that will be used to implement the
optimistic-locking technique described in the docs for this module.
`optimistic_lock/1` can be used multiple times per model.
## Examples
defmodule Note do
use Ecto.Model
schema "notes" do
add :title, :string
add :body, :text
add :optlock, :integer, default: 1
end
optimistic_lock :optlock
end
"""
defmacro optimistic_lock(field) do
quote bind_quoted: [field: field] do
before_update Ecto.Model.OptimisticLock, :__lock__, [field]
before_delete Ecto.Model.OptimisticLock, :__lock__, [field]
end
end
@doc false
def __lock__(%Ecto.Changeset{model: model} = changeset, field) do
current = Map.fetch!(model, field)
update_in(changeset.filters, &Map.put(&1, field, current))
|> force_change(field, current + 1)
end
end
|
lib/ecto/model/optimistic_lock.ex
| 0.911913 | 0.597314 |
optimistic_lock.ex
|
starcoder
|
defmodule Griffin.Model.Adapters do
@moduledoc """
Module for storing model database adapters, which are module with CRUDL APIs
for persisting model operations to a database.
"""
defmodule Memory do
@moduledoc """
Module for storing model data in an in-memory map. Useful for development
but not production-worthy.
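A rough usage sketch (the collection name and document fields are arbitrary):

    Griffin.Model.Adapters.Memory.init()
    Griffin.Model.Adapters.Memory.create(:users, %{name: "Ada"})
    # => %{id: 0, name: "Ada"}
    Griffin.Model.Adapters.Memory.read(:users, %{name: "Ada"})
    # => %{id: 0, name: "Ada"}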
"""
@doc """
Starts the agent used to persist the in-memory database.
"""
def init do
Agent.start_link(fn -> %{} end, name: __MODULE__)
end
@doc """
A model resolver to persist a CRUDL operation into a database statement that
updates the response.
"""
    def to_db_statement(%{errs: errs} = ctx) when length(errs) > 0, do: ctx
def to_db_statement(%{op: op} = ctx) do
{_, col} = Griffin.Model.Module.namespaces(ctx._model)
res =
case op do
:create -> create(col, ctx.args)
:read -> read(col, ctx.args)
:update -> update(col, ctx.args)
:delete -> delete(col, ctx.args)
:list -> list(col, ctx.args)
end
%{ctx | res: res}
end
def empty do
Agent.update(__MODULE__, fn _ -> %{} end)
end
def create(col, doc) do
Agent.update(__MODULE__, fn map ->
old_col = Map.get(map, col)
doc =
case {is_nil(old_col), is_nil(doc[:id])} do
{true, true} -> Map.put(doc, :id, 0)
{false, true} -> Map.put(doc, :id, length(old_col))
{_, false} -> doc
end
new_col = if old_col, do: map[col] ++ [doc], else: [doc]
Map.put(map, col, new_col)
end)
Agent.get(__MODULE__, &Map.get(&1, col)) |> List.last()
end
def read(col, args) do
List.first(list(col, args))
end
def update(_, _) do
raise "Update not implemented"
end
def delete(_, _) do
raise "Update not implemented"
end
def list(col, args) do
docs = Agent.get(__MODULE__, &Map.get(&1, col))
      docs =
        (docs || [])
        |> Enum.filter(fn doc ->
          subset = Map.take(doc, Map.keys(args))
          subset == args
        end)
        |> List.flatten()
docs
end
end
end
|
lib/griffin/model/adapters.ex
| 0.715821 | 0.497437 |
adapters.ex
|
starcoder
|
defmodule Botfuel.Entity do
defstruct [sentence: "",
dimensions: [],
antidimensions: [],
timezone: "",
case_sensitive: false,
keep_quotes: false,
keep_accents: false
]
@type t :: %__MODULE__{sentence: String.t, dimensions: [dimension()], antidimensions: [dimension()],
timezone: String.t, case_sensitive: boolean(), keep_quotes: boolean(),
keep_accents: boolean()
}
@type dimension :: :street_number | :street_type | :postal | :city | :country | :address | :language
| :nationality | :email | :hashtag | :number | :ordinal | :time | :duration
| :distance | :area | :volume | :temperature | :forename | :family | :percentage
| :url | :item_count | :currency | :money | :color
defp dimensions, do: MapSet.new [:street_number, :street_type, :postal, :city, :country, :address, :language,
:nationality, :email, :hashtag, :number, :ordinal, :time, :duration,
:distance, :area, :volume, :temperature, :forename, :family, :percentage,
:url, :item_count, :currency, :money, :color]
@doc """
Check the provided parameters and field against the set of all possible dimensions.
It returns a *purified* `%Botfuel.Entity{}` from bad user input.
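## Example

An illustrative call; unknown dimensions are dropped, and the order of the kept
dimensions is not guaranteed since it comes from a `MapSet`:

    Botfuel.Entity.purify(%{sentence: "mail me", dimensions: [:email, :foo]}, :dimensions)
    # => %Botfuel.Entity{sentence: "mail me", dimensions: [:email], ...}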
"""
@spec purify(Botfuel.Entity.t, atom()) :: map()
def purify(params, field) do
{values, map} = Map.pop(params, field)
f_values = dimensions() |> MapSet.intersection(MapSet.new(values)) |> MapSet.to_list
struct(Botfuel.Entity, Map.put(map, field, f_values))
end
defmodule Response do
defstruct [dim: "",
body: "",
values: [],
start: 0,
end: 0
]
@type t :: %__MODULE__{dim: String.t, body: String.t, values: [String.t],
start: non_neg_integer(), end: non_neg_integer()
}
end
end
|
lib/botfuel/entity.ex
| 0.690246 | 0.618723 |
entity.ex
|
starcoder
|
defmodule Ada.Source.LastFm.Track do
@moduledoc false
defstruct [:artist, :album, :name, :listened_at]
@type t :: %__MODULE__{
artist: String.t(),
album: String.t(),
name: String.t(),
listened_at: :now_playing | DateTime.t()
}
@doc """
Finds the currently playing track (if any).
"""
@spec now_playing([t]) :: {:now_playing, t} | :not_playing
def now_playing(tracks) do
case Enum.find(tracks, fn t -> t.listened_at == :now_playing end) do
nil -> :not_playing
track -> {:now_playing, track}
end
end
@doc """
Returns the artist with the highest number of tracks in the collection.
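For example (track structs trimmed to the relevant fields, sample data only):

    tracks = [
      %Ada.Source.LastFm.Track{artist: "Low", name: "Especially Me"},
      %Ada.Source.LastFm.Track{artist: "Low", name: "Monkey"},
      %Ada.Source.LastFm.Track{artist: "Burial", name: "Archangel"}
    ]
    Ada.Source.LastFm.Track.most_listened_artist(tracks)
    # => "Low"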
"""
@spec most_listened_artist([t]) :: nil | String.t()
def most_listened_artist([]), do: nil
def most_listened_artist(tracks) do
{artist, _tracks} =
tracks
|> Enum.group_by(fn track -> track.artist end)
|> Enum.max_by(fn {_artist, artist_tracks} -> Enum.count(artist_tracks) end)
artist
end
@doc """
Group tracks by the hour.
If there's a currently playing track, it's grouped under the hour of the
local `now` datetime passed to the function.
"""
@spec group_by_hour([t], Calendar.time_zone(), DateTime.t()) :: [{DateTime.t(), [t]}]
def group_by_hour(tracks, timezone, local_now) do
tracks
|> Enum.group_by(fn track ->
case track.listened_at do
:now_playing ->
%{local_now | minute: 0, second: 0, microsecond: {0, 0}}
datetime ->
local_datetime = Calendar.DateTime.shift_zone!(datetime, timezone)
%{local_datetime | minute: 0, second: 0, microsecond: {0, 0}}
end
end)
|> Enum.sort(fn {hour1, _tracks1}, {hour2, _tracks2} ->
datetime_asc_compare(hour1, hour2)
end)
end
@doc """
Count tracks by the hour.
If there's a currently playing track, it's grouped under the hour of the
local `now` datetime passed to the function.
"""
@spec count_by_hour([t], Calendar.time_zone(), DateTime.t()) :: [{DateTime.t(), pos_integer()}]
def count_by_hour(tracks, timezone, local_now) do
tracks
|> group_by_hour(timezone, local_now)
|> Enum.map(fn {hour, hour_tracks} ->
{hour, Enum.count(hour_tracks)}
end)
end
defp datetime_asc_compare(dt1, dt2) do
case DateTime.compare(dt1, dt2) do
:lt -> true
:eq -> true
:gt -> false
end
end
end
|
lib/ada/source/last_fm/track.ex
| 0.838051 | 0.476458 |
track.ex
|
starcoder
|
defmodule Nostrum.Cache.ChannelCache do
@default_cache_implementation Nostrum.Cache.ChannelCache.ETS
@moduledoc """
Cache behaviour & dispatcher for channels.
You can call the functions provided by this module independent of which cache
is configured, and it will dispatch to the configured cache implementation.
The user-facing functions for reading the cache can be found in the "Reading
the cache" section.
By default, #{@default_cache_implementation} will be used for caching channels.
You can override this in the `:caches` option of the `:nostrum` application
by setting the `:channels` field to a different module implementing the
`Nostrum.Cache.ChannelCache` behaviour. Any module below
`Nostrum.Cache.ChannelCache` can be used as a cache.
## Writing your own channel cache
As with the other caches, the channel cache API consists of two parts:
- The functions that the user calls, currently only `get/1` and `get!/1`
- The functions that nostrum calls, such as `c:create/1` or `c:update/1`.
These **do not create any objects in the Discord API**, they are purely
created to update the cached data from data that Discord sends us. If you
want to create objects on Discord, use the functions exposed by `Nostrum.Api`
instead.
You need to implement both of them for nostrum to work with your custom
cache. **You also need to implement `Supervisor` callbacks**, which will
start your cache as a child under `Nostrum.Cache.CacheSupervisor`: As an
example, the `Nostrum.Cache.ChannelCache.ETS` implementation uses this to to
set up its ETS table it uses for caching. See the callbacks section for every
nostrum-related callback you need to implement.
The "upstream data" wording in this module references the fact that the
data that the channel cache (and other caches) retrieves represents the raw
data we receive from the upstream connection, no attempt is made by nostrum
to sanitize the data before it enters the cache. Caching implementations
need to cast the data to the resulting type themselves. A possible future
improvement would be moving the data casting into this module before the
backing cache implementation is called.
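As an illustration, pointing nostrum at a custom cache module (the module name
below is hypothetical, and the exact shape of the `:caches` option should be
checked against the nostrum version in use):

```elixir
config :nostrum,
  caches: %{
    channels: MyBot.ChannelCache
  }
```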
"""
alias Nostrum.Struct.Channel
@configured_cache :nostrum
|> Application.compile_env(
[:caches, :channels],
@default_cache_implementation
)
@typedoc "Specifies the reason for why a lookup operation has failed."
@type reason :: :channel_not_found
## Supervisor callbacks
# These set up the backing cache.
@doc false
defdelegate init(init_arg), to: @configured_cache
@doc false
defdelegate start_link(init_arg), to: @configured_cache
@doc false
defdelegate child_spec(opts), to: @configured_cache
## Behaviour specification
@doc ~S"""
Retrieves a channel from the cache.
Internally, the ChannelCache process only stores
`t:Nostrum.Struct.Channel.dm_channel/0` references. To get channel
information, a call is made to a `Nostrum.Cache.GuildCache`.
If successful, returns `{:ok, channel}`. Otherwise, returns `{:error, reason}`
## Example
```elixir
case Nostrum.Cache.ChannelCache.get(133333333337) do
{:ok, channel} ->
"We found " <> channel.name
{:error, _reason} ->
"Donde esta"
end
```
"""
@doc section: :reading
@callback get(Channel.id() | Nostrum.Struct.Message.t()) ::
              {:ok, Channel.t()} | {:error, reason}
@doc """
Same as `get/1`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@doc section: :reading
@callback get!(Channel.id() | Nostrum.Struct.Message.t()) :: no_return | Channel.t()
# Functions called from nostrum.
@doc "Create a channel in the cache."
@callback create(map) :: Channel.t()
@doc """
Update a channel from upstream data.
Return the original channel before the update, and the updated channel.
"""
@callback update(Channel.t()) :: :noop | {Channel.t(), Channel.t()}
@doc """
Delete a channel from the cache.
Return the old channel if it was cached, or `nil` otherwise.
"""
@callback delete(Channel.id()) :: :noop | Channel.t()
@doc """
Lookup a channel from the cache by ID.
Return channel_not_found if not found.
"""
@callback lookup(Channel.id()) :: {:error, reason} | {:ok, map}
# Dispatching logic
defdelegate get(channel_id), to: @configured_cache
defdelegate get!(channel_id), to: @configured_cache
defdelegate create(map), to: @configured_cache
defdelegate update(channel), to: @configured_cache
defdelegate delete(channel_id), to: @configured_cache
defdelegate lookup(channel_id), to: @configured_cache
end
|
lib/nostrum/cache/channel_cache.ex
| 0.875608 | 0.770594 |
channel_cache.ex
|
starcoder
|
defmodule ExAudit.Type.PatchMap do
use Ecto.Type
def type, do: :map
def cast(a), do: {:ok, a}
def dump(patch) do
{:ok, encode(patch)}
end
def load(binary) do
{:ok, decode(binary)}
end
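  # The private encode/decode helpers below translate ExAudit's patch tuples into
  # plain maps (and back) so they can be stored in a :map column, for example:
  #
  #   {:added, "x"}                     <-> %{"a" => "add", "v" => "x"}
  #   {:primitive_change, "old", "new"} <-> %{"a" => "upd", "ov" => "old", "v" => "new"}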
defp encode({:added, value}), do: %{"a" => "add", "v" => value}
defp encode({:removed, value}), do: %{"a" => "del", "v" => value}
defp encode({:primitive_change, old_value, new_value}),
do: %{
"a" => "upd",
"ov" => old_value,
"v" => new_value
}
defp encode({:changed, changes}), do: %{"a" => "upd", "v" => encode(changes)}
defp encode({:added_to_list, index, value}),
do: %{
"a" => "add",
"idx" => index,
"v" => value
}
defp encode({:removed_from_list, index, value}),
do: %{
"a" => "del",
"idx" => index,
"v" => value
}
defp encode({:changed_in_list, index, changes}),
do: %{
"a" => "upd",
"index" => index,
"v" => encode(changes)
}
defp encode(changes) when is_struct(changes) do
changes
end
defp encode(changes) when is_map(changes) do
Enum.reduce(changes, %{}, fn {key, value}, acc ->
Map.put(acc, key, encode(value))
end)
end
defp encode(changes) when is_tuple(changes) do
changes
|> Tuple.to_list()
|> Enum.map(&encode/1)
end
defp encode(changes) when is_list(changes) do
changes
|> Enum.map(&encode/1)
end
defp encode(value), do: value
defp decode(%{"a" => "add", "idx" => index, "v" => value}) do
{:added_to_list, index, value}
end
defp decode(%{"a" => "del", "idx" => index, "v" => value}) do
    {:removed_from_list, index, value}
end
defp decode(%{"a" => "upd", "idx" => index, "v" => value}) do
{:changed_in_list, index, decode(value)}
end
defp decode(%{"a" => "add", "v" => value}) do
{:added, value}
end
defp decode(%{"a" => "del", "v" => value}) do
{:removed, value}
end
defp decode(%{"a" => "upd", "v" => new_value, "ov" => old_value}) do
{:primitive_change, old_value, new_value}
end
defp decode(%{"a" => "upd", "v" => value}) do
{:changed, decode(value)}
end
defp decode(value) when is_map(value) do
Enum.reduce(value, %{}, fn {key, value}, acc ->
Map.put(acc, String.to_atom(key), decode(value))
end)
end
defp decode(value) when is_list(value) do
first_value = List.first(value)
if is_binary(first_value) do
key = List.first(value) |> String.to_atom()
value = List.replace_at(value, 0, key)
value
|> Enum.map(&decode/1)
|> List.to_tuple()
else
value
|> Enum.map(&decode/1)
|> List.to_tuple()
end
end
defp decode(value), do: value
end
|
lib/repo/types/patch_map_type.ex
| 0.636466 | 0.430806 |
patch_map_type.ex
|
starcoder
|
defmodule JsonWebToken.Jwt do
@moduledoc """
Encode claims for transmission as a JSON object that is used as the payload of a JSON Web
Signature (JWS) structure, enabling the claims to be integrity protected with a Message
Authentication Code (MAC), to be later verified
see http://tools.ietf.org/html/rfc7519
"""
alias JsonWebToken.Jws
@algorithm_default "HS256"
@header_default %{typ: "JWT"}
# JOSE header types from: https://tools.ietf.org/html/rfc7515
@header_jose_keys [:alg, :jku, :jwk, :kid, :x5u, :x5c, :x5t, :"x5t#S256", :typ, :cty, :crit]
@doc """
Return a JSON Web Token (JWT), a string representing a set of claims as a JSON object that is
encoded in a JWS
## Example
iex> claims = %{iss: "joe", exp: 1300819380, "http://example.com/is_root": true}
...> key = "<KEY>"
...> JsonWebToken.Jwt.sign(claims, %{key: key})
"<KEY>"
see http://tools.ietf.org/html/rfc7519#section-7.1
"""
def sign(claims, options) do
header = config_header(options)
payload = claims_to_json(claims)
jws_message(header, payload, options[:key], header[:alg])
end
@doc """
Given an options map, return a map of header options
## Example
iex> JsonWebToken.Jwt.config_header(alg: "RS256", key: "key")
%{typ: "JWT", alg: "RS256"}
Filters out unsupported claims options and ignores any encryption keys
"""
def config_header(options) when is_map(options) do
{jose_registered_headers, _other_headers} = Map.split(options, @header_jose_keys)
@header_default
|> Map.merge(jose_registered_headers)
|> Map.merge(%{alg: algorithm(options)})
end
def config_header(options) when is_list(options) do
options |> Map.new |> config_header
end
defp algorithm(options) do
alg = options[:alg] || @algorithm_default
alg_or_default(alg, alg == "")
end
defp alg_or_default(_, true), do: @algorithm_default
defp alg_or_default(alg, _), do: alg
defp claims_to_json(nil), do: raise "Claims nil"
defp claims_to_json(""), do: raise "Claims blank"
defp claims_to_json(claims) do
claims
|> Poison.encode
|> claims_json
end
defp claims_json({:ok, json}), do: json
defp claims_json({:error, _}), do: raise "Failed to encode claims as JSON"
defp jws_message(header, payload, _, "none"), do: Jws.unsecured_message(header, payload)
defp jws_message(header, payload, key, _), do: Jws.sign(header, payload, key)
@doc """
Return a tuple {ok: claims (map)} if the signature is verified, or {:error, "invalid"} otherwise
## Example
iex> jwt = "<KEY>"
...> key = "<KEY>"
...> JsonWebToken.Jwt.verify(jwt, %{key: key})
{:ok, %{iss: "joe", exp: 1300819380, "http://example.com/is_root": true}}
see http://tools.ietf.org/html/rfc7519#section-7.2
"""
def verify(jwt, options) do
payload(Jws.verify jwt, algorithm(options), options[:key])
end
defp payload({:error, "invalid"}), do: {:error, "invalid"}
defp payload({:ok, jws}), do: {:ok, jws_payload(jws)}
defp jws_payload(jws) do
[_, encoded_payload, _] = String.split(jws, ".")
payload_to_map(encoded_payload)
end
defp payload_to_map(encoded_payload) do
encoded_payload
|> Base.url_decode64!(padding: false)
|> Poison.decode(keys: :atoms)
|> claims_map
end
defp claims_map({:ok, map}), do: map
defp claims_map({:error, _}), do: raise "Failed to decode claims from JSON"
end
|
lib/json_web_token/jwt.ex
| 0.876931 | 0.466906 |
jwt.ex
|
starcoder
|
defmodule AWS.Batch do
@moduledoc """
AWS Batch enables you to run batch computing workloads on the AWS Cloud.
Batch computing is a common way for developers, scientists, and engineers
to access large amounts of compute resources, and AWS Batch removes the
undifferentiated heavy lifting of configuring and managing the required
infrastructure. AWS Batch will be familiar to users of traditional batch
computing software. This service can efficiently provision resources in
response to jobs submitted in order to eliminate capacity constraints,
reduce compute costs, and deliver results quickly.
As a fully managed service, AWS Batch enables developers, scientists, and
engineers to run batch computing workloads of any scale. AWS Batch
automatically provisions compute resources and optimizes the workload
distribution based on the quantity and scale of the workloads. With AWS
Batch, there is no need to install or manage batch computing software,
which allows you to focus on analyzing results and solving problems. AWS
Batch reduces operational complexities, saves time, and reduces costs,
which makes it easy for developers, scientists, and engineers to run their
batch jobs in the AWS Cloud.
"""
@doc """
Cancels a job in an AWS Batch job queue. Jobs that are in the `SUBMITTED`,
`PENDING`, or `RUNNABLE` state are cancelled. Jobs that have progressed to
`STARTING` or `RUNNING` are not cancelled (but the API operation still
succeeds, even if no job is cancelled); these jobs must be terminated with
the `TerminateJob` operation.
"""
def cancel_job(client, input, options \\ []) do
path_ = "/v1/canceljob"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates an AWS Batch compute environment. You can create `MANAGED` or
`UNMANAGED` compute environments.
In a managed compute environment, AWS Batch manages the capacity and
instance types of the compute resources within the environment. This is
based on the compute resource specification that you define or the [launch
template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
that you specify when you create the compute environment. You can choose to
use Amazon EC2 On-Demand Instances or Spot Instances in your managed
compute environment. You can optionally set a maximum price so that Spot
Instances only launch when the Spot Instance price is below a specified
percentage of the On-Demand price.
<note> Multi-node parallel jobs are not supported on Spot Instances.
</note> In an unmanaged compute environment, you can manage your own
compute resources. This provides more compute resource configuration
options, such as using a custom AMI, but you must ensure that your AMI
meets the Amazon ECS container instance AMI specification. For more
information, see [Container Instance
AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_instance_AMIs.html)
in the *Amazon Elastic Container Service Developer Guide*. After you have
created your unmanaged compute environment, you can use the
`DescribeComputeEnvironments` operation to find the Amazon ECS cluster that
is associated with it. Then, manually launch your container instances into
that Amazon ECS cluster. For more information, see [Launching an Amazon ECS
Container
Instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html)
in the *Amazon Elastic Container Service Developer Guide*.
<note> AWS Batch does not upgrade the AMIs in a compute environment after
it is created (for example, when a newer version of the Amazon
ECS-optimized AMI is available). You are responsible for the management of
the guest operating system (including updates and security patches) and any
additional application software or utilities that you install on the
compute resources. To use a new AMI for your AWS Batch jobs:
<ol> <li> Create a new compute environment with the new AMI.
</li> <li> Add the compute environment to an existing job queue.
</li> <li> Remove the old compute environment from your job queue.
</li> <li> Delete the old compute environment.
</li> </ol> </note>
"""
def create_compute_environment(client, input, options \\ []) do
path_ = "/v1/createcomputeenvironment"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates an AWS Batch job queue. When you create a job queue, you associate
one or more compute environments to the queue and assign an order of
preference for the compute environments.
You also set a priority to the job queue that determines the order in which
the AWS Batch scheduler places jobs onto its associated compute
environments. For example, if a compute environment is associated with more
than one job queue, the job queue with a higher priority is given
preference for scheduling jobs to that compute environment.
"""
def create_job_queue(client, input, options \\ []) do
path_ = "/v1/createjobqueue"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes an AWS Batch compute environment.
Before you can delete a compute environment, you must set its state to
`DISABLED` with the `UpdateComputeEnvironment` API operation and
disassociate it from any job queues with the `UpdateJobQueue` API
operation.
"""
def delete_compute_environment(client, input, options \\ []) do
path_ = "/v1/deletecomputeenvironment"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes the specified job queue. You must first disable submissions for a
queue with the `UpdateJobQueue` operation. All jobs in the queue are
terminated when you delete a job queue.
It is not necessary to disassociate compute environments from a queue
before submitting a `DeleteJobQueue` request.
"""
def delete_job_queue(client, input, options \\ []) do
path_ = "/v1/deletejobqueue"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deregisters an AWS Batch job definition. Job definitions will be
permanently deleted after 180 days.
"""
def deregister_job_definition(client, input, options \\ []) do
path_ = "/v1/deregisterjobdefinition"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes one or more of your compute environments.
If you are using an unmanaged compute environment, you can use the
`DescribeComputeEnvironment` operation to determine the `ecsClusterArn`
that you should launch your Amazon ECS container instances into.
"""
def describe_compute_environments(client, input, options \\ []) do
path_ = "/v1/describecomputeenvironments"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes a list of job definitions. You can specify a `status` (such as
`ACTIVE`) to only return job definitions that match that status.
"""
def describe_job_definitions(client, input, options \\ []) do
path_ = "/v1/describejobdefinitions"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes one or more of your job queues.
"""
def describe_job_queues(client, input, options \\ []) do
path_ = "/v1/describejobqueues"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes a list of AWS Batch jobs.
"""
def describe_jobs(client, input, options \\ []) do
path_ = "/v1/describejobs"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Returns a list of AWS Batch jobs.
You must specify only one of the following:
<ul> <li> a job queue ID to return a list of jobs in that job queue
</li> <li> a multi-node parallel job ID to return a list of that job's
nodes
</li> <li> an array job ID to return a list of that job's children
</li> </ul> You can filter the results by job status with the `jobStatus`
parameter. If you do not specify a status, only `RUNNING` jobs are
returned.
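  Example of the call shape (a sketch; the queue name below is a placeholder
  and `client` is an already-configured `AWS.Client` struct):
  ```elixir
  AWS.Batch.list_jobs(client, %{"jobQueue" => "my-queue", "jobStatus" => "RUNNABLE"})
  ```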
"""
def list_jobs(client, input, options \\ []) do
path_ = "/v1/listjobs"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
List the tags for an AWS Batch resource. AWS Batch resources that support
tags are compute environments, jobs, job definitions, and job queues. ARNs
for child jobs of array and multi-node parallel (MNP) jobs are not
supported.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/v1/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Registers an AWS Batch job definition.
"""
def register_job_definition(client, input, options \\ []) do
path_ = "/v1/registerjobdefinition"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Submits an AWS Batch job from a job definition. Parameters specified during
`SubmitJob` override parameters defined in the job definition.
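  Example of the call shape (a sketch; all names and values below are
  placeholders):
  ```elixir
  AWS.Batch.submit_job(client, %{
    "jobName" => "example-job",
    "jobQueue" => "my-queue",
    "jobDefinition" => "example-def:1",
    "parameters" => %{"inputFile" => "s3://bucket/key"}
  })
  ```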
"""
def submit_job(client, input, options \\ []) do
path_ = "/v1/submitjob"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Associates the specified tags to a resource with the specified
`resourceArn`. If existing tags on a resource are not specified in the
request parameters, they are not changed. When a resource is deleted, the
tags associated with that resource are deleted as well. AWS Batch resources
that support tags are compute environments, jobs, job definitions, and job
queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are
not supported.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
path_ = "/v1/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Terminates a job in a job queue. Jobs that are in the `STARTING` or
`RUNNING` state are terminated, which causes them to transition to
`FAILED`. Jobs that have not progressed to the `STARTING` state are
cancelled.
"""
def terminate_job(client, input, options \\ []) do
path_ = "/v1/terminatejob"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes specified tags from an AWS Batch resource.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
path_ = "/v1/tags/#{URI.encode(resource_arn)}"
headers = []
{query_, input} =
[
{"tagKeys", "tagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Updates an AWS Batch compute environment.
"""
def update_compute_environment(client, input, options \\ []) do
path_ = "/v1/updatecomputeenvironment"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates a job queue.
"""
def update_job_queue(client, input, options \\ []) do
path_ = "/v1/updatejobqueue"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "batch"}
host = build_host("batch", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/batch.ex
| 0.879878 | 0.739446 |
batch.ex
|
starcoder
|
# position x-y-z
# size W(x)-H(y)-D(z)
# Objectives
# calculate corners
# detect collisions
# rotate 90 degrees (mind boundaries)
defmodule Cuboid do
defstruct position: %{ x: 0, y: 0, z: 0 }, dimensions: %{ w: 0, h: 0, d: 0 }
  # cube = Cuboid.new_cuboid(%{x: <int>, y: <int>, z: <int>}, %{ w: <int>, h: <int>, d: <int> }, current_cuboids)
def new_cuboid(position, dimensions, current_cuboids) do
new = %Cuboid{
position: %{ x: position.x, y: position.y, z: position.z },
dimensions: %{ w: dimensions.w, h: dimensions.h, d: dimensions.d }}
test = new |> vertices |> position_min_max |> intersects?(current_cuboids)
if elem(test, 1) === false, do: new, else: nil
end
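  # Usage sketch (coordinates and sizes below are arbitrary examples): the
  # second cuboid overlaps the first, so `new_cuboid/3` returns nil for it.
  #
  #   first = Cuboid.new_cuboid(%{x: 0, y: 0, z: 0}, %{w: 2, h: 2, d: 2}, [])
  #   Cuboid.new_cuboid(%{x: 1, y: 1, z: 1}, %{w: 2, h: 2, d: 2}, [first])
  #   #=> nil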
def move_to(%Cuboid{} = cuboid, position) do
cuboid
|> Map.replace!(:position, %{ x: position.x, y: position.y, z: position.z })
|> IO.inspect()
# |> modify position
# |> check intersect
# |> return new position
end
def vertices(%Cuboid{} = cuboid) do
{:vertices, [
{ :front_bottom_left, cuboid.position },
{ :front_bottom_right, %{ x: cuboid.position.x + cuboid.dimensions.w, y: cuboid.position.y, z: cuboid.position.z }},
{ :back_bottom_right, %{ x: cuboid.position.x + cuboid.dimensions.w, y: cuboid.position.y, z: cuboid.position.z + cuboid.dimensions.d }},
{ :back_bottom_left, %{ x: cuboid.position.x, y: cuboid.position.y, z: cuboid.position.z + cuboid.dimensions.d }},
{ :front_top_left, %{ x: cuboid.position.x, y: cuboid.position.y + cuboid.dimensions.h, z: cuboid.position.z }},
{ :front_top_right, %{ x: cuboid.position.x + cuboid.dimensions.w, y: cuboid.position.y + cuboid.dimensions.h, z: cuboid.position.z }},
{ :back_top_right, %{ x: cuboid.position.x + cuboid.dimensions.w, y: cuboid.position.y + cuboid.dimensions.h, z: cuboid.position.z + cuboid.dimensions.d }},
{ :back_top_left, %{ x: cuboid.position.x, y: cuboid.position.y + cuboid.dimensions.h, z: cuboid.position.z + cuboid.dimensions.d }}
]}
end
def position_min_max(list, acc \\ nil)
def position_min_max({:vertices, []}, acc), do: acc
  def position_min_max({:vertices, [head|tail] }, nil) do
position_min_max({:vertices, tail}, %{
x: {elem(head,1).x, elem(head,1).x},
y: {elem(head,1).y, elem(head,1).y},
z: {elem(head,1).z, elem(head,1).z}})
end
def position_min_max({:vertices, [head|tail] }, acc) do
position_min_max({:vertices, tail},
%{ x: { (if elem(head,1).x < elem(acc.x,0), do: elem(head,1).x, else: elem(acc.x,0)),
(if elem(head,1).x > elem(acc.x,1), do: elem(head,1).x, else: elem(acc.x,1)) },
y: { (if elem(head,1).y < elem(acc.y,0), do: elem(head,1).y, else: elem(acc.y,0)),
(if elem(head,1).y > elem(acc.y,1), do: elem(head,1).y, else: elem(acc.y,1)) },
z: { (if elem(head,1).z < elem(acc.z,0), do: elem(head,1).z, else: elem(acc.z,0)),
(if elem(head,1).z > elem(acc.z,1), do: elem(head,1).z, else: elem(acc.z,1)) }})
end
  def intersects?(_cuboid_a, []), do: { :ok, false }
  def intersects?(cuboid_a, cuboids) when is_list(cuboids) do
    # Check the new cuboid's bounds against the bounds of every existing cuboid.
    any_collision =
      Enum.any?(cuboids, fn cuboid ->
        bounds = cuboid |> vertices() |> position_min_max()
        intersects?(cuboid_a, bounds) == {:error, true}
      end)
    if any_collision, do: {:error, true}, else: {:ok, false}
  end
  def intersects?(cuboid_a, cuboid_b) do
    # Overlap on all three axes means the two cuboids collide.
    if (elem(cuboid_a.x, 0) < elem(cuboid_b.x, 1) && elem(cuboid_a.x, 1) > elem(cuboid_b.x, 0)) &&
       (elem(cuboid_a.y, 0) < elem(cuboid_b.y, 1) && elem(cuboid_a.y, 1) > elem(cuboid_b.y, 0)) &&
       (elem(cuboid_a.z, 0) < elem(cuboid_b.z, 1) && elem(cuboid_a.z, 1) > elem(cuboid_b.z, 0)) do
      {:error, true}
    else
      {:ok, false}
    end
  end
end
|
lib/cuboid.ex
| 0.505127 | 0.593786 |
cuboid.ex
|
starcoder
|
defmodule DivoPulsar do
@moduledoc """
Defines a pulsar broker in 'standalone' mode
as a map compatible with divo for building a
docker-compose file.
"""
@behaviour Divo.Stack
@doc """
Implements the Divo Stack behaviour to take a
keyword list of defined variables specific to
the DivoPulsar stack and returns a map describing
the service definition of Pulsar.
### Optional Configuration
  * `port`: The port on which the management API will be exposed to the host for making REST calls (for creating partitioned topics, posting schema, etc). The default port Pulsar uses for its REST API is 8080, which is commonly used by other services for web and REST accessibility and may be more likely to require an override if you are testing additional services alongside Pulsar simultaneously. This only affects the port exposed to the host; internally to the containerized service the API is listening on port 8080.
  * `ui_port`: The port on which the Pulsar dashboard will be exposed to the host. Configuring the UI port also enables the Pulsar dashboard as part of the stack; this service is not enabled unless a port is specified. This only affects the port exposed to the host; internally to the containerized service the dashboard is listening on port 80.
* `version`: The version of the Pulsar container image to create. Defaults to `latest`.
"""
@impl Divo.Stack
@spec gen_stack([tuple()]) :: map()
def gen_stack(envars) do
image_version = Keyword.get(envars, :version, "latest")
api_port = Keyword.get(envars, :port, 8080)
ui_port = Keyword.get(envars, :ui_port)
pulsar_ports = ["6650:6650", exposed_ports(api_port, 8080)]
pulsar_service = %{
pulsar: %{
image: "apachepulsar/pulsar:#{image_version}",
ports: pulsar_ports,
command: ["bin/pulsar", "standalone"],
healthcheck: %{
test: [
"CMD-SHELL",
"curl -I http://localhost:8080/admin/v2/namespaces/public/default | grep '200' || exit 1"
],
interval: "5s",
timeout: "10s",
retries: 3
}
}
}
case ui_port == nil do
true ->
pulsar_service
false ->
dashboard_port = [exposed_ports(ui_port, 80)]
Map.merge(pulsar_service, %{
dashboard: %{
image: "apachepulsar/pulsar-dashboard:latest",
depends_on: ["pulsar"],
ports: dashboard_port,
environment: ["SERVICE_URL=http://pulsar:8080"]
}
})
end
end
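  # Usage sketch (port values here are arbitrary examples):
  #
  #   DivoPulsar.gen_stack(port: 8081, ui_port: 8088)
  #   #=> %{pulsar: %{...}, dashboard: %{...}}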
defp exposed_ports(external_port, internal_port) do
to_string(external_port) <> ":" <> to_string(internal_port)
end
end
|
lib/divo_pulsar.ex
| 0.749087 | 0.519704 |
divo_pulsar.ex
|
starcoder
|
defmodule Librecov.BadgeCreator do
require EEx
# values and SVG are taken from https://github.com/badges/shields
@base_width 89
@extra_width 7
@template_path Path.join(__DIR__, "templates/badge_template.eex")
@authorized_formats ~w(png jpg svg)
EEx.function_from_file(:defp, :template, @template_path, [
:coverage_str,
:width,
:extra_width,
:bg_color
])
def make_badge(coverage, options \\ []) do
{coverage, coverage_str, digits_num} =
if is_nil(coverage) do
{nil, "NA", 2}
else
coverage = round(coverage)
{coverage, "#{coverage}%", coverage |> Integer.to_string() |> String.length()}
end
extra_width = (digits_num - 1) * @extra_width
width = @base_width + extra_width
color = badge_color(coverage)
template(coverage_str, width, extra_width, color) |> get_image(options[:format])
end
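  # Usage sketch ("svg" skips the ImageMagick conversion step entirely):
  #
  #   {:ok, :svg, svg} = Librecov.BadgeCreator.make_badge(87, format: "svg")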
defp get_image(svg, nil),
do: get_image(svg, "png")
defp get_image(svg, format) when format in @authorized_formats,
do: get_image(svg, String.to_atom(format))
defp get_image(svg, :svg),
do: {:ok, :svg, svg}
defp get_image(svg, format) when is_atom(format),
do: transform(svg, format)
def transform(svg, format) do
dir = Temp.mkdir!("librecov")
{svg_path, output_path} =
{Path.join(dir, "coverage.svg"), Path.join(dir, "coverage.#{format}")}
File.write!(svg_path, svg)
case make_output(svg_path, output_path) do
{:ok, output} ->
File.rm_rf!(dir)
{:ok, format, output}
e ->
e
end
end
defp make_output(svg_path, output_path) do
try do
Librecov.ImageMagick.convert([svg_path, output_path])
File.read(output_path)
rescue
ErlangError -> {:error, "failed to run convert"}
end
end
defp badge_color(coverage) do
color =
cond do
is_nil(coverage) -> "lightgrey"
coverage == 0 -> "red"
coverage < 80 -> "yellow"
coverage < 90 -> "yellowgreen"
coverage < 100 -> "green"
true -> "brightgreen"
end
hex_color(color)
end
defp hex_color("red"), do: "#e05d44"
defp hex_color("yellow"), do: "#dfb317"
defp hex_color("yellowgreen"), do: "#a4a61d"
defp hex_color("green"), do: "#97CA00"
defp hex_color("brightgreen"), do: "#4c1"
defp hex_color("lightgrey"), do: "#9f9f9f"
end
|
lib/opencov/badge_creator.ex
| 0.533397 | 0.444444 |
badge_creator.ex
|
starcoder
|
defmodule Resourceful.Registry do
  @moduledoc """
Instances of `Resourceful.Type` are intended to be used in conjunction with a
registry in most circumstances. The application, and even the client, will
likely understand a resource type by its string name/identifier. Types
themselves, when associated with a registry, will be able to reference each
other as well, which forms the basis for relationships.
A module using the `Registry` behaviour becomes a key/value store.
`defresourcefultype/1` allows the types to be evaluated entirely at compile
time.
"""
defmodule DuplicateTypeNameError do
defexception message: "type with name already exists"
end
defmodule InvalidType do
defexception message: "result of block must be a `Resourceful.Type`"
end
defmodule NotRegisteredError do
defexception message: "type is not registered"
end
alias Resourceful.{Error, Type}
alias Resourceful.Type.{GraphedField, Relationship}
defmacro __using__(_) do
quote do
import unquote(__MODULE__), only: [type: 1, type: 2]
import Resourceful.Type,
only: [
id: 2,
max_depth: 2,
max_filters: 2,
max_sorters: 2,
meta: 3,
name: 2,
new: 1,
new: 2
]
import Resourceful.Type.Builders
import Resourceful.Type.Ecto, only: [type_with_schema: 1, type_with_schema: 2]
@before_compile {unquote(__MODULE__), :before_compile}
@rtypes %{}
def fetch(name), do: unquote(__MODULE__).fetch(all(), name)
def fetch!(name), do: unquote(__MODULE__).fetch!(all(), name)
def fetch_field_graph(name), do: unquote(__MODULE__).fetch(field_graphs(), name)
def fetch_field_graph!(name), do: unquote(__MODULE__).fetch!(field_graphs(), name)
def has_type?(name), do: Map.has_key?(all(), name)
end
end
@doc """
Builds a field graph for a `Resourceful.Type`. Since types have a `max_depth`,
all possible graphed fields can be computed and cached at compile time when
using a registry. This allows nested fields to be treated like local fields in
the sense that they are available in a flat map.
For example, a song type would have a `title` field. Once graphed, it would
also have an `album.title` field. If the depth was set to 2, access to a field
like `album.artist.name` would also be available.
  This prevents a lot of recursion logic from being applied at every lookup, and
  by inspecting the field graph for a type it's easy to see all of the possible
mappings.
Graphed fields are wrapped in a `Resourceful.Type.GraphedField` struct which
contains relational information about the field in addition to the field data
itself.
See `Resourceful.Type.max_depth/2` for information about what is intended to
be included in a field graph based on the depth setting.
"""
@spec build_field_graph(%{String.t() => %Type{}}, String.t()) :: Type.field_graph()
def build_field_graph(types_map, type_name) do
type = Map.get(types_map, type_name)
do_build_field_graph(%{}, types_map, type, type.max_depth, nil, nil, [])
end
def do_build_field_graph(field_graph, _, _, -1, _, _, _), do: field_graph
def do_build_field_graph(
field_graph,
types_map,
%Type{} = type,
depth,
parent,
name_prefix,
map_to_prefix
) do
Enum.reduce(type.fields, field_graph, fn {name, field}, new_field_graph ->
new_map_to = map_to_prefix ++ [field.map_to]
new_name = qualify_name(name_prefix, name)
field_data = GraphedField.new(field, new_name, new_map_to, parent)
new_field_graph = maybe_put_field_data(new_field_graph, field_data, depth)
case field do
%Relationship{graph?: true} ->
do_build_field_graph(
new_field_graph,
types_map,
Map.fetch!(types_map, field.related_type),
depth - 1,
field_data,
new_name,
new_map_to
)
_ ->
new_field_graph
end
end)
end
defp maybe_put_field_data(field_graph, %GraphedField{field: %Relationship{}}, 0) do
field_graph
end
defp maybe_put_field_data(field_graph, graphed_field, _) do
Map.put(field_graph, graphed_field.name, graphed_field)
end
defp qualify_name(nil, name), do: name
defp qualify_name(prefix, name), do: "#{prefix}.#{name}"
@doc """
A name can be optionally passed to the `type/2` macro. If provided, this name
  will override the name of the resource created in the provided block.
"""
@spec maybe_put_name(%Type{}, String.t() | nil) :: %Type{}
def maybe_put_name(%Type{} = type, nil), do: type
def maybe_put_name(%Type{} = type, name) when is_binary(name) do
Type.name(type, name)
end
@spec fetch(map(), String.t()) :: {:ok, %Type{}} | Error.contextual()
def fetch(rtypes, name) do
case Map.get(rtypes, name) do
nil -> Error.with_key(:resource_type_not_registered, name)
type -> {:ok, type}
end
end
@spec fetch!(map(), String.t()) :: %Type{}
def fetch!(rtypes, name) do
case Map.get(rtypes, name) do
nil -> raise NotRegisteredError, "type with name `#{name}` is not registered"
type -> type
end
end
@spec fetch_field_graph(map(), String.t() | %Type{}) ::
{:ok, %{String.t() => %GraphedField{}}} | Error.contextual()
def fetch_field_graph(field_graphs, %Type{name: name}) do
fetch_field_graph(field_graphs, name)
end
def fetch_field_graph(field_graphs, name) do
case Map.get(field_graphs, name) do
nil -> Error.with_key(:field_graphs_not_registered, name)
graphed_field -> {:ok, graphed_field}
end
end
@spec fetch_field_graph!(map(), String.t() | %Type{}) ::
%{String.t() => %GraphedField{}}
def fetch_field_graph!(field_graphs, name) do
case Map.get(field_graphs, name) do
nil -> raise NotRegisteredError, "field graph for `#{name}` is not registered"
field_graph -> field_graph
end
end
@doc """
Ensures a value is a `Resourceful.Type` and that no type of the same name
exists in the map. Raises an exception if both conditions are not met.
"""
@spec validate_type!(any(), %{String.t() => %Type{}}) :: %Type{}
def validate_type!(%Type{} = type, types) do
if Map.has_key?(types, type.name) do
raise __MODULE__.DuplicateTypeNameError,
message: "type with name `#{type.name}` already exists"
end
type
end
def validate_type!(_, _), do: raise(__MODULE__.InvalidType)
@doc """
Assigns the resource specified in the `block` and makes it part of the
registry. If `name` is provided, it will rename the resource and use that
`name` as the key.
If `block` does not result in a `Resourceful.Type` an exception will be
raised.
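  ## Example
  A sketch of registering a type; this assumes `Resourceful.Type.new/1` takes
  the type name (`new/1` is imported by `use Resourceful.Registry`):
  ```elixir
  defmodule MyApp.Registry do
    use Resourceful.Registry
    type do
      new("albums")
    end
  end
  ```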
"""
defmacro type(name \\ nil, do: block) do
quote do
@rtype unquote(block)
|> Resourceful.Type.register(__MODULE__)
|> unquote(__MODULE__).maybe_put_name(unquote(name))
|> unquote(__MODULE__).validate_type!(@rtypes)
@rtypes Map.put(@rtypes, @rtype.name, @rtype)
end
end
defmacro before_compile(_) do
quote do
def all(), do: @rtypes
@rtype_field_graphs Map.new(@rtypes, fn {name, _} ->
{name, unquote(__MODULE__).build_field_graph(@rtypes, name)}
end)
def field_graphs(), do: @rtype_field_graphs
@rtype_names Map.keys(@rtypes)
def names(), do: @rtype_names
end
end
end
|
lib/resourceful/registry.ex
| 0.842637 | 0.528108 |
registry.ex
|
starcoder
|
defmodule ExploringMars.Mission.Coordinate do
@moduledoc """
This module defines functions that create and operate on coordinates in the
probe's space of movement.
This module and the `Direction` module should change if the
coordinate representation used in the problem changes - for instance,
if we choose to use `{y, x}` coordinates instead of `{x, y}` or if we choose
to model a probe moving in 3D space.
This means this module is coupled to the `ExploringMars.Mission.Direction`
module, as it depends directly on the representation of the
`t:ExploringMars.Mission.Direction.t/0` type. This is acceptable for now, as
the Direction type is quite simple, but it might need change. Check the
note in the documentation of the `ExploringMars.Mission.Direction` module for
further details.
"""
alias ExploringMars.Mission.Direction
@typedoc """
A coordinate is defined as a pair of integers specifying a position on the
cartesian plane. The order of coordinates is `{x, y}`.
"""
@type t :: {integer, integer}
@doc """
Takes a pair of strings, `x_string` and `y_string` and tries to
parse them as a coordinate.
## Examples
iex> Coordinate.from_strings("100", "200")
{:ok, {100, 200}}
iex> Coordinate.from_strings("100x", "200y")
{:no_parse, "100x 200y"}
"""
@spec from_strings(String.t(), String.t()) :: {:ok, t} | {:no_parse, String.t()}
def from_strings(x_string, y_string) do
# we require parsing to be exact - no remaining characters!
with {x_val, ""} <- safe_parse_integer(x_string),
{y_val, ""} <- safe_parse_integer(y_string) do
{:ok, {x_val, y_val}}
else
_ -> {:no_parse, "#{x_string} #{y_string}"}
end
end
@doc """
Does the same as `from_strings`, but fails if any of the coordinate's
components would be negative.
"""
@spec positive_from_strings(
String.t(),
String.t()
) :: {:ok, t} | {:no_parse, String.t()}
def positive_from_strings(x_string, y_string) do
with {x_val, ""} when x_val >= 0 <- safe_parse_integer(x_string),
{y_val, ""} when y_val >= 0 <- safe_parse_integer(y_string) do
{:ok, {x_val, y_val}}
else
_ -> {:no_parse, "#{x_string} #{y_string}"}
end
end
# Integer.parse can throw in many situations. This can lead to unexpected
# cases which we wish to avoid.
defp safe_parse_integer(string) do
try do
Integer.parse(string)
rescue
_ -> :no_parse
end
end
@doc """
Moves a `coordinate` along a `direction`.
## Examples
iex> Coordinate.move({-1, 0}, :E)
{0, 0}
iex> Coordinate.move({0, 0}, "Not a direction")
** (ArgumentError) argument is not a direction
"""
@spec move(t, Direction.t()) :: t
def move(coordinate, direction)
def move({x, y}, direction) do
case direction do
:N -> {x, y + 1}
:E -> {x + 1, y}
:W -> {x - 1, y}
:S -> {x, y - 1}
_ -> raise ArgumentError, message: "argument is not a direction"
end
end
@doc """
Converts a `coordinate` into a representation suitable for user-facing output.
## Examples
iex> Coordinate.pretty_print({2, -2})
"2 -2"
"""
@spec pretty_print(t) :: String.t()
def pretty_print(coordinate)
def pretty_print({x, y}) do
"#{x} #{y}"
end
end
|
lib/exploring_mars/mission/coordinate.ex
| 0.894341 | 0.900748 |
coordinate.ex
|
starcoder
|
defmodule AOC.Day13.CarePackage do
alias AOC.Day13.Intcode
@moduledoc """
Had to get clarification that we are supposed to wait until the program finishes before gathering output.
"""
@type grid :: map
@type point :: {integer, integer}
def part1(path) do
Intcode.stream_puzzle_input(path)
|> Intcode.puzzle_input_to_map()
|> compute_part1(%{}, 0)
|> process_outputs()
|> count_walls()
end
  def part2(path, _use_prev_inputs \\ false) do
Intcode.stream_puzzle_input(path)
|> Intcode.puzzle_input_to_map()
|> (&Intcode.update(&1, {0, 0}, 2)).()
|> compute_part2(%{}, 0)
|> process_outputs()
|> print()
end
def read_program_input(path) do
File.read!(path)
|> String.trim()
|> String.split(",")
end
def compute_part1(memory, grid \\ %{}, score \\ 0) do
with {:waiting, memory} <- Intcode.compute(memory),
{outputs, memory} <- Intcode.empty_outputs(memory),
{grid, score} <- process_outputs({outputs, grid, score}),
{next_input, memory} <- user_input(memory, grid, score),
memory <- Intcode.append_input(memory, next_input) do
compute_part1(memory, grid, score)
else
{:error, _memory} ->
:error_compute
{:terminate, memory} ->
{outputs, memory} = Intcode.empty_outputs(memory)
{outputs, grid, score}
end
end
def compute_part2(memory, grid \\ %{}, score \\ 0, prev_states \\ []) do
with {:waiting, memory} <- Intcode.compute(memory),
{outputs, memory} <- Intcode.empty_outputs(memory),
{grid, score} <- process_outputs({outputs, grid, score}),
{next_input, memory} <- user_input(memory, grid, score),
memory <- Intcode.append_input(memory, next_input) do
IO.puts("awaiting input")
prev_states = [{memory, grid, score} | prev_states]
compute_part2(memory, grid, score, prev_states)
else
{:error, _memory} ->
:error_compute
{:terminate, memory} ->
prev_states = [{memory, grid, score} | prev_states]
{rewind_memory, rewind_grid, rewind_score} = rewind(prev_states)
if memory == rewind_memory do
{outputs, memory} = Intcode.empty_outputs(memory)
{outputs, grid, score}
else
compute_part2(rewind_memory, rewind_grid, rewind_score, prev_states)
end
end
end
def rewind(memories) do
index =
IO.gets("Rewind? How far? 0 to stop.\n")
|> String.trim()
|> String.to_integer()
Enum.at(memories, index)
end
def user_input(memory, grid, score) do
print({grid, score})
IO.puts("Enter -1, 0, 1 for Left, Neutral, Right:")
next_input =
IO.read(:line)
|> String.trim()
next_input =
case next_input do
"-1" -> -1
"0" -> 0
"1" -> 1
_ -> 0
end
{next_input, memory}
end
@spec read_grid(grid, point) :: {integer, integer}
def read_grid(grid, location) do
Map.get(grid, location, -1)
end
@spec update_grid(grid, point, integer) :: grid
def update_grid(grid, location, value) when is_integer(value) do
# {_old_value, num_painted} = read_grid(grid, location)
Map.put(grid, location, value)
end
def process_outputs({outputs, grid, score}) do
Enum.chunk_every(outputs, 3)
|> Enum.reduce({grid, score}, fn [x, y, tile], {grid, score} ->
if {-1, 0} == {x, y} do
{grid, tile}
else
grid = update_grid(grid, {x, y}, tile)
{grid, score}
end
end)
end
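  # For example, with one tile triple [1, 2, 3] and the score update [-1, 0, 42]:
  #   process_outputs({[1, 2, 3, -1, 0, 42], %{}, 0})
  #   #=> {%{{1, 2} => 3}, 42}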
  def count_walls({grid, _score}) do
Map.values(grid)
|> Enum.reduce(0, fn value, sum ->
if value == 2 do
sum + 1
else
sum
end
end)
end
def print({grid, score}) do
{{min_x, min_y}, {max_x, max_y}} =
Map.keys(grid)
|> Enum.reduce({{0, 0}, {0, 0}}, fn {x, y}, {{min_x, min_y}, {max_x, max_y}} ->
min_x =
if x < min_x do
x
else
min_x
end
min_y =
if y < min_y do
y
else
min_y
end
max_x =
if x > max_x do
x
else
max_x
end
max_y =
if y > max_y do
y
else
max_y
end
{{min_x, min_y}, {max_x, max_y}}
end)
IO.puts("")
Enum.each(max_y..min_y, fn y ->
Enum.each(min_x..max_x, fn x ->
value = read_grid(grid, {x, y})
output =
cond do
value == -1 -> " ? "
value == 0 -> " _ "
value == 1 -> " W "
value == 2 -> " # "
value == 3 -> "vvv"
value == 4 -> " * "
true -> " ? "
end
IO.write(output)
end)
IO.puts("")
end)
IO.puts("Score: #{score}")
:ok
end
end
|
aoc-2019/lib/aoc/day13/care_package.ex
| 0.778691 | 0.452415 |
care_package.ex
|
starcoder
|
defmodule Kira.BranchState do
require Kira.Branch, as: Branch
require Kira.Util, as: Util
@moduledoc false
defstruct [
:branch,
:awaiting,
:blocking,
:awaiting_unapply,
:blocking_unapply,
:task
]
@type errors :: [{any, DateTime.t()}]
@type resolved :: any
@type task ::
:not_started
| {:running_apply, pid(), errors}
| {:running_apply_retry, pid(), errors}
| {:running_unapply, pid(), resolved, errors}
| {:running_unapply_retry, pid(), resolved, errors}
| {:failed, errors}
| {:done_applied, any}
| :done_unapplied
@type branches :: %{required(atom()) => Branch.t()}
@type t() :: %Kira.BranchState{
branch: Branch.t(),
awaiting: MapSet.t(atom()),
blocking: MapSet.t(atom()),
awaiting_unapply: MapSet.t(atom()),
blocking_unapply: MapSet.t(atom()),
task: task()
}
@spec create(branch :: Branch.t(), branches :: branches()) :: t()
def create(branch, branches) do
awaiting = Enum.into(branch.dependencies, MapSet.new())
blocking =
for {t, desc} <- branches,
Enum.member?(desc.dependencies, branch.name),
into: MapSet.new(),
do: t
%__MODULE__{
branch: branch,
awaiting: awaiting,
blocking: blocking,
awaiting_unapply: MapSet.new(),
blocking_unapply: awaiting,
task: :not_started
}
end
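  # Sketch: given branches = %{a: %Branch{name: :a, dependencies: []},
  # b: %Branch{name: :b, dependencies: [:a]}}, create(branches.a, branches)
  # yields awaiting: MapSet.new([]) and blocking: MapSet.new([:b]).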
@spec apply_ready?(state :: t()) :: boolean()
def apply_ready?(state) do
MapSet.size(state.awaiting) == 0
end
@spec unapply_ready?(state :: t()) :: boolean()
def unapply_ready?(state) do
MapSet.size(state.awaiting_unapply) == 0 && is_complete(state)
end
@spec set_task(branch :: t(), task :: task) :: t()
def set_task(branch, task) do
%{branch | task: task}
end
@spec get_completed(branch :: t()) :: Util.result(any)
def get_completed(t) do
case t.task do
{:done_applied, value} ->
{:ok, value}
      {:running_unapply, _, value, _} ->
{:ok, value}
_ ->
{:error, {:unable_to_get_task_value, t.branch.name, t.task}}
end
end
@spec is_complete(branch :: t()) :: boolean
def is_complete(t) do
case t.task do
{:done_applied, _value} -> true
_ -> false
end
end
@spec get_task_pid(branch :: t()) :: Util.result(pid)
def get_task_pid(branch_state) do
case branch_state.task do
{:running_apply, pid, _} ->
{:ok, pid}
{:running_apply_retry, pid, _} ->
{:ok, pid}
{:running_unapply, pid, _, _} ->
{:ok, pid}
{:running_unapply_retry, pid, _, _} ->
{:ok, pid}
_ ->
{:error, {:unable_to_get_task_pid, branch_state.branch.name, branch_state.task}}
end
end
def get_errors(branch_state) do
case branch_state.task do
{:running_apply, _, errors} -> errors
{:running_apply_retry, _, errors} -> errors
{:running_unapply, _, _, errors} -> errors
{:running_unapply_retry, _, _, errors} -> errors
{:failed, errors} -> errors
_ -> []
end
end
end
|
lib/kira/branch_state.ex
| 0.794544 | 0.42319 |
branch_state.ex
|
starcoder
|
defmodule Godfist do
@moduledoc """
Godfist is a wrapper for the League of Legends ReST API written in Elixir.
There are some endpoints that I'll be adding later which will be the static
data from Data Dragon and Tournament support.
Every function requires that you pass the region to execute the request to
since Riot uses that to Rate limit the usage of the api. Every region should
be passed as an Atom, remember that :P
Set your api key in your `config.exs` file with the given params.
```elixir
config :godfist,
token: "YOUR API KEY"
```
"""
alias Godfist.{Summoner, Match, Spectator, Champion, DataDragon, Static}
@doc """
  Get the account id of a player by its region and name.
Refer to `Godfist.Summoner.get_id/2`
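  ## Example
  ```elixir
  iex> Godfist.get_account_id(:lan, "SummonerName")
  ```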
"""
@spec get_account_id(atom, String.t()) :: {:ok, integer} | {:error, String.t()}
def get_account_id(region, name) do
{_, id} = Cachex.fetch(:id_cache, "id_#{inc(name)}", fn ->
{:ok, id} = Summoner.get_id(region, name)
{:commit, id}
end)
{:ok, id}
end
defp set_cache(name, key, value, opts \\ []) do
case Application.get_env(:godfist, :rates) do
:test -> :ok
_ -> Cachex.put(name, key, value, opts)
end
end
@doc """
  Get the summoner id of a player by its name and region:
## Example
```elixir
iex> Godfist.get_summid(:lan, "SummonerName")
```
"""
@spec get_summid(atom, String.t()) :: {:ok, integer} | {:error, String.t()}
def get_summid(region, name) do
{_, summid} = Cachex.fetch(:summid_cache, "summid_#{inc(name)}", fn ->
{:ok, %{"id" => summid}} = Summoner.by_name(region, name)
{:commit, summid}
end)
{:ok, summid}
end
@doc """
  Get matchlist of a player by its region and name.
Same as `Godfist.Match.matchlist/3` (Check for a list of options)
but you don't have to provide the summoner id directly.
## Example
```elixir
iex> Godfist.matchlist(:lan, "SummonerName")
```
"""
@spec matchlist(atom, String.t(), Keyword.t()) :: {:ok, map} | {:error, String.t()}
def matchlist(region, name, opts \\ []) do
with {:ok, account_id} <- get_account_id(region, name),
{:ok, matches} <- Match.matchlist(region, account_id, opts) do
{:ok, matches}
else
{:error, reason} ->
{:error, reason}
end
end
@doc """
Get active game of a given player by region and name.
## Example
```elixir
iex> Godfist.active_game(:na, "Summoner name")
```
"""
@spec active_game(atom, String.t()) :: {:ok, map} | {:error, String.t()}
def active_game(region, name) do
with {:ok, id} <- get_summid(region, name),
{:ok, match} <- Spectator.active_game(region, id) do
{:ok, match}
else
{:error, reason} ->
{:error, reason}
end
end
@doc """
Get all champs.
Refer to `Godfist.Champion.all/2` for option.
"""
@spec all_champs(atom, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def all_champs(region, opts \\ []) do
Champion.all(region, opts)
end
@doc """
Get a specific champion by id.
Refer to `Godfist.Champion.by_id/2`
"""
@spec champion(atom, integer) :: {:ok, map} | {:error, String.t()}
def champion(region, id) do
{_, champ} = Cachex.fetch(:champion, "champ_#{id}", fn ->
{:ok, champ} = Champion.by_id(region, id)
{:commit, champ}
end)
{:ok, champ}
end
@doc """
  Get a specific champion by its name. This is useful to work with `Godfist.DataDragon` endpoints.
## Example
```elixir
iex> Godfist.champion_by_name("リー・シン", :japanese)
iex> Godfist.champion_by_name(["<NAME>", "Rek'Sai", "Nocturne"])
```
"""
@spec champion_by_name(list, atom) :: list | MatchError
def champion_by_name(champions, locale \\ :us)
def champion_by_name(champions, locale) when is_list(champions) do
champions
|> Stream.map(fn champs -> champion_by_name(champs, locale) end)
|> Enum.to_list()
end
@spec champion_by_name(String.t(), atom) :: {String.t(), map} | MatchError
def champion_by_name(name, locale) do
{_, champs} = Cachex.fetch(:all_champs, "all_champs", fn ->
{:ok, champs} = DataDragon.Data.champions(locale)
{:commit, champs}
end)
find_single_champ(champs, name)
end
@doc """
  Get a champion's information by its ID.
## Example
```elixir
iex> Godfist.champion_by_id(:lan, 64)
```
"""
@spec champion_by_id(atom, integer) :: {:ok, map} | {:error, String.t()}
def champion_by_id(region, champ_id) do
{_, champs} = Cachex.fetch(:static_champs, "static_champs", fn ->
{:ok, champs} = Static.all_champs(region, dataById: true, tags: "keys")
{:commit, champs}
end)
find_champ_by_id(champs, champ_id)
end
defp find_champ_by_id(champs, champ_id) do
{:ok, Map.get(champs["data"], to_string(champ_id))["name"]}
end
# Makes everything 1 word, "inc" is short for inconsistency.
defp inc(name), do: String.replace(name, " ", "@")
defp find_single_champ(list, name) do
list["data"]
|> Stream.map(& &1)
|> Enum.to_list()
|> Enum.find(fn {_k, v} -> v["name"] == name end)
end
@doc """
Find similar champs to the query.
## Example
```elixir
iex> Godfist.find_similar("Noc", :us)
iex> Godfist.find_similar("L")
```
"""
@spec find_similar(String.t(), atom) :: list | {:error, String.t()}
def find_similar(name, locale \\ :us) do
{_, map} = Cachex.fetch(:all_champs, "all_champs", fn ->
{:ok, %{"data" => map}} = DataDragon.Data.champions(locale)
{:commit, map}
end)
find_champs(map, name)
end
# Map through the champ list and filter the ones that are similar to the given
# name.
defp find_champs(champ_list, name) do
Enum.filter(champ_list, fn {_k, v} -> String.contains?(v["name"], name) end)
end
end
|
lib/godfist.ex
| 0.856167 | 0.84124 |
godfist.ex
|
starcoder
|
defmodule Collidex.Detector do
@moduledoc """
Main module responsible for detecting whether two
particular pieces of geometry have collided. All actual
detections are delegated to functions in Collidex.Detection.*
modules; this module effectively just routes geometry to the
correct detection function.
"""
alias Collidex.Geometry.Circle
alias Collidex.Geometry.Rect
alias Collidex.Geometry.Polygon
alias Collidex.Detection.Circles
alias Collidex.Detection.Rects
alias Collidex.Detection.Polygons
alias Collidex.Detection.MixedShapes
@doc """
Determine if two shapes collide on the plane. If the two shapes do not
overlap, the return value will be falsy. If they do overlap,
it will return `{ :collision, _ }`. (The second member
of the tuple will eventually be the vector along which the two shapes
are colliding, but that is not implented yet).
The optional third argument defaults to `:accurate`. If `:fast` is passed
instead, then Polygon-to-Polygon collisions will be tested with a method
that may return false positives in rare cases but is faster. This
does not affect any collisions involving Rects or Circles.
## Examples
```
iex> Collidex.Detector.collision?(
...> Collidex.Geometry.Circle.make(0, 0, 1.0),
...> Collidex.Geometry.Circle.make(1.0, 1.0, 1.0)
...> )
{:collision, {-1.0, -1.0}}
iex> Collidex.Detector.collision?(
...> Collidex.Geometry.Rect.make(-2, -0.75, 2, -2),
...> Collidex.Geometry.Rect.make(2, 0.5, 3, -0.5)
...> )
false
iex> Collidex.Detector.collision?(
...> Collidex.Geometry.Rect.make(2, 0.5, 3, -0.5),
...> Collidex.Geometry.Rect.make(3,-3,-3,3)
...> )
{:collision, "todo_provide_vector"}
iex> Collidex.Detector.collision?(
...> Collidex.Geometry.Rect.make(-1.0, -1.0, 1.0, 1.0),
...> Collidex.Geometry.Polygon.make([{0.9,0}, {2,1}, {2,-1}])
...> )
{:collision, "todo_provide_vector"}
iex> Collidex.Detector.collision?(
...> Collidex.Geometry.Circle.make(0,0,1.0),
...> Collidex.Geometry.Rect.make(1.1,-1,2,1)
...> )
false
```
"""
def collision?(shape1, shape2, method \\ :accurate)
def collision?(c1 = %Circle{}, c2 = %Circle{}, _) do
Circles.collision?(c1,c2)
end
def collision?(r1 = %Rect{}, r2 = %Rect{}, _) do
Rects.collision?(r1,r2)
end
def collision?(p1 = %Polygon{}, p2 = %Polygon{}, method) do
Polygons.collision?(p1, p2, method)
end
def collision?(shape1, shape2, method) do
MixedShapes.collision?(shape1, shape2, method)
end
end
|
lib/collidex/detector.ex
| 0.910585 | 0.833663 |
detector.ex
|
starcoder
|
defmodule Grizzly.ZWave.DSK do
@moduledoc """
Module for working with the SmartStart and S2 DSKs
"""
import Integer, only: [is_even: 1]
defstruct raw: <<>>
@type t() :: %__MODULE__{raw: <<_::128>>}
@typedoc """
The DSK string is the string version of the DSK
The general format is `XXXXX-XXXXX-XXXXX-XXXXX-XXXXX-XXXXX-XXXXX-XXXXX`
That is 8 blocks of 16 bit integers separated by a dash.
An example of this would be `50285-18819-09924-30691-15973-33711-04005-03623`
"""
@type dsk_string :: <<_::376>>
@typedoc """
The DSK binary is the elixir binary string form of the DSK
The format is `<<b1, b2, b3, ... b16>>`
That is 16 bytes.
An example of this would be:
```elixir
<<196, 109, 73, 131, 38, 196, 119, 227, 62, 101, 131, 175, 15, 165, 14, 39>>
```
"""
@type dsk_binary :: <<_::128>>
@doc """
Make a new DSK
If less than 16 bytes are passed in, the rest are initialized to zero.
Due to how DSKs are constructed, odd length binaries aren't allowed since
they should never be possible.
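  For example, a short binary is zero-padded out to the full 16 bytes:
  ```
  iex> DSK.new(<<196, 109>>)
  #DSK<50285-00000-00000-00000-00000-00000-00000-00000>
  ```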
"""
@spec new(binary()) :: t()
def new(dsk_binary) when byte_size(dsk_binary) == 16 do
%__MODULE__{raw: dsk_binary}
end
def new(dsk_binary) when byte_size(dsk_binary) < 16 and is_even(byte_size(dsk_binary)) do
new(dsk_binary <> <<0::16>>)
end
@doc """
Parse a textual representation of a DSK
"""
@spec parse(dsk_string()) :: {:ok, t()} | {:error, :invalid_dsk}
def parse(dsk_string) do
do_parse(dsk_string, <<>>)
end
defp do_parse(<<>>, parts) when parts != <<>> and byte_size(parts) <= 16 do
{:ok, new(parts)}
end
defp do_parse(<<sep, rest::binary>>, parts) when sep in [?-, ?\s] do
do_parse(rest, parts)
end
defp do_parse(<<s::5-bytes, rest::binary>>, parts) do
case Integer.parse(s) do
{v, ""} when v < 65536 ->
do_parse(rest, parts <> <<v::16>>)
_anything_else ->
{:error, :invalid_dsk}
end
end
defp do_parse(_anything_else, _parts) do
{:error, :invalid_dsk}
end
@doc """
Parse a DSK PIN
PINs can also be parsed by `parse/1`. When working with PINs, though, it's
nice to be more forgiving and accept PINs as integers or strings without
leading zeros.
String examples:
```
iex> {:ok, dsk} = DSK.parse_pin("12345"); dsk
#DSK<12345-00000-00000-00000-00000-00000-00000-00000>
iex> {:ok, dsk} = DSK.parse_pin("123"); dsk
#DSK<00123-00000-00000-00000-00000-00000-00000-00000>
```
Integer examples:
```
iex> {:ok, dsk} = DSK.parse_pin(12345); dsk
#DSK<12345-00000-00000-00000-00000-00000-00000-00000>
iex> {:ok, dsk} = DSK.parse_pin(123); dsk
#DSK<00123-00000-00000-00000-00000-00000-00000-00000>
```
"""
@spec parse_pin(String.t() | non_neg_integer()) :: {:ok, t()} | {:error, :invalid_dsk}
def parse_pin(string) when is_binary(string) do
case Integer.parse(string) do
{pin, ""} -> parse_pin(pin)
_ -> {:error, :invalid_dsk}
end
end
def parse_pin(pin) when pin >= 0 and pin < 65536 do
{:ok, new(<<pin::16>>)}
end
def parse_pin(_other), do: {:error, :invalid_dsk}
@doc """
Convert the DSK to a string
```
iex> {:ok, dsk} = DSK.parse("50285-18819-09924-30691-15973-33711-04005-03623")
iex> DSK.to_string(dsk)
"50285-18819-09924-30691-15973-33711-04005-03623"
iex> {:ok, dsk} = DSK.parse("50285-18819-09924-30691-15973-33711-04005-03623")
iex> DSK.to_string(dsk, delimiter: "")
"5028518819099243069115973337110400503623"
```
Options:
* `:delimiter` - character to join the 5 byte sections together (default `"-"`)
"""
@spec to_string(t(), keyword()) :: String.t()
def to_string(%__MODULE__{raw: raw}, opts \\ []) do
delimiter = Keyword.get(opts, :delimiter, "-")
for(<<b::16 <- raw>>, do: b)
|> Enum.map(&int_to_five_digits/1)
|> Enum.join(delimiter)
end
@doc """
Return the first five digits of a DSK for use as a PIN
```
iex> {:ok, dsk} = DSK.parse("50285-18819-09924-30691-15973-33711-04005-03623")
iex> DSK.to_pin_string(dsk)
"50285"
iex> {:ok, dsk} = DSK.parse("00001-18819-09924-30691-15973-33711-04005-03623")
iex> DSK.to_pin_string(dsk)
"00001"
```
"""
@spec to_pin_string(t()) :: String.t()
def to_pin_string(%__MODULE__{raw: <<b::16, _::112>>}) do
int_to_five_digits(b)
end
defp int_to_five_digits(b) do
String.slice("00000" <> "#{b}", -5, 5)
end
@doc """
Take a string representation of the DSK and change it into the
binary representation
"""
@spec string_to_binary(dsk_string()) :: {:ok, dsk_binary()} | {:error, :invalid_dsk}
@deprecated "Use DSK.parse/1 instead"
def string_to_binary(dsk_string) do
case parse(dsk_string) do
{:ok, dsk} -> {:ok, dsk.raw}
error -> error
end
end
@doc """
Take a binary representation of the DSK and change it into the
string representation
"""
@spec binary_to_string(dsk_binary()) :: {:ok, dsk_string()}
def binary_to_string(dsk_binary) do
dsk_string =
dsk_binary
|> new()
|> __MODULE__.to_string()
{:ok, dsk_string}
end
@doc """
Generate a DSK that is all zeros
This is useful for placeholder/default DSKs.
"""
@spec zeros() :: t()
def zeros() do
%__MODULE__{
raw: <<0::size(16)-unit(8)>>
}
end
defimpl String.Chars do
@moduledoc false
defdelegate to_string(v), to: Grizzly.ZWave.DSK
end
defimpl Inspect do
import Inspect.Algebra
alias Grizzly.ZWave.DSK
@moduledoc false
@spec inspect(DSK.t(), Inspect.Opts.t()) :: Inspect.Algebra.t()
def inspect(v, _opts) do
concat(["#DSK<#{to_string(v)}>"])
end
end
end
|
lib/grizzly/zwave/dsk.ex
| 0.912077 | 0.842604 |
dsk.ex
|
starcoder
|
defmodule Hierbautberlin.Importer.Infravelo do
alias Hierbautberlin.Importer.KmlParser
alias Hierbautberlin.Importer.LiqdApi
alias Hierbautberlin.GeoData
@state_mapping %{
"Vorgesehen" => "intended",
"in Vorbereitung" => "in_preparation",
"in Planung" => "in_planning",
"Abgeschlossen" => "finished",
"in Bau" => "under_construction"
}
def import(http_connection \\ HTTPoison) do
items = LiqdApi.fetch_data(http_connection, "https://www.infravelo.de/api/v1/projects/")
{:ok, source} =
GeoData.upsert_source(%{
short_name: "INFRAVELO",
name: "infraVelo",
url: "https://www.infravelo.de/",
copyright: "infravelo.de / Projekte"
})
result =
Enum.map(items, fn item ->
attrs = to_geo_item(item)
{:ok, geo_item} = GeoData.upsert_geo_item(Map.merge(%{source_id: source.id}, attrs))
geo_item
end)
{:ok, result}
rescue
error ->
Bugsnag.report(error)
{:error, error}
end
defp to_geo_item(item) do
kml = KmlParser.parse(item["kml"])
point = KmlParser.extract_point(kml)
polygon = KmlParser.extract_polygon(kml)
%{
external_id: item["id"],
title: item["title"],
subtitle: item["subtitle"],
description: item["description"],
url: item["link"],
state: @state_mapping[item["status"]],
geo_point: point,
geometry: polygon,
date_start: parse_start(item["dateStart"]),
date_end: parse_end(item["dateEnd"])
}
end
defp parse_start(dateStr) do
case parse_quarter(dateStr) do
{year, quarter} ->
{month, day} =
case quarter do
"1" -> {1, 1}
"2" -> {4, 1}
"3" -> {7, 1}
"4" -> {10, 1}
end
DateTime.new!(Date.new!(year, month, day), ~T[00:00:00], "Europe/Berlin")
_ ->
nil
end
end
defp parse_end(dateStr) do
case parse_quarter(dateStr) do
{year, quarter} ->
{month, day} =
case quarter do
"1" -> {3, 31}
"2" -> {6, 30}
"3" -> {9, 30}
"4" -> {12, 31}
end
DateTime.new!(Date.new!(year, month, day), ~T[00:00:00], "Europe/Berlin")
_ ->
nil
end
end
defp parse_quarter(nil) do
nil
end
defp parse_quarter(quarterStr) do
case Regex.named_captures(~r/(?<quarter>\d*). Quartal (?<year>\d*)/, quarterStr) do
%{"year" => year, "quarter" => quarter} ->
{year, _} = Integer.parse(year)
{year, quarter}
_ ->
nil
end
end
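  # For example, parse_quarter("2. Quartal 2023") returns {2023, "2"}, which
  # parse_start/1 maps to 2023-04-01 and parse_end/1 to 2023-06-30 (Europe/Berlin).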
end
|
lib/hierbautberlin/importer/infravelo.ex
| 0.606848 | 0.488527 |
infravelo.ex
|
starcoder
|
defmodule AWS.OpsWorks.ChefAutomate do
@moduledoc """
AWS OpsWorks for Chef Automate
A service that runs and manages configuration management servers.
Glossary of terms
<ul> <li> **Server**: A server is a configuration management server, and
can be highly-available. The configuration manager runs on your instances
by using various AWS services, such as Amazon Elastic Compute Cloud (EC2),
and potentially Amazon Relational Database Service (RDS). A server is a
generic abstraction over the configuration manager that you want to use,
much like Amazon RDS. In AWS OpsWorks for Chef Automate, you do not start
or stop servers. After you create servers, they continue to run until they
are deleted.
</li> <li> **Engine**: The specific configuration manager that you want to
use (such as `Chef`) is the engine.
</li> <li> **Backup**: This is an application-level backup of the data that
the configuration manager stores. A backup creates a .tar.gz file that is
stored in an Amazon Simple Storage Service (S3) bucket in your account. AWS
OpsWorks for Chef Automate creates the S3 bucket when you launch the first
instance. A backup maintains a snapshot of all of a server's important
attributes at the time of the backup.
</li> <li> **Events**: Events are always related to a server. Events are
written during server creation, when health checks run, when backups are
created, etc. When you delete a server, the server's events are also
deleted.
</li> <li> **AccountAttributes**: Every account has attributes that are
assigned in the AWS OpsWorks for Chef Automate database. These attributes
store information about configuration limits (servers, backups, etc.) and
your customer account.
</li> </ul> Throttling limits
All API operations allow for 5 requests per second with a burst of 10
requests per second.
"""
@doc """
"""
def associate_node(client, input, options \\ []) do
request(client, "AssociateNode", input, options)
end
@doc """
Creates an application-level backup of a server. While the server is
  `BACKING_UP`, the server cannot be modified and no additional backup can
be created.
Backups can be created for `RUNNING`, `HEALTHY` and `UNHEALTHY` servers.
  This operation is asynchronous.
By default 50 manual backups can be created.
  A `LimitExceededException` is thrown when the maximum number of manual
  backups is reached. An `InvalidStateException` is thrown when the server is
not in any of RUNNING, HEALTHY, UNHEALTHY. A `ResourceNotFoundException` is
thrown when the server is not found. A `ValidationException` is thrown when
parameters of the request are not valid.
"""
def create_backup(client, input, options \\ []) do
request(client, "CreateBackup", input, options)
end
@doc """
  Creates and immediately starts a new server. The server can be used once it
has reached the `HEALTHY` state.
  This operation is asynchronous.
  A `LimitExceededException` is thrown when the maximum number of servers
  is reached. A `ResourceAlreadyExistsException` is raised when a
server with the same name already exists in the account. A
`ResourceNotFoundException` is thrown when a backupId is passed, but the
backup does not exist. A `ValidationException` is thrown when parameters of
the request are not valid.
By default 10 servers can be created. A `LimitExceededException` is raised
when the limit is exceeded.
When no security groups are provided by using `SecurityGroupIds`, AWS
OpsWorks creates a new security group. This security group opens the Chef
server to the world on TCP port 443. If a KeyName is present, SSH access is
enabled. SSH is also open to the world on TCP port 22.
By default, the Chef Server is accessible from any IP address. We recommend
that you update your security group rules to allow access from known IP
addresses and address ranges only. To edit security group rules, open
Security Groups in the navigation pane of the EC2 management console.
"""
def create_server(client, input, options \\ []) do
request(client, "CreateServer", input, options)
end
@doc """
Deletes a backup. You can delete both manual and automated backups.
This operation is asynchronous.
  An `InvalidStateException` is thrown when a backup is already being deleted. A
`ResourceNotFoundException` is thrown when the backup does not exist. A
`ValidationException` is thrown when parameters of the request are not
valid.
"""
def delete_backup(client, input, options \\ []) do
request(client, "DeleteBackup", input, options)
end
@doc """
Deletes the server and the underlying AWS CloudFormation stack (including
the server's EC2 instance). The server status updated to `DELETING`. Once
the server is successfully deleted, it will no longer be returned by
`DescribeServer` requests. If the AWS CloudFormation stack cannot be
deleted, the server cannot be deleted.
This operation is asynchronous.
  An `InvalidStateException` is thrown when a server is already being deleted. A
`ResourceNotFoundException` is thrown when the server does not exist. A
`ValidationException` is raised when parameters of the request are invalid.
"""
def delete_server(client, input, options \\ []) do
request(client, "DeleteServer", input, options)
end
@doc """
Describes your account attributes, and creates requests to increase limits
before they are reached or exceeded.
This operation is synchronous.
"""
def describe_account_attributes(client, input, options \\ []) do
request(client, "DescribeAccountAttributes", input, options)
end
@doc """
Describes backups. The results are ordered by time, with newest backups
first. If you do not specify a BackupId or ServerName, the command returns
all backups.
This operation is synchronous.
A `ResourceNotFoundException` is thrown when the backup does not exist. A
`ValidationException` is raised when parameters of the request are invalid.
"""
def describe_backups(client, input, options \\ []) do
request(client, "DescribeBackups", input, options)
end
@doc """
Describes events for a specified server. Results are ordered by time, with
newest events first.
This operation is synchronous.
A `ResourceNotFoundException` is thrown when the server does not exist. A
`ValidationException` is raised when parameters of the request are invalid.
"""
def describe_events(client, input, options \\ []) do
request(client, "DescribeEvents", input, options)
end
@doc """
"""
def describe_node_association_status(client, input, options \\ []) do
request(client, "DescribeNodeAssociationStatus", input, options)
end
@doc """
Lists all configuration management servers that are identified with your
account. Only the stored results from Amazon DynamoDB are returned. AWS
OpsWorks for Chef Automate does not query other services.
This operation is synchronous.
A `ResourceNotFoundException` is thrown when the server does not exist. A
`ValidationException` is raised when parameters of the request are invalid.
"""
def describe_servers(client, input, options \\ []) do
request(client, "DescribeServers", input, options)
end
@doc """
"""
def disassociate_node(client, input, options \\ []) do
request(client, "DisassociateNode", input, options)
end
@doc """
Restores a backup to a server that is in a `RUNNING`, `FAILED`, or
`HEALTHY` state. When you run RestoreServer, the server's EC2 instance is
deleted, and a new EC2 instance is configured. RestoreServer maintains the
existing server endpoint, so configuration management of all of the
server's client devices should continue to work.
This operation is asynchronous.
An `InvalidStateException` is thrown when the server is not in a valid
state. A `ResourceNotFoundException` is thrown when the server does not
exist. A `ValidationException` is raised when parameters of the request are
invalid.
"""
def restore_server(client, input, options \\ []) do
request(client, "RestoreServer", input, options)
end
@doc """
Manually starts server maintenance. This command can be useful if an
earlier maintenance attempt failed, and the underlying cause of maintenance
failure has been resolved. The server will switch to the `UNDER_MAINTENANCE`
state while maintenance is in progress.
Maintenance can only be started for `HEALTHY` and `UNHEALTHY` servers. An
`InvalidStateException` is thrown otherwise. A `ResourceNotFoundException`
is thrown when the server does not exist. A `ValidationException` is raised
when parameters of the request are invalid.
"""
def start_maintenance(client, input, options \\ []) do
request(client, "StartMaintenance", input, options)
end
@doc """
Updates settings for a server.
This operation is synchronous.
"""
def update_server(client, input, options \\ []) do
request(client, "UpdateServer", input, options)
end
@doc """
Updates engine-specific attributes on a specified server. The server will
enter the `MODIFYING` state while this operation is in progress. Only one
update can take place at a time.
This operation can be used to reset the Chef server's main API key
(`CHEF_PIVOTAL_KEY`).
This operation is asynchronous.
This operation can only be called for `HEALTHY` and `UNHEALTHY` servers.
Otherwise an `InvalidStateException` is raised. A
`ResourceNotFoundException` is thrown when the server does not exist. A
`ValidationException` is raised when parameters of the request are invalid.
"""
def update_server_engine_attributes(client, input, options \\ []) do
request(client, "UpdateServerEngineAttributes", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "opsworks-cm"}
host = get_host("opsworks-cm", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "OpsWorksCM_V2016_11_01.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/ops_works_chef_automate.ex
| 0.903301 | 0.53437 |
ops_works_chef_automate.ex
|
starcoder
|
defmodule EdgeDB.Protocol.Codecs.Builtin.Array do
@moduledoc false
use EdgeDB.Protocol.Codec
alias EdgeDB.Protocol.{
Datatypes,
Types
}
@reserved0 0
@reserved1 0
@empty_list_iodata [
Datatypes.Int32.encode(0),
Datatypes.Int32.encode(@reserved0),
Datatypes.Int32.encode(@reserved1)
]
defcodec(type: list())
@spec new(Datatypes.UUID.t(), list(integer()), Codec.t()) :: Codec.t()
def new(type_id, dimensions, codec) do
encoder = create_encoder(&encode_array(&1, dimensions, codec))
decoder = create_decoder(&decode_array(&1, dimensions, codec))
%Codec{
type_id: type_id,
encoder: encoder,
decoder: decoder,
module: __MODULE__
}
end
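# Illustrative sketch of how an array codec round-trips data. The inner
# `int64_codec` and the type id are assumptions; in practice both come from
# the server's type descriptors:
#
#     array_codec = new(type_id, [3], int64_codec)
#     iodata = encode_array([1, 2, 3], [3], int64_codec)
#     list = decode_array(IO.iodata_to_binary(iodata), [3], int64_codec)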
@spec encode_array(t(), list(integer()), Codec.t()) :: iodata()
def encode_array([], _dimensions, _codec) do
@empty_list_iodata
end
def encode_array(instance, dimensions, codec) when is_list(instance) do
if Keyword.keyword?(instance) do
raise EdgeDB.Error.invalid_argument_error(
"unable to encode keyword list #{inspect(instance)} as array"
)
end
ndims = length(dimensions)
calculated_dimensions = get_dimensions_for_list(ndims, instance)
elements = encode_data_into_array_elements(instance, codec)
[
Datatypes.Int32.encode(ndims),
Datatypes.Int32.encode(@reserved0),
Datatypes.Int32.encode(@reserved1),
Types.Dimension.encode(calculated_dimensions, raw: true),
Types.ArrayElement.encode(elements, raw: true)
]
end
@spec decode_array(bitstring(), list(integer()), Codec.t()) :: t()
def decode_array(<<0::int32, _reserved0::int32, _reserved1::int32>>, _dimensions, _codec) do
[]
end
def decode_array(
<<ndims::int32, _reserved0::int32, _reserved1::int32, rest::binary>>,
expected_dimensions,
codec
) do
{parsed_dimensions, rest} = Types.Dimension.decode(ndims, rest)
if length(parsed_dimensions) != length(expected_dimensions) do
raise EdgeDB.Error.invalid_argument_error(
"unable to decode binary data as array: parsed dimensions count don't match expected dimensions count"
)
end
elements_count = count_elements_in_array(parsed_dimensions)
{raw_elements, <<>>} = Types.ArrayElement.decode(elements_count, rest)
decode_array_elements_into_list(raw_elements, parsed_dimensions, codec)
end
defp encode_data_into_array_elements(list, codec) do
Enum.map(list, fn element ->
encoded_data = Codec.encode(codec, element)
%Types.ArrayElement{data: encoded_data}
end)
end
defp decode_array_elements_into_list(elements, dimensions, codec) do
elements
|> Enum.into([], fn %Types.ArrayElement{data: data} ->
Codec.decode(codec, data)
end)
|> transform_in_dimensions(dimensions)
end
defp get_dimensions_for_list(1, list) do
get_dimensions_for_list(0, [], [%Types.Dimension{upper: length(list)}])
end
defp get_dimensions_for_list(ndims, list) do
get_dimensions_for_list(ndims, list, [])
end
defp get_dimensions_for_list(0, [], dimensions) do
dimensions
end
defp get_dimensions_for_list(ndims, [list | rest], dimensions) when is_list(list) do
get_dimensions_for_list(ndims - 1, rest, [%Types.Dimension{upper: length(list)} | dimensions])
end
defp count_elements_in_array(dimensions) do
Enum.reduce(dimensions, 0, fn %Types.Dimension{upper: upper, lower: lower}, acc ->
acc + upper - lower + 1
end)
end
defp transform_in_dimensions(list, [%Types.Dimension{}]) do
list
end
defp transform_in_dimensions(list, dimensions) do
{list, []} =
Enum.reduce(dimensions, {[], list}, fn %Types.Dimension{upper: upper},
{md_list, elements} ->
{new_dim_list, rest} = Enum.split(elements, upper)
{[new_dim_list | md_list], rest}
end)
Enum.reverse(list)
end
end
|
lib/edgedb/protocol/codecs/builtin/array.ex
| 0.696991 | 0.429788 |
array.ex
|
starcoder
|
defmodule Optimus.Builder do
alias Optimus
alias Optimus.PropertyParsers, as: PP
def build(props) do
with :ok <- validate_keyword_list(props),
{:ok, name} <- build_name(props),
{:ok, description} <- build_description(props),
{:ok, version} <- build_version(props),
{:ok, author} <- build_author(props),
{:ok, about} <- build_about(props),
{:ok, allow_unknown_args} <- build_allow_unknown_args(props),
{:ok, parse_double_dash} <- build_parse_double_dash(props),
{:ok, args} <- build_args(props[:args]),
{:ok, flags} <- build_flags(props[:flags]),
{:ok, options} <- build_options(props[:options]),
{:ok, subcommands} <- build_subcommands(props[:subcommands]),
:ok <- validate_args(args),
:ok <- validate_conflicts(flags, options),
do:
{:ok,
%Optimus{
name: name,
description: description,
version: version,
author: author,
about: about,
allow_unknown_args: allow_unknown_args,
parse_double_dash: parse_double_dash,
args: args,
flags: flags,
options: options,
subcommands: subcommands
}}
end
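# A rough example of the keyword list `build/1` accepts. The nested keys for
# args/flags/options are handed to `Optimus.Arg`, `Optimus.Flag` and
# `Optimus.Option`, so the exact keys shown here are assumptions:
#
#     Optimus.Builder.build(
#       name: "mycli",
#       description: "An example CLI",
#       version: "0.1.0",
#       args: [input: [value_name: "INPUT", required: true]],
#       flags: [verbose: [short: "-v", long: "--verbose"]],
#       options: [level: [short: "-l", long: "--level"]]
#     )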
defp build_name(props) do
PP.build_command_name(:name, props[:name])
end
defp build_description(props) do
PP.build_string(:description, props[:description], nil)
end
defp build_version(props) do
PP.build_string(:version, props[:version], nil)
end
defp build_author(props) do
PP.build_string(:author, props[:author], nil)
end
defp build_about(props) do
PP.build_string(:about, props[:about], nil)
end
defp build_allow_unknown_args(props) do
PP.build_bool(:allow_unknown_args, props[:allow_unknown_args], false)
end
defp build_parse_double_dash(props) do
PP.build_bool(:parse_double_dash, props[:parse_double_dash], true)
end
defp build_args(specs), do: build_specs("args", Optimus.Arg, specs)
defp build_flags(specs), do: build_specs("flags", Optimus.Flag, specs)
defp build_options(specs), do: build_specs("options", Optimus.Option, specs)
defp build_specs(_name, _module, nil), do: {:ok, []}
defp build_specs(name, module, specs) do
if Keyword.keyword?(specs) do
build_specs_(module, specs, [])
else
{:error, "#{name} specs are expected to be a Keyword list"}
end
end
defp build_specs_(_module, [], parsed), do: {:ok, Enum.reverse(parsed)}
defp build_specs_(module, [{_name, _props} = arg_spec | other], parsed) do
with {:ok, arg} <- module.new(arg_spec), do: build_specs_(module, other, [arg | parsed])
end
defp build_subcommands(nil), do: {:ok, []}
defp build_subcommands(subcommands) do
if Keyword.keyword?(subcommands) do
build_subcommands_(subcommands, [])
else
{:error, "subcommand specs are expected to be a Keyword list"}
end
end
defp build_subcommands_([], parsed), do: {:ok, Enum.reverse(parsed)}
defp build_subcommands_([{subcommand_name, props} | other], parsed) do
case build(props) do
{:ok, subcommand} ->
subcommand_with_name =
case subcommand.name do
nil ->
%Optimus{subcommand | subcommand: subcommand_name, name: to_string(subcommand_name)}
_ ->
%Optimus{subcommand | subcommand: subcommand_name}
end
build_subcommands_(other, [subcommand_with_name | parsed])
{:error, error} ->
{:error, "error building subcommand #{inspect(subcommand_name)}: #{error}"}
end
end
def validate_keyword_list(list) do
if Keyword.keyword?(list) do
:ok
else
{:error, "configuration is expected to be a keyword list"}
end
end
defp validate_args([arg1, arg2 | other]) do
if !arg1.required && arg2.required do
{:error,
"required argument #{inspect(arg2.name)} follows optional argument #{inspect(arg1.name)}"}
else
validate_args([arg2 | other])
end
end
defp validate_args(_), do: :ok
defp validate_conflicts(flags, options) do
with :ok <- validate_conflicts(flags, options, :short),
:ok <- validate_conflicts(flags, options, :long),
do: :ok
end
defp validate_conflicts(flags, options, key) do
all_options = flags ++ options
duplicate =
all_options
|> Enum.group_by(fn item -> Map.get(item, key) end, fn item -> item end)
|> Map.to_list()
|> Enum.find(fn {option_name, options} -> option_name && length(options) > 1 end)
case duplicate do
{name, _} -> {:error, "duplicate #{key} option name: #{name}"}
nil -> :ok
end
end
end
|
lib/optimus/builder.ex
| 0.530723 | 0.405007 |
builder.ex
|
starcoder
|
defmodule GitExPress.Entries.Storage do
@moduledoc """
This Storage module handles the Mnesia database.
"""
require Logger
# alias :mnesia, as: Mnesia
alias GitExPress.Entries.Entry
@entry_table GitExPressEntries
@entry_attributes [:title, :date, :slug, :content_raw, :content_html, :content_type]
@doc """
1. Initialize a new empty schema by passing in a Node List.
2. Create a table index for :source, allowing us to query by source.
3. Create the table called entries and define the schema.
4. Start :mnesia.
TODO: Check if this succeeds? Does it matter? These four, respectively, return these
when the table and the schema already exist on the system:
{:error, {:nonode@nohost, {:already_exists, :nonode@nohost}}}
:ok
{:aborted, {:already_exists, Entries}}
{:aborted, {:already_exists, Entries, 6}}
"""
@spec init() :: :ok | {:error, any()}
def init do
:mnesia.create_schema([node()])
:mnesia.start()
case :mnesia.create_table(@entry_table,
record_name: @entry_table,
attributes: @entry_attributes
) do
{:atomic, :ok} ->
:mnesia.add_table_index(@entry_table, :content_type)
:ok
_ ->
:ok
end
end
@doc """
Insert an entry to our :mnesia database. We do this with an :mnesia transaction.
An :mnesia transaction is a mechanism by which a series of database operations
can be executed as one functional block.
"""
@spec insert_entry(%Entry{}) :: {:ok, String.t()} | {:error, String.t()}
def insert_entry(entry) when is_map(entry) do
data_to_write = fn ->
:mnesia.write(
{@entry_table, entry.title, entry.date, entry.slug, entry.content_raw, entry.content_html,
entry.content_type}
)
end
perform_transaction(data_to_write)
end
def insert_entry(_other) do
{:error, "Entry not of required type (Entry Struct)"}
end
@doc """
Return all entries. We do this with an :mnesia transaction.
An :mnesia transaction is a mechanism by which a series of database operations
can be executed as one functional block.
"""
@spec get_entries() :: {:ok, list()} | {:error, list()}
def get_entries do
data_to_read = fn ->
:mnesia.index_read(@entry_table, "blog", :content_type)
end
perform_transaction(data_to_read)
end
@doc """
Get entries by field and value, where field is the field in the Entry struct you
want to look for, and value is the value of that field.
[:title, :date, :slug, :content_raw, :content_html, :content_type]
# TODO: Restrict only to specific field atoms, now we can pass anything and still attempt a transaction
"""
@spec get_entries_by(atom(), any()) :: {:ok, list()} | {:error, String.t()}
def get_entries_by(field, value) when is_atom(field) do
if Enum.member?(@entry_attributes, field) do
data_to_read =
case field do
:title ->
fn -> :mnesia.match_object({@entry_table, value, :_, :_, :_, :_, :_}) end
:date ->
fn -> :mnesia.match_object({@entry_table, :_, value, :_, :_, :_, :_}) end
:slug ->
fn -> :mnesia.match_object({@entry_table, :_, :_, value, :_, :_, :_}) end
:content_raw ->
fn -> :mnesia.match_object({@entry_table, :_, :_, :_, value, :_, :_}) end
:content_html ->
fn -> :mnesia.match_object({@entry_table, :_, :_, :_, :_, value, :_}) end
:content_type ->
fn -> :mnesia.match_object({@entry_table, :_, :_, :_, :_, :_, value}) end
end
perform_transaction(data_to_read)
else
{:error, "Given field not one of Entry attributes"}
end
end
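# Example (assuming entries were previously stored with `insert_entry/1`):
#
#     {:ok, entries} = GitExPress.Entries.Storage.get_entries_by(:content_type, "blog")
#     {:error, _reason} = GitExPress.Entries.Storage.get_entries_by(:unknown_field, "blog")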
# Perform an :mnesia transaction on given `data`, where `data` is an Entry.
@spec perform_transaction(fun()) :: tuple()
defp perform_transaction(data) do
case :mnesia.transaction(data) do
{:atomic, result} ->
Logger.info("Transaction OK")
{:ok, result}
{:aborted, reason} ->
Logger.info("Transaction error")
{:error, reason}
end
end
end
|
lib/gitexpress/entries/storage.ex
| 0.588771 | 0.543651 |
storage.ex
|
starcoder
|
defmodule Mix.Tasks.OptimusHash.Seed do
@moduledoc """
Generates the required configuration for using OptimusHash. This task is intended
to be run only once.
## Example
mix optimus_hash.seed --bits 31
A pre-generated prime can optionally be passed as the first argument,
e.g. `mix optimus_hash.seed --bits 31 YOUR_PRIME`.
## Command line options
* `--bits` - the bit size of the IDs to generate configuration for (defaults to 31, must be at least 16)
"""
import Mix.Generator
use Mix.Task
use Bitwise
@shortdoc "Generates a set of configuration values for OptimusHash"
@switches [
bits: [:integer]
]
def run(args) do
case OptionParser.parse!(args, strict: @switches) do
{opts, arguments} ->
max_size = Keyword.get(opts, :bits, 31)
if max_size < 16 do
Mix.raise("Using less than 16bits is not recommended")
end
prime =
case List.first(arguments) do
nil ->
try do
{result, 0} =
System.cmd("openssl", ["prime", "-generate", "-bits", "#{max_size}"])
case Integer.parse(result) do
{number, _} ->
number
:error ->
Mix.raise(
"expected a valid integer as a prime, got: #{inspect(List.first(arguments))}"
)
end
rescue
_ ->
Mix.raise(
"Failed to generate a prime using 'openssl prime -generate -bits #{max_size}'. " <>
"You can either install 'openssl' and run the command again or get a prime " <>
"from somewhere else (e.g. http://primes.utm.edu/lists/small/millions/). " <>
"You should independently verify that your number is in fact a prime number. " <>
"To run the command again use: mix optimus_hash.seed --bits #{max_size} YOUR_PRIME"
)
end
_ ->
case Integer.parse(List.first(arguments)) do
{number, _} ->
number
:error ->
Mix.raise(
"expected a valid integer as a prime, got: #{inspect(List.first(arguments))}"
)
end
end
max_id = trunc(:math.pow(2, max_size)) - 1
mod_inverse = OptimusHash.Helpers.mod_inverse(prime, max_id + 1)
random =
ceil(max_size / 8)
|> :crypto.strong_rand_bytes()
|> :binary.decode_unsigned()
|> band(max_id)
OptimusHash.new(
prime: prime,
mod_inverse: mod_inverse,
random: random,
max_size: max_size
)
code =
Code.format_string!(
code_inline_template(%{
prime: prime,
mod_inverse: mod_inverse,
random: random,
max_size: max_size
})
)
Mix.shell().info("""
Configuration:
- prime: #{prime}
- mod_inverse: #{mod_inverse}
- random: #{random}
- max_size: #{max_size}
Code:
```
#{code}
```
""")
end
end
embed_template(:code_inline, """
OptimusHash.new(
prime: <%= @prime %>,
mod_inverse: <%= @mod_inverse %>,
random: <%= @random %>,
max_size: <%= @max_size %>
)
""")
end
|
lib/mix/tasks/optimushash.seed.ex
| 0.825519 | 0.571438 |
optimushash.seed.ex
|
starcoder
|
defmodule Bitcraft.Helpers do
@moduledoc """
Module for building extra helper functions.
"""
alias __MODULE__
@doc false
defmacro __using__(_opts) do
quote do
unquote(Helpers.build_segment_decoder())
end
end
@doc """
Helper function used internally for building `decode_segment/5` function.
"""
@spec build_segment_decoder :: term
def build_segment_decoder do
sign_opts = [:signed, :unsigned]
endian_opts = [:big, :little]
int_exprs = integer_exprs(sign_opts, endian_opts)
float_exprs = float_exprs(endian_opts)
bin_exprs = bin_exprs()
unicode_exprs = unicode_exprs(endian_opts)
array_exprs = array_exprs()
{dec_exprs, enc_exprs} =
[int_exprs, float_exprs, array_exprs, bin_exprs, unicode_exprs]
|> List.flatten()
|> Enum.unzip()
for expr <- dec_exprs ++ enc_exprs do
expr = Code.string_to_quoted!(expr)
quote do
unquote(expr)
end
end
end
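# For reference, one decoder clause generated by `integer_exprs/2` below
# expands to (for the :signed/:big combination):
#
#     def decode_segment(bits, size, :integer, :signed, :big) do
#       <<segment::signed-big-integer-size(size), rest::bits>> = bits
#       {segment, rest}
#     end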
## Internal Helpers
defp integer_exprs(sign_opts, endian_opts) do
for sign <- sign_opts, endian <- endian_opts do
dec = """
def decode_segment(bits, size, :integer, :#{sign}, :#{endian}) do
<<segment::#{sign}-#{endian}-integer-size(size), rest::bits>> = bits
{segment, rest}
end
"""
enc = """
def encode_segment(var, size, :integer, :#{sign}, :#{endian}) do
<<var::#{sign}-#{endian}-integer-size(size)>>
end
"""
{dec, enc}
end
end
defp float_exprs(endian_opts) do
for endian <- endian_opts do
dec = """
def decode_segment(bits, size, :float, _, :#{endian}) do
<<segment::#{endian}-float-size(size), rest::bits>> = bits
{segment, rest}
end
"""
enc = """
def encode_segment(var, size, :float, _, :#{endian}) do
<<var::#{endian}-float-size(size)>>
end
"""
{dec, enc}
end
end
defp bin_exprs do
for type <- [:bitstring, :bits, :binary, :bytes] do
dec = """
def decode_segment(bits, size, :#{type}, _, _) do
<<segment::#{type}-size(size), rest::bits>> = bits
{segment, rest}
end
"""
enc = """
def encode_segment(var, _, :#{type}, _, _) do
<<var::#{type}>>
end
"""
{dec, enc}
end
end
defp unicode_exprs(endian_opts) do
for type <- [:utf8, :utf16, :utf32], endian <- endian_opts do
dec =
if type == :utf8 do
"""
def decode_segment(bits, _, :utf8, _, :#{endian}) do
if is_integer(bits) do
<<segment::#{type}-#{endian}, rest::bits>> = bits
{segment, rest}
else
{:unicode.characters_to_binary(bits, :utf8), ""}
end
end
"""
else
"""
def decode_segment(bits, _, :#{type}, _, :#{endian}) do
if is_integer(bits) do
<<segment::#{type}-#{endian}, rest::bits>> = bits
{segment, rest}
else
{:unicode.characters_to_binary(bits, {:#{type}, :#{endian}}), ""}
end
end
"""
end
enc = """
def encode_segment(var, _, :#{type}, _, :#{endian}) do
if is_integer(var) do
<<var::#{type}-#{endian}>>
else
for << ch <- var >>, into: <<>>, do: <<ch::#{type}-#{endian}>>
end
end
"""
{dec, enc}
end
end
defp array_exprs do
dec = """
def decode_segment(bits, 0, %Bitcraft.BitBlock.Array{}, _sign, _endian) do
{[], bits}
end
def decode_segment(bits, sz, %Bitcraft.BitBlock.Array{type: t, element_size: esz}, sign, endian) do
{array, rest} =
Enum.reduce(1..(div(sz, esz)), {[], bits}, fn _, {lst, bin} ->
{value, bin} = Bitcraft.decode_segment(bin, esz, t, sign, endian)
{[value | lst], bin}
end)
{Enum.reverse(array), rest}
end
"""
enc = """
def encode_segment(array, _, %Bitcraft.BitBlock.Array{type: t, element_size: s}, sign, endian) do
for elem <- array, into: <<>> do
encode_segment(elem, s, t, sign, endian)
end
end
"""
{dec, enc}
end
end
|
lib/bitcraft/helpers.ex
| 0.637708 | 0.491944 |
helpers.ex
|
starcoder
|
defmodule Scientist.Experiment do
@moduledoc """
A behaviour module for creating and running experiments.
An experiment contains all information about how your control and candidate functions
operate, and how their observations are reported. Experiments include functionality
for determining when they should run and how they behave when exceptions are thrown.
The macros exposed by `Scientist` are a thin wrapper around the functions in this
module. If you would like to, you can use the corresponding functions to create
and run your experiment.
In addition to the required callbacks, you can also define custom defaults and
exception handling behaviour.
## Custom Defaults
`default_name/0` and `default_context/0` determine the default name and context,
respectively, of unconfigured experiments in your module.
## Custom Exception Handling
`raised/3` and `thrown/3` determine how your experiment will handle exceptions
during an operation specified by the user. They receive the experiment as well
as the operation name and exception. When left unspecified, exceptions thrown
during an operation will be unhandled by `Scientist`.
The following operations report exceptions:
* `:enabled`
* `:compare`
* `:clean`
* `:ignore`
* `:run_if`
"""
defstruct [
name: "#{__MODULE__}",
candidates: %{},
context: %{},
run_if_fn: nil,
before_run: nil,
result: nil,
clean: nil,
ignore: [],
comparator: &Kernel.==/2,
raise_on_mismatches: false,
module: Scientist.Default
]
@doc """
Returns `true` if the experiment should be run.
If a falsey value is returned, the candidate blocks of the experiment
will be ignored, only running the control.
"""
@callback enabled?() :: boolean() | nil
@doc """
Publish the result of an experiment.
"""
@callback publish(result :: %Scientist.Result{}) :: any
defmacro __using__(opts) do
raise_on_mismatches = Keyword.get(opts, :raise_on_mismatches, false)
quote do
@behaviour unquote(__MODULE__)
@doc """
Creates a new experiment.
"""
def new(name \\ default_name(), opts \\ []) do
context = Keyword.get(opts, :context, %{})
should_raise =
Keyword.get(opts, :raise_on_mismatches, unquote(raise_on_mismatches))
unquote(__MODULE__).new(
name,
module: __MODULE__,
context: Map.merge(default_context(), context),
raise_on_mismatches: should_raise
)
end
@doc """
Returns the default context for an experiment.
Any additional context passed to `new/2` will be merged with the default context.
"""
def default_context, do: %{}
@doc """
Returns the default name for an experiment.
"""
def default_name, do: "#{__MODULE__}"
@doc """
Called when an experiment run raises an error during an operation.
"""
def raised(_experiment, _operation, except), do: raise except
@doc """
Called when an experiment run throws an error during an operation.
"""
def thrown(_experiment, _operation, except), do: throw except
defoverridable [ default_context: 0, default_name: 0, raised: 3, thrown: 3 ]
end
end
@doc """
Creates an experiment.
Creates an experiment with the `name` and `opts`.
The following options are available:
* `:module` - The callback module to use, defaults to `Scientist.Default`.
* `:context` - A map of values to be stored in an observation, defaults to `%{}`.
* `:raise_on_mismatches` - If `true`, any mismatches in this experiment's observations
will raise a `Scientist.MismatchError`, defaults to `false`.
"""
def new(name \\ "#{__MODULE__}", opts \\ []) do
%__MODULE__{
name: name,
module: Keyword.get(opts, :module, Scientist.Default),
context: Keyword.get(opts, :context, %{}),
raise_on_mismatches: Keyword.get(opts, :raise_on_mismatches, false)
}
end
@doc """
Executes the given block, reporting exceptions.
Executes `block` and calls `thrown/3` or `raised/3` with the given reason if the block
throws or raises an exception.
"""
defmacro guarded(exp, operation, do: block) do
quote do
try do
unquote(block)
catch
except ->
unquote(exp).module.thrown(unquote(exp), unquote(operation), except)
nil
rescue
except ->
unquote(exp).module.raised(unquote(exp), unquote(operation), except)
nil
end
end
end
@doc """
Runs the experiment.
If `enabled?/0` or a configured run_if function return a falsey value, the experiment
will not be run and only the control will be executed.
Raises `Scientist.MissingControlError` if the experiment has no control.
Raises `Scientist.MismatchError` if the experiment has mismatched observations and is
configured with `raise_on_mismatched: true`.
"""
def run(exp, opts \\ [])
def run(exp = %Scientist.Experiment{candidates: %{"control" => c}}, opts) do
if should_run?(exp) do
!exp.before_run or exp.before_run.()
observations = exp.candidates
|> Enum.shuffle
|> Enum.map(&(eval_candidate(exp, &1)))
|> Enum.to_list
{[control], candidates} = Enum.split_with(observations, fn o ->
o.name == "control"
end)
result = Scientist.Result.new(exp, control, candidates)
guarded exp, :publish, do: exp.module.publish(result)
if exp.raise_on_mismatches and Scientist.Result.mismatched?(result) do
raise Scientist.MismatchError, result: result
end
cond do
Keyword.get(opts, :result, false) -> result
Scientist.Observation.except?(control) -> Scientist.Observation.except!(control)
true -> control.value
end
else
c.()
end
end
def run(ex, _), do: raise Scientist.MissingControlError, experiment: ex
@doc """
Returns true if a mismatch should be ignored.
Runs each of the configured ignore functions in turn, ignoring a mismatch when
any of them return `true`.
Reports an `:ignore` error to the callback module if an exception is caught.
"""
def should_ignore_mismatch?(exp, control, candidate) do
ignores = exp.ignore |> Enum.reverse
Enum.any?(ignores, fn i ->
guarded exp, :ignore, do: i.(control.value, candidate.value)
end)
end
defp eval_candidate(experiment, {name, candidate}) do
Scientist.Observation.new(experiment, name, candidate)
end
@doc """
Returns true if the given observations match.
This uses the experiment's compare function, if any. If none is configured,
`==/2` will be used.
Reports a `:compare` error to the callback module if an exception is caught.
"""
def observations_match?(experiment, control, candidate) do
guarded experiment, :compare do
Scientist.Observation.equivalent?(control, candidate, experiment.comparator)
end
end
@doc """
Returns true if the experiment should run.
Reports an `:enabled` error to the callback module if an exception is caught.
"""
def should_run?(experiment = %Scientist.Experiment{candidates: obs, module: module}) do
guarded experiment, :enabled do
Enum.count(obs) > 1 and module.enabled? and run_if_allows?(experiment)
end
end
@doc """
Returns the value of the experiment's run_if function.
If the experiment has no run_if function configured, `true` is returned.
Reports a `:run_if` error to the callback module if an exception is caught.
"""
def run_if_allows?(experiment = %Scientist.Experiment{run_if_fn: f}) do
guarded experiment, :run_if, do: !f or f.()
end
@doc """
Adds `fun` to the experiment as the control.
Raises `Scientist.DuplicateError` if the experiment already has a control.
"""
def add_control(exp = %Scientist.Experiment{candidates: %{"control" => _}}, _) do
raise Scientist.DuplicateError, experiment: exp, name: "control"
end
def add_control(exp, fun), do: add_candidate(exp, "control", fun)
@doc """
Adds `fun` to the experiment as a candidate.
Raises `Scientist.DuplicateError` if the experiment already has a candidate with `name`.
"""
def add_candidate(exp, name \\ "candidate", fun) do
if Map.has_key?(exp.candidates, name) do
raise Scientist.DuplicateError, experiment: exp, name: name
else
update_in(exp.candidates, &(Map.put(&1, name, fun)))
end
end
@doc """
Adds a function to the experiment that is used to compare observations.
If an exception is thrown in `compare_fn`, it will be reported through the `thrown` and `raised`
callbacks as operation `:compare`.
"""
def compare_with(exp, compare_fn) do
put_in(exp.comparator, compare_fn)
end
@doc """
Adds an ignore function to the experiment.
The experiment will ignore a mismatch whenever this function returns true. There is no limit
on the number of ignore functions that can be configured.
If an exception is thrown in `ignore_fn`, it will be reported through the `thrown` and `raised`
callbacks as operation `:ignore`.
"""
def ignore(exp, ignore_fn) do
put_in(exp.ignore, [ignore_fn | exp.ignore])
end
@doc """
Adds a function to the experiment that is used to clean observed values.
When handling observations, the result of `cleaner` will be available under `cleaned_value`.
If an exception is thrown in `cleaner`, it will be reported through the `thrown` and `raised`
callbacks as operation `:clean`.
"""
def clean_with(exp, cleaner) do
put_in(exp.clean, cleaner)
end
@doc """
Adds a function to the experiment that is used to determine if it should run.
If this function returns `false`, the experiment will not be run.
If an exception is thrown in `run_if_fn`, it will be reported through the `thrown` and `raised`
callbacks as operation `:run_if`.
"""
def set_run_if(exp, run_if_fn) do
put_in(exp.run_if_fn, run_if_fn)
end
@doc """
Adds a function to the experiment that should only execute when the experiment is run.
"""
def set_before_run(exp, before_run) do
put_in(exp.before_run, before_run)
end
end
|
lib/scientist/experiment.ex
| 0.893228 | 0.694251 |
experiment.ex
|
starcoder
|
defmodule ExPlasma.Builder do
@moduledoc """
Helper module to make crafting plasma transactions much simpler.
"""
alias ExPlasma.Output
alias ExPlasma.Transaction
alias ExPlasma.Transaction.Signed
@type tx_opts :: [
inputs: Transaction.outputs(),
outputs: Transaction.outputs(),
tx_data: any(),
metadata: Transaction.metadata()
]
@type input_opts :: [
position: pos_integer(),
blknum: non_neg_integer(),
txindex: non_neg_integer(),
oindex: non_neg_integer()
]
@doc """
Create a new Transaction
## Example
# Empty payment v1 transaction
iex> new(ExPlasma.payment_v1())
%ExPlasma.Transaction{tx_type: 1, inputs: [], outputs: [], metadata: <<0::256>>}
# New payment v1 transaction with metadata
iex> new(ExPlasma.payment_v1(), metadata: <<1::256>>)
%ExPlasma.Transaction{tx_type: 1, inputs: [], outputs: [], metadata: <<1::256>>}
"""
@spec new(ExPlasma.transaction_type(), tx_opts()) :: Transaction.t()
def new(tx_type, opts \\ []), do: struct(%Transaction{tx_type: tx_type}, opts)
@doc """
Decorates the transaction with a nonce when given valid params for the type.
## Example
iex> tx = new(ExPlasma.fee())
iex> with_nonce(tx, %{blknum: 1000, token: <<0::160>>})
{:ok, %ExPlasma.Transaction{
inputs: [],
metadata: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0>>,
nonce: <<61, 119, 206, 68, 25, 203, 29, 23, 147, 224, 136, 32, 198, 128, 177, 74,
227, 250, 194, 173, 146, 182, 251, 152, 123, 172, 26, 83, 175, 194, 213, 238>>,
outputs: [],
sigs: [],
tx_data: 0,
tx_type: 3,
witnesses: []
}}
"""
@spec with_nonce(Transaction.t(), map()) :: {:ok, Transaction.t()} | {:error, atom()}
defdelegate with_nonce(transaction, params), to: Transaction
@spec with_nonce!(Transaction.t(), map()) :: Transaction.t() | no_return()
def with_nonce!(transaction, params) do
{:ok, transaction} = Transaction.with_nonce(transaction, params)
transaction
end
@doc """
Adds an input to the Transaction
## Example
iex> ExPlasma.payment_v1()
...> |> new()
...> |> add_input(blknum: 1, txindex: 0, oindex: 0)
...> |> add_input(blknum: 2, txindex: 0, oindex: 0)
%ExPlasma.Transaction{
tx_type: 1,
inputs: [
%ExPlasma.Output{output_id: %{blknum: 1, txindex: 0, oindex: 0}},
%ExPlasma.Output{output_id: %{blknum: 2, txindex: 0, oindex: 0}},
]
}
"""
@spec add_input(Transaction.t(), input_opts()) :: Transaction.t()
def add_input(txn, opts) do
input = %Output{output_id: Enum.into(opts, %{})}
%{txn | inputs: txn.inputs ++ [input]}
end
@doc """
Adds an output to the Transaction
## Example
iex> ExPlasma.payment_v1()
...> |> new()
...> |> add_output(output_type: 1, output_data: %{output_guard: <<1::160>>, token: <<0::160>>, amount: 1})
...> |> add_output(output_guard: <<1::160>>, token: <<0::160>>, amount: 2)
%ExPlasma.Transaction{
tx_type: 1,
outputs: [
%ExPlasma.Output{output_type: 1, output_data: %{output_guard: <<1::160>>, token: <<0::160>>, amount: 1}},
%ExPlasma.Output{output_type: 1, output_data: %{output_guard: <<1::160>>, token: <<0::160>>, amount: 2}},
]
}
"""
@spec add_output(Transaction.t(), list()) :: Transaction.t()
def add_output(txn, output_type: type, output_data: data) do
output = %Output{output_type: type, output_data: data}
%{txn | outputs: txn.outputs ++ [output]}
end
def add_output(txn, opts) when is_list(opts) do
output = %Output{output_type: 1, output_data: Enum.into(opts, %{})}
%{txn | outputs: txn.outputs ++ [output]}
end
@doc """
Sign the inputs of the transaction with the given keys in the corresponding order.
Returns a tuple {:ok, transaction} if success or {:error, atom} otherwise.
## Example
iex> key = "<KEY>"
...> ExPlasma.payment_v1()
...> |> new()
...> |> sign([key])
{:ok, %ExPlasma.Transaction{
inputs: [],
metadata: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
outputs: [],
tx_data: 0,
sigs: [
<<129, 213, 32, 15, 183, 218, 255, 22, 82, 95, 22, 86, 103, 227, 92, 109, 9,
89, 7, 142, 235, 107, 203, 29, 20, 231, 91, 168, 255, 119, 204, 239, 44,
125, 76, 109, 200, 196, 204, 230, 224, 241, 84, 75, 9, 3, 160, 177, 37,
181, 174, 98, 51, 15, 136, 235, 47, 96, 15, 209, 45, 85, 153, 2, 28>>
],
tx_type: 1
}}
"""
defdelegate sign(txn, sigs), to: Transaction
@spec sign!(Transaction.t(), Signed.sigs()) :: Transaction.t() | no_return()
def sign!(txn, sigs) do
{:ok, signed} = Transaction.sign(txn, sigs)
signed
end
end
|
lib/ex_plasma/builder.ex
| 0.875202 | 0.418162 |
builder.ex
|
starcoder
|
defmodule Donut.GraphQL.Result do
@moduledoc """
Create a result type for GraphQL queries.
These result types standardize the result interfaces and error behaviour
that the client with interact with.
## Example
\# Declare a result with only one custom type
result :new_type, [:foo]
\# Declare a result with multiple custom types
result :new_type, [:foo, :bar], fn
%Foo{}, _ -> :foo
%Bar{}, _ -> :bar
end
\# Setting up your query
field :foo, type: result(:new_type) do
\# ...
end
"""
@type resolver :: (any, Absinthe.Resolution.t -> atom | nil)
@doc """
Get the name for a result type.
"""
@spec result(atom) :: atom
def result(name), do: String.to_atom(to_string(name) <> "_result")
@doc """
Create a result type that can represent a custom type.
See `result/3` for more details.
"""
@spec result(atom, [atom]) :: Macro.t
defmacro result(name, types = [type]) do
quote do
result(unquote(name), unquote(types), fn _, _ -> unquote(type) end)
end
end
defmacro result(name, []) do
quote do
result(unquote(name), [], nil)
end
end
defmacro result(name, types) do
quote do
result(unquote(name), unquote(types), fn value, env ->
Donut.GraphQL.Result.type_resolver(value, env, unquote(types))
end)
end
end
@doc """
Create a result type that can represent a custom type
The `name` field should be the name used to refer to this new result type.
The `types` field should be list of custom types to associate with this
result type.
The `resolver` is a function that should return the type for the given
object. For more details see `Absinthe.Schema.Notation.resolve_type/1`.
"""
@spec result(atom, [atom], resolver) :: Macro.t
defmacro result(name, types, resolver) do
quote do
union unquote(result(name)) do
types unquote(types ++ [:error, :internal_error])
resolve_type &Donut.GraphQL.Result.get_type(&1, &2, unquote(resolver))
end
end
end
@doc false
@spec type_resolver(any, Absinthe.Resolution.t, [atom]) :: atom | nil
def type_resolver(value, %{ schema: schema }, types) do
Enum.find_value(types, fn type ->
case Absinthe.Schema.lookup_type(schema, type) do
%{ is_type_of: resolves } when is_function(resolves) ->
if resolves.(value) do
type
else
false
end
_ -> false
end
end)
end
@doc false
@spec get_type(any, Absinthe.Resolution.t, resolver) :: atom | nil
def get_type(%Donut.GraphQL.Result.Error{}, _, _), do: :error
def get_type(%Donut.GraphQL.Result.InternalError{}, _, _), do: :internal_error
def get_type(object, env, resolver) when is_function(resolver), do: resolver.(object, env)
end
|
apps/donut_graphql/lib/donut.graphql/result.ex
| 0.877608 | 0.609728 |
result.ex
|
starcoder
|
defmodule Example_Macro do
defmodule Example do
defmacro macro_inspect(value) do
IO.inspect(value)
value
end
def fun_inspect(value) do
IO.inspect(value)
value
end
end
defmodule MySigils do
defmacro sigil_x(term, [?r]) do
quote do
unquote(term) |> String.reverse()
end
end
defmacro sigil_x(term, _modifiers) do
term
end
defmacro sigil_X(term, [?r]) do
quote do
unquote(term) |> String.reverse()
end
end
defmacro sigil_X(term, _modifiers) do
term
end
end
def start do
Macro.camelize("foo/bar")
end
def start2 do
Macro.classify_atom(:foo)
end
def start3 do
Macro.decompose_call(quote(do: Elixir.M.foo(1, 2, 3)))
end
def start4 do
Macro.escape({:a, :b, :c})
end
def start5 do
value = {:a, :b, :c}
quote do: unquote(value)
end
defmacro defmodule_with_length(name, do: block) do
length = length(Atom.to_charlist(name))
quote do
defmodule unquote(name) do
def name_length, do: unquote(length)
unquote(block)
end
end
end
defmacro defmodule_with_length_expanded(name, do: block) do
expanded = Macro.expand(name, __CALLER__)
length = length(Atom.to_charlist(expanded))
quote do
defmodule unquote(name) do
def name_length, do: unquote(length)
unquote(block)
end
end
end
def start6 do
Macro.generate_arguments(2, __MODULE__)
end
def start7 do
[var1, var2] = Macro.generate_unique_arguments(2, __MODULE__)
{:arg1, [counter: c1], __MODULE__} = var1
{:arg2, [counter: c2], __MODULE__} = var2
is_integer(c1) and is_integer(c2)
end
def start8 do
Macro.inspect_atom(:literal, Foo.Bar)
end
def start9 do
Macro.inspect_atom(:remote_call, :Foo)
end
def start10 do
Macro.operator?(:++, 2)
end
def start11 do
ast = quote do: foo(1, "abc")
Enum.map(Macro.postwalker(ast), & &1)
end
def start12 do
ast = quote do: 5 + 3 * 7
new_ast = Macro.prewalk(ast, fn
{:+, meta, children} -> {:*, meta, children}
{:*, meta, children} -> {:+, meta, children}
other -> other
end)
Code.eval_quoted(ast)
Code.eval_quoted(new_ast)
end
def start13 do
ast = quote do: foo(1, "abc")
Enum.map(Macro.prewalker(ast), & &1)
end
def start14 do
Macro.to_string(quote(do: 1 + 2), fn
1, _string -> "one"
2, _string -> "two"
_ast, string -> string
end)
end
def start15 do
{:foo, [counter: c], __MODULE__} = Macro.unique_var(:foo, __MODULE__)
is_integer(c)
end
def start16 do
Macro.unpipe(quote do: 100 |> div(5) |> div(2))
end
def start17 do
Macro.validate({:two_element, :tuple})
end
def start18 do
Macro.var(:foo, __MODULE__)
end
end
|
lib/beam/macro/macro.ex
| 0.62681 | 0.627737 |
macro.ex
|
starcoder
|
defmodule Retex.Node.PNode do
@moduledoc """
Production node. This is like a production node in Rete algorithm. It is activated if all
the conditions in a rule are matching and contains the action that can be executed as consequence.
"""
defstruct action: nil, id: nil, raw_action: nil, bindings: %{}, filters: []
def new(action, filters \\ []) do
item = %__MODULE__{action: action, raw_action: action, filters: filters}
%{item | id: Retex.hash(item)}
end
defimpl Retex.Protocol.Activation do
def activate(
neighbor = %{filters: filters},
%Retex{tokens: tokens, graph: graph, activations: activations} = rete,
_wme,
_bindings,
_tokens
) do
[parent] =
parents =
Graph.in_neighbors(graph, neighbor)
|> Enum.filter(fn node ->
Map.get(activations, node.id)
end)
tokens = Map.get(tokens, parent.id)
if Enum.all?(parents, &Map.get(activations, &1.id)) do
productions =
tokens
|> Enum.map(fn token -> Retex.replace_bindings(neighbor, token.bindings) end)
|> List.flatten()
|> Enum.uniq()
|> apply_filters(filters)
new_rete = %{
rete
| agenda: ([productions] ++ rete.agenda) |> List.flatten() |> Enum.uniq()
}
Retex.stop_traversal(new_rete, %{})
else
Retex.stop_traversal(rete, %{})
end
end
def active?(_, _) do
raise "Not implemented"
end
def apply_filters(nodes, filters) do
Enum.filter(nodes, fn node ->
Enum.reduce_while(filters, true, fn filter, _ ->
if test_pass?(node, filter), do: {:cont, true}, else: {:halt, false}
end)
end)
end
def test_pass?(%Retex.Node.PNode{bindings: bindings}, %Retex.Fact.Filter{
predicate: predicate,
value: value,
variable: variable
}) do
case Map.get(bindings, variable, :_undefined) do
:_undefined -> true
current_value -> apply(Kernel, predicate, [current_value, value])
end
end
end
defimpl Inspect do
def inspect(node, _opts) do
"PNode(#{inspect(node.action)})"
end
end
end
|
lib/nodes/p_node.ex
| 0.726717 | 0.534005 |
p_node.ex
|
starcoder
|
defmodule Rihanna.Migration do
@max_32_bit_signed_integer (:math.pow(2, 31) |> round) - 1
@moduledoc """
A set of tools for creating the Rihanna jobs table.
Rihanna stores jobs in a table in your database. The default table name is
"rihanna_jobs". The name is configurable by either passing it as an argument
to the functions below or setting `:jobs_table_name` in Rihanna's config.
#### Using Ecto
The easiest way to create the database is with Ecto.
Run `mix ecto.gen.migration create_rihanna_jobs` and make your migration file
look like this:
```elixir
defmodule MyApp.CreateRihannaJobs do
use Rihanna.Migration
end
```
Now you can run `mix ecto.migrate`.
#### Without Ecto
Ecto is not required to run Rihanna. If you want to create the table yourself,
without Ecto, take a look at either `statements/0` or `sql/0`.
"""
defmacro __using__(opts) do
table_name = Keyword.get(opts, :table_name, Rihanna.Config.jobs_table_name()) |> to_string
quote do
use Ecto.Migration
def up do
Enum.each(Rihanna.Migration.statements(unquote(table_name)), fn statement ->
execute(statement)
end)
end
def down do
Enum.each(Rihanna.Migration.drop_statements(unquote(table_name)), fn statement ->
execute(statement)
end)
end
end
end
@doc """
Returns a list of SQL statements that will drop the Rihanna jobs table if
executed sequentially.
By default it takes the name of the table from the application config.
You may optionally supply a table name as an argument if you want to override
this.
## Examples
> Rihanna.Migration.drop_statements
[...]
> Rihanna.Migration.drop_statements("my_alternative_table_name")
[...]
"""
def drop_statements(table_name \\ Rihanna.Config.jobs_table_name()) do
[
"""
DROP TABLE IF EXISTS "#{table_name}";
""",
"""
DROP SEQUENCE IF EXISTS #{table_name}_id_seq;
"""
]
end
@doc """
Returns a list of SQL statements that will create the Rihanna jobs table if
executed sequentially.
By default it takes the name of the table from the application config.
You may optionally supply a table name as an argument if you want to override
this.
## Examples
> Rihanna.Migration.statements
[...]
> Rihanna.Migration.statements("my_alternative_table_name")
[...]
"""
@spec statements() :: list[String.t()]
@spec statements(String.t() | atom) :: list[String.t()]
def statements(table_name \\ Rihanna.Config.jobs_table_name())
when is_binary(table_name) or is_atom(table_name) do
[
"""
CREATE TABLE #{table_name} (
id int NOT NULL,
term bytea NOT NULL,
priority integer NOT NULL DEFAULT 50,
enqueued_at timestamp with time zone NOT NULL,
due_at timestamp with time zone,
failed_at timestamp with time zone,
fail_reason text,
rihanna_internal_meta jsonb NOT NULL DEFAULT '{}',
CONSTRAINT failed_at_required_fail_reason CHECK((failed_at IS NOT NULL AND fail_reason IS NOT NULL) OR (failed_at IS NULL and fail_reason IS NULL))
);
""",
"""
COMMENT ON CONSTRAINT failed_at_required_fail_reason ON #{table_name} IS 'When setting failed_at you must also set a fail_reason';
""",
"""
CREATE SEQUENCE #{table_name}_id_seq
START WITH 1
INCREMENT BY 1
MINVALUE 1
MAXVALUE #{@max_32_bit_signed_integer}
CACHE 1
CYCLE;
""",
"""
ALTER SEQUENCE #{table_name}_id_seq OWNED BY #{table_name}.id;
""",
"""
ALTER TABLE ONLY #{table_name} ALTER COLUMN id SET DEFAULT nextval('#{table_name}_id_seq'::regclass);
""",
"""
ALTER TABLE ONLY #{table_name}
ADD CONSTRAINT #{table_name}_pkey PRIMARY KEY (id);
""",
"""
CREATE INDEX #{table_name}_priority_enqueued_at_id ON #{table_name} (priority ASC, enqueued_at ASC, id ASC);
"""
]
end
@doc """
Returns a string of semi-colon-terminated SQL statements that you can execute
directly to create the Rihanna jobs table.
"""
@spec sql(String.t() | atom) :: String.t()
def sql(table_name \\ Rihanna.Config.jobs_table_name()) do
Enum.join(statements(table_name), "\n")
end
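# Without Ecto, the generated statements can be executed directly, for
# example with Postgrex (connection options are illustrative):
#
#     {:ok, pg} = Postgrex.start_link(database: "myapp_dev")
#     Enum.each(Rihanna.Migration.statements(), &Postgrex.query!(pg, &1, []))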
@migrate_help_message """
The Rihanna jobs table must be created.
Rihanna stores jobs in a table in your database.
The default table name is "rihanna_jobs".
The easiest way to create the database is with Ecto.
Run `mix ecto.gen.migration create_rihanna_jobs` and make your migration look
like this:
defmodule MyApp.CreateRihannaJobs do
use Rihanna.Migration
end
Now you can run `mix ecto.migrate`.
"""
@doc false
# Check that the rihanna jobs table exists
def check_table!(pg) do
case Postgrex.query(
pg,
"""
SELECT EXISTS (
SELECT 1
FROM information_schema.tables
WHERE table_name = $1
);
""",
[Rihanna.Job.table()]
) do
{:ok, %{rows: [[true]]}} ->
:ok
{:ok, %{rows: [[false]]}} ->
raise_jobs_table_missing!()
end
end
@doc false
def raise_jobs_table_missing!() do
raise ArgumentError, @migrate_help_message
end
@upgrade_help_message """
The Rihanna jobs table must be upgraded.
The easiest way to upgrade the database is with Ecto.
Run `mix ecto.gen.migration upgrade_rihanna_jobs` and make your migration look
like this:
defmodule MyApp.UpgradeRihannaJobs do
use Rihanna.Migration.Upgrade
end
Now you can run `mix ecto.migrate`.
"""
@doc false
# Check that the required upgrades have been added
def check_upgrade_not_required!(pg) do
required_upgrade_columns = ["due_at", "rihanna_internal_meta", "priority"]
table_name = Rihanna.Job.table()
case Postgrex.query(
pg,
"""
SELECT column_name
FROM information_schema.columns
WHERE table_name = $1 and column_name = ANY($2);
""",
# Migration adds due_at, test if this is present
[table_name, required_upgrade_columns]
) do
{:ok, %{rows: rows}} when length(rows) < length(required_upgrade_columns) ->
raise_upgrade_required!()
{:ok, %{rows: rows}} when length(rows) == length(required_upgrade_columns) ->
:ok
end
required_indexes = ["#{table_name}_pkey", "#{table_name}_priority_enqueued_at_id"]
case Postgrex.query(
pg,
"""
SELECT
DISTINCT i.relname AS index_name
FROM
pg_class t,
pg_class i,
pg_index ix,
pg_attribute a
WHERE
t.oid = ix.indrelid
AND i.oid = ix.indexrelid
AND a.attrelid = t.oid
AND a.attnum = ANY(ix.indkey)
AND t.relkind = 'r'
AND t.relname = $1
AND i.relname = ANY($2);
""",
[table_name, required_indexes]
) do
{:ok, %{rows: rows}} when length(rows) < length(required_indexes) ->
raise_upgrade_required!()
{:ok, %{rows: rows}} when length(rows) == length(required_indexes) ->
:ok
end
end
@doc false
def raise_upgrade_required!() do
raise ArgumentError, @upgrade_help_message
end
end
|
lib/rihanna/migration.ex
| 0.88984 | 0.725083 |
migration.ex
|
starcoder
|
defmodule Stats do
require Integer
@doc """
returns the mean of a list of numbers
## Examples
iex> Stats.mean([])
0.0
iex> Stats.mean([2])
2.0
iex> Stats.mean([7, 2, 9])
6.0
iex> Stats.mean([7, 2, 9, 5])
5.75
"""
@spec mean(list) :: float
def mean([]) do
0.0
end
def mean([num]) do
num * 1.0
end
def mean(list) when Kernel.length(list) > 1 do
n = Kernel.length(list) * 1.0
sum = List.foldl(list, 0, fn (x, acc) -> x + acc end)
sum / n
end
@doc """
returns the median of a list of numbers
In practice, the median may be calculated as follows:
if there are n numeric data points, then by ordering the data values
(either non-decreasingly or non-increasingly),
(a) the (n+1)/2-th data point is the median if n is odd, and
(b) the midpoint of the (n/2)-th and the (n/2 + 1)-th data points is the median if n is even.
## Examples
iex> Stats.median([26.1, 25.6, 25.7, 25.2, 25.0])
25.6
iex> Stats.median([24.7, 25.6, 25.0, 26.1, 25.7, 25.2])
25.4
iex> Stats.median([10.0])
10.0
iex> Stats.median([1, 3, 4, 9, 2, 6, 5, 8, 7])
5
iex> Stats.median([1, 3, 4, 9, 2, 10, 6, 5, 8, 7])
5.5
"""
def median([]) do
0.0
end
def median(list) when Kernel.length(list) > 0 do
n = Kernel.length(list)
nlist = Enum.sort(list)
cond do
Integer.is_odd(n) ->
elem_at(nlist, Kernel.div((n - 1), 2))
true ->
(elem_at(nlist, Kernel.div(n - 1, 2)) + elem_at(nlist, Kernel.div(n + 1, 2))) / 2.0
end
end
@doc """
returns the standard deviation of a list of numbers
## Examples
iex> Stats.stdv([])
:error
iex> Stats.stdv([10])
:error
iex> Stats.stdv([7, 2, 9])
3.605551275463989
iex> Stats.stdv([7, 2, 9, 5])
2.9860788111948193
"""
def stdv([]) do
:error
end
def stdv([_num]) do
:error
end
def stdv(list) do
n = Kernel.length(list) * 1.0
[sum, sum_squ] = List.foldl(list, [0, 0], fn (x, [s, sq]) -> [x + s, x * x + sq] end)
:math.sqrt((n * sum_squ - sum * sum) / (n * (n - 1)))
end
# private helper functions
defp elem_at([], _) do
:error
end
defp elem_at(list, n) when n > Kernel.length(list) do
:error
end
defp elem_at(_, n) when n < 0 do
:error
end
defp elem_at(list, 0) when Kernel.length(list) > 0 do
List.first(list)
end
defp elem_at([car | cdr], n) do
if n == 0 do
car
else
elem_at(cdr, n - 1)
end
end
end
|
higher_order/lib/stats.ex
| 0.856902 | 0.549701 |
stats.ex
|
starcoder
|
defmodule Logrex do
@moduledoc """
An Elixir package for more easily adding Logger metadata and formatting the
console output so it's easier for humans to parse.
It wraps Elixir's Logger module to let you write code like this:
```
> use Logrex
> name = "Matt"
> user_info = %{login_count: 1}
> Logrex.info "New login", [name, user_info.login_count]
```
To display this:
```
> INFO 20:56:40 New login user=Matt login_count=1
```
"""
@doc """
Custom Logger format function, which receives the Logger arguments and
returns a string with formatted key/value metadata pairs broken out to the
right of the message.
"""
defdelegate format(level, message, timestamp, metadata), to: Logrex.Formatter
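# Typically wired up through the standard Logger console configuration; the
# exact backend and metadata settings depend on your application:
#
#     config :logger, :console,
#       format: {Logrex.Formatter, :format},
#       metadata: :all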
defmacro __using__(_opts) do
quote do
require Logger
require Logrex
end
end
@doc """
Log a debug message with dynamic metadata.
"""
defmacro debug(chardata_or_fun, metadata \\ []) do
quote do
Logger.debug(unquote(chardata_or_fun), unquote(build_metadata(metadata)))
end
end
@doc """
Log an error message with dynamic metadata.
"""
defmacro error(chardata_or_fun, metadata \\ []) do
quote do
Logger.error(unquote(chardata_or_fun), unquote(build_metadata(metadata)))
end
end
@doc """
Log an info message with dynamic metadata.
"""
defmacro info(chardata_or_fun, metadata \\ []) do
quote do
Logger.info(unquote(chardata_or_fun), unquote(build_metadata(metadata)))
end
end
@doc """
Log a warning message with dynamic metadata.
"""
defmacro warn(chardata_or_fun, metadata \\ []) do
quote do
Logger.warn(unquote(chardata_or_fun), unquote(build_metadata(metadata)))
end
end
@doc """
Logs a metadata-only message.
It is a shorthand and more explicit way of using one of the level
functions with an empty string as the first parameter. By default, all
`meta/1` calls are logged as debug, but that can be changed via the
`:meta_level` config.
## Examples
Logrex.meta foo: bar
Logrex.meta [var1, var2]
"""
defmacro meta(metadata \\ []) do
level = Application.get_env(:logrex, :meta_level, :debug)
case level do
:debug ->
quote do
Logger.debug("", unquote(build_metadata(metadata)))
end
:error ->
quote do
Logger.error("", unquote(build_metadata(metadata)))
end
:info ->
quote do
Logger.info("", unquote(build_metadata(metadata)))
end
:warn ->
quote do
Logger.warn("", unquote(build_metadata(metadata)))
end
end
end
defp build_metadata(metadata) do
metadata
|> List.wrap()
|> Enum.map(fn
{var, _, _} when is_atom(var) ->
quote do: {unquote(var), var!(unquote(Macro.var(var, nil)))}
{_, _} = var ->
var
ast ->
build_path(ast)
end)
|> Enum.reverse()
end
defp build_path(ast) do
{_, vars} =
Macro.prewalk(ast, [], fn
{var, _, x} = node, acc when x in [nil] ->
{node, [var | acc]}
node, acc when node in [:get, Access] ->
{node, acc}
node, acc when is_atom(node) or is_binary(node) ->
{node, [node | acc]}
node, acc ->
{node, acc}
end)
[meta_key | _tail] = vars
[var | key_path] = Enum.reverse(vars)
meta_key =
case meta_key do
k when is_binary(k) -> :erlang.binary_to_atom(meta_key, :utf8)
k -> k
end
quote do
{unquote(meta_key),
get_in(Map.delete(var!(unquote(Macro.var(var, nil))), :__struct__), unquote(key_path))}
end
end
end
|
lib/logrex.ex
| 0.748168 | 0.791297 |
logrex.ex
|
starcoder
|
defmodule Exkii.Text do
@moduledoc """
`Exkii.Text` module helps to transform a string
into an ASCII art string
"""
# Width of a single letter in the ascii art template
@width 9
@doc """
Returns a string with each alphabet letter.
## Examples
iex> Exkii.Text.alphabet
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"""
def alphabet do
?A..?Z |> Enum.to_list |> List.to_string
end
@doc """
Read the template file to get the string used to create the ASCII art.
## Examples
iex> Exkii.Text.read_file
[" _______ ______ _______ ______ _______ ....", "......"", "...."]
"""
def read_file do
{ _ , content } = Path.expand('./lib/txt/ascii.txt') |> Path.absname |> File.read
content |> String.split("\n", trim: true)
end
@doc """
Prints the `text` ascii art.
## Parameters
- text: String to be represented as ASCII art.
## Examples
iex> Exkii.Text.print "Hello"
_______ _ _ _______
|\ /|( ____ \( \ ( \ ( ___ )
| ) ( || ( \/| ( | ( | ( ) |
| (___) || (__ | | | | | | | |
| ___ || __) | | | | | | | |
| ( ) || ( | | | | | | | |
| ) ( || (____/\| (____/\| (____/\| (___) |
|/ \|(_______/(_______/(_______/(_______)
"""
def print(text) do
banner(text) |> IO.puts
end
@doc """
Create an ascii art for the `text`
Returns ascii art string.
## Parameters
- text: String to be represented as ASCII art.
## Examples
iex> Exkii.Text.banner "Hello"
"\n _______ _ _ _______ \n \\ /|( ____ \\( \\ ( \\ ( ___ )\n ...."
"""
def banner(text) do
limit = String.length(text)
str = text |> String.upcase |> String.graphemes
get_text(str, 0, limit, "")
end
@doc """
Iterates and concatenate the `str` to obtain an ascii art string.
This function iterates each file of the file and concatenates
until the row is equal to or greater than the `ascii.txt` rows.
Each `\n` represents a new concatenated column.
Returns ascii art string.
## Parameters
- str: Array that cotains each string character to be represented as ASCII art.
- row: Number that represents the ASCII art row to obtain. can't be greater than `ascii.txt` rows.
- limit: The string length
- acc: String which will be concatenated to each row result
## Examples
iex> Exkii.Text.get_text(["H", "E", "L", "L", "O"], 0, 5, "")
"\n _______ _ _ _______ \n \\ /|( ____ \\( \\ ( \\ ( ___ )\n ...."
"""
def get_text(_, row, _, acc) when row >= 8, do: acc
def get_text(str, row, limit, acc) do
line = get_row(str, 0, row, limit, "")
get_text(str, row + 1, limit, acc <> "\n" <> line)
end
@doc """
Iterates and concatenate the `str` to obtain a row for the ascii art.
The `index` param can't be greater than the string length.
This function iterates over each string character, gets the respective row portion
and concatenates it to the result, until the index reaches the string length.
Returns ascii art string row.
## Parameters
- text: List that contains each string character to be represented as ASCII art.
- index: Number that represents the index of a character. can't be greater than str length.
- row: Number that represents the ASCII art row to obtain. can't be greater than `ascii.txt` rows.
- limit: The string length
- acc: String which will be concatenated to each column result
## Examples
iex> Exkii.Text.get_row(["H", "E", "L", "L", "O"], 0, 0, 5, "")
" _______ _ _ _______ "
"""
def get_row(_, index, _, limit, acc) when index >= limit, do: acc
def get_row(text, index, row, limit, acc) do
char = Enum.at(text, index)
{ position, _ } = :binary.match(Exkii.Text.alphabet(), char)
line = get_column(position, row)
get_row(text, index + 1, row, limit, acc <> line)
end
@doc """
Gets a portion of the line that corresponds to the given character
Returns ascii art character portion.
## Parameters
- index: Number that represents the index of a character. can't be greater than str length.
- row: Number that represents the ASCII art row to obtain. can't be greater than `ascii.txt` rows.
## Examples
iex> Exkii.Text.get_column(8, 0)
"_________"
"""
def get_column(_, row) when row >= 8, do: ""
def get_column(index, row) do
line = Enum.at(read_file(), row)
start_point = index * @width
end_point = start_point + @width
String.slice(line, start_point..end_point-1)
end
end
|
lib/exkii/text.ex
| 0.918667 | 0.49048 |
text.ex
|
starcoder
|
defmodule Bolt.Sips.Internals.BoltProtocolV3 do
alias Bolt.Sips.Internals.BoltProtocol
alias Bolt.Sips.Internals.BoltProtocolHelper
alias Bolt.Sips.Internals.Error
@doc """
Implementation of Bolt's HELLO. It initialises the connection.
Expects a transport module (i.e. `gen_tcp`) and a `Port`. Accepts
authorisation params in the form of {username, password}.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Examples
iex> Bolt.Sips.Internals.BoltProtocolV3.hello(:gen_tcp, port, 3, {}, [])
{:ok, info}
iex> Bolt.Sips.Internals.BoltProtocolV3.hello(:gen_tcp, port, 3, {"username", "password"}, [])
{:ok, info}
"""
@spec hello(atom(), port(), integer(), tuple(), Keyword.t()) ::
{:ok, any()} | {:error, Bolt.Sips.Internals.Error.t()}
def hello(transport, port, bolt_version, auth, options \\ [recv_timeout: 15_000]) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:hello, [auth]})
case BoltProtocolHelper.receive_data(transport, port, bolt_version, options) do
{:success, info} ->
{:ok, info}
{:failure, response} ->
{:error, Error.exception(response, port, :hello)}
other ->
{:error, Error.exception(other, port, :hello)}
end
end
@doc """
Implementation of Bolt's GOODBYE. It closes the connection.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Examples
iex> Bolt.Sips.Internals.BoltProtocolV3.goodbye(:gen_tcp, port, 3)
:ok
iex> Bolt.Sips.Internals.BoltProtocolV3.goodbye(:gen_tcp, port, 3)
:ok
"""
def goodbye(transport, port, bolt_version) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:goodbye, []})
try do
Port.close(port)
:ok
rescue
ArgumentError -> Error.exception("Can't close port", port, :goodbye)
end
end
@doc """
Implementation of Bolt's RUN. It passes a statement for execution on the server.
Note that this message doesn't return the statement result. For this purpose, use PULL_ALL.
In Bolt >= 3, RUN has an additional parameter: metadata.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV3.run(:gen_tcp, port, 3, "RETURN {num} AS num", %{num: 5}, %{}, [])
{:ok, {:success, %{"fields" => ["num"]}}}
"""
@spec run(atom(), port(), integer(), String.t(), map(), Bolt.Sips.Metadata.t(), Keyword.t()) ::
{:ok, any()} | {:error, Bolt.Sips.Internals.Error.t()}
def run(transport, port, bolt_version, statement, params, metadata, options) do
BoltProtocolHelper.send_message(
transport,
port,
bolt_version,
{:run, [statement, params, metadata]}
)
case BoltProtocolHelper.receive_data(transport, port, bolt_version, options) do
{:success, _} = result ->
{:ok, result}
{:failure, response} ->
{:error, Error.exception(response, port, :run)}
%Error{} = error ->
{:error, error}
other ->
{:error, Error.exception(other, port, :run)}
end
end
@doc """
Runs a statement (most likely Cypher statement) and returns a list of the
records and a summary (acts as a RUN + PULL_ALL).
Records are represented using PackStream's record data type. Their Elixir
representation is a Keyword with the indexes `:sig` and `:fields`.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Examples
iex> Bolt.Sips.Internals.BoltProtocolV3.run_statement(:gen_tcp, port, 3, "MATCH (n) RETURN n", %{}, %{}, [])
[
{:success, %{"fields" => ["n"]}},
{:record, [sig: 1, fields: [1, "Example", "Labels", %{"some_attribute" => "some_value"}]]},
{:success, %{"type" => "r"}}
]
"""
@spec run_statement(
atom(),
port(),
integer(),
String.t(),
map(),
Bolt.Sips.Metadata.t(),
Keyword.t()
) ::
[
Bolt.Sips.Internals.PackStream.Message.decoded()
]
| Bolt.Sips.Internals.Error.t()
def run_statement(transport, port, bolt_version, statement, params, metadata, options) do
with {:ok, run_data} <-
run(transport, port, bolt_version, statement, params, metadata, options),
{:ok, result} <- BoltProtocol.pull_all(transport, port, bolt_version, options) do
[run_data | result]
else
{:error, %Error{} = error} ->
error
other ->
Error.exception(other, port, :run_statement)
end
end
@doc """
Implementation of Bolt's BEGIN. It opens a transaction.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV3.begin(:gen_tcp, port, 3, [])
{:ok, metadata}
"""
@spec begin(atom(), port(), integer(), Bolt.Sips.Metadata.t() | map(), Keyword.t()) ::
{:ok, any()} | Bolt.Sips.Internals.Error.t()
def begin(transport, port, bolt_version, metadata, options) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:begin, [metadata]})
case BoltProtocolHelper.receive_data(transport, port, bolt_version, options) do
{:success, info} ->
{:ok, info}
{:failure, response} ->
{:error, Error.exception(response, port, :begin)}
other ->
{:error, Error.exception(other, port, :begin)}
end
end
@doc """
Implementation of Bolt's COMMIT. It commits the open transaction.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV3.commit(:gen_tcp, port, 3, [])
:ok
"""
@spec commit(atom(), port(), integer(), Keyword.t()) ::
{:ok, any()} | Bolt.Sips.Internals.Error.t()
def commit(transport, port, bolt_version, options) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:commit, []})
case BoltProtocolHelper.receive_data(transport, port, bolt_version, options) do
{:success, info} ->
{:ok, info}
{:failure, response} ->
{:error, Error.exception(response, port, :commit)}
other ->
{:error, Error.exception(other, port, :commit)}
end
end
@doc """
Implementation of Bolt's ROLLBACK. It rolls back the open transaction.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV3.rollback(:gen_tcp, port, 3, [])
:ok
"""
@spec rollback(atom(), port(), integer(), Keyword.t()) :: :ok | Bolt.Sips.Internals.Error.t()
def rollback(transport, port, bolt_version, options) do
BoltProtocolHelper.treat_simple_message(:rollback, transport, port, bolt_version, options)
end
end
|
lib/bolt_sips/internals/bolt_protocol_v3.ex
| 0.816113 | 0.566019 |
bolt_protocol_v3.ex
|
starcoder
|
defmodule Ueberauth.Strategy.ADFS do
@moduledoc """
ADFS Strategy for Überauth.
In ADFS Server setup a new Client using Powershell:
```powershell
Add-AdfsClient -Name "OAUTH2 Client" -ClientId "unique-custom-client-id" -RedirectUri "http://localhost:4000/auth/adfs/callback"
Add-ADFSRelyingPartyTrust -Name "OAUTH2 Client" -Identifier "http://localhost:4000/auth/adfs"
Set-AdfsRelyingPartyTrust -IssuanceAuthorizationRulesFile "TransformRules.txt"
```
In TransformRules.txt put the following:
```
@RuleTemplate = "LdapClaims"
@RuleName = "User Details"
c:[Type == "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname", Issuer == "AD AUTHORITY"]
=> issue(store = "Active Directory", types = ("http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname", "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname", "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname", "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", "groups", "userPrincipalName"), query = ";sAMAccountName,givenName,sn,mail,tokenGroups,userPrincipalName;{0}", param = c.Value);
```
Add 'adfs_url', 'adfs_metadata_url', 'client_id', 'resource_identifier' and optionally adfs_handler
to your configuration:
```elixir
config :ueberauth, Ueberauth.Strategy.ADFS,
adfs_url: "https://adfs.url",
adfs_metadata_url: "https://path.to/FederationMetadata.xml",
adfs_handler: MyApp.ADFSHandler, # Use custom handler to extract information from the token claims
client_id: "the_client",
resource_identifier: "the_resource_id"
```
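The strategy also needs to be registered as an Ueberauth provider. A typical entry
(a sketch; the `:adfs` key is assumed to match the callback path above):
```elixir
config :ueberauth, Ueberauth,
  providers: [
    adfs: {Ueberauth.Strategy.ADFS, []}
  ]
```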
An example custom ADFS handler
```elixir
defmodule MyApp.ADFSHandler do
use Ueberauth.Strategy.ADFS.Handler
def credentials(conn) do
token = conn.private.adfs_token
%Credentials{
expires: token.claims["exp"] != nil,
expires_at: token.claims["exp"],
scopes: token.claims["aud"],
token: token.token
}
end
@doc false
def info(conn) do
user = conn.private.adfs_user
%Info{
nickname: user["winaccountname"],
name: "\#{user["given_name"]} \#{user["family_name"]}",
email: user["email"],
first_name: user["given_name"],
last_name: user["family_name"]
}
end
@doc false
def extra(conn) do
user = conn.private.adfs_user
%Extra{
raw_info: %{
token: conn.private[:adfs_token],
user: user,
groups: user["groups"]
}
}
end
end
```
"""
import SweetXml
use Ueberauth.Strategy
alias Ueberauth.Strategy.ADFS.OAuth
def handle_request!(conn) do
if __MODULE__.configured?() do
redirect_to_authorization(conn)
else
redirect!(conn, "/")
end
end
def logout(conn, token) do
params = %{redirect_uri: callback_url(conn), token: token}
with {:ok, signout_url} <- OAuth.signout_url(params) do
redirect!(conn, signout_url)
else
_ ->
set_errors!(conn, [error("Logout Failed", "Failed to logout, please close your browser")])
end
end
def handle_callback!(%Plug.Conn{params: %{"code" => code}} = conn) do
with {:ok, client} <- OAuth.get_token(code, redirect_uri: callback_url(conn)) do
fetch_user(conn, client)
else
{:error, %{reason: reason}} ->
set_errors!(conn, [error("Authentication Error", reason)])
{:error, %OAuth2.Response{body: %{"error_description" => reason}}} ->
set_errors!(conn, [error("Authentication Error", reason)])
end
end
def handle_callback!(
%Plug.Conn{params: %{"error" => error, "error_description" => error_description}} = conn
) do
set_errors!(conn, [error(error, error_description)])
end
def handle_callback!(conn) do
set_errors!(conn, [error("missing_code", "No code received")])
end
def handle_cleanup!(conn) do
conn
|> put_private(:adfs_user, nil)
|> put_private(:adfs_token, nil)
|> put_private(:adfs_handler, nil)
end
def uid(conn) do
uid_field =
conn
|> option(:uid_field)
|> to_string
conn.private.adfs_user[uid_field]
end
def credentials(conn) do
apply(conn.private.adfs_handler, :credentials, [conn])
end
def info(conn) do
apply(conn.private.adfs_handler, :info, [conn])
end
def extra(conn) do
apply(conn.private.adfs_handler, :extra, [conn])
end
def configured? do
:ueberauth
|> Application.get_env(__MODULE__)
|> env_present?
end
defp fetch_user(conn, %{token: %{access_token: access_token}}) do
url = config(:adfs_metadata_url)
adfs_handler = config(:adfs_handler) || Ueberauth.Strategy.ADFS.DefaultHandler
conn = put_private(conn, :adfs_handler, adfs_handler)
with {:ok, %HTTPoison.Response{body: metadata}} <-
HTTPoison.get(url, [], ssl: [versions: [:"tlsv1.2"]]),
true <- String.starts_with?(metadata, "<EntityDescriptor"),
{:ok, certificate} <- cert_from_metadata(metadata) do
key =
certificate
|> JOSE.JWK.from_pem()
|> Joken.rs256()
jwt =
access_token
|> Joken.token()
|> Joken.with_signer(key)
|> Joken.verify()
conn = put_private(conn, :adfs_token, jwt)
with %Joken.Token{claims: claims_user} <- jwt do
put_private(conn, :adfs_user, claims_user)
else
_ -> set_errors!(conn, [error("token", "unauthorized")])
end
else
{:error, %HTTPoison.Error{}} -> set_errors!(conn, [error("metadata_url", "not_found")])
{:error, :cert_not_found} -> set_errors!(conn, [error("certificate", "not_found")])
false -> set_errors!(conn, [error("metadata", "malformed")])
end
end
defp cert_from_metadata(metadata) when is_binary(metadata) do
metadata
|> xpath(~x"//EntityDescriptor/ds:Signature/KeyInfo/X509Data/X509Certificate/text()"s)
|> build_cert()
end
defp build_cert(cert_content)
when is_binary(cert_content) and byte_size(cert_content) > 0 do
{:ok,
"""
-----BEGIN CERTIFICATE-----
#{cert_content}
-----END CERTIFICATE-----
"""}
end
defp build_cert(_), do: {:error, :cert_not_found}
defp option(conn, key) do
Keyword.get(options(conn), key, Keyword.get(default_options(), key))
end
defp config(option) do
:ueberauth
|> Application.get_env(__MODULE__)
|> Keyword.get(option)
end
defp redirect_to_authorization(conn) do
authorize_url =
conn.params
|> Map.put(:resource, config(:resource_identifier))
|> Map.put(:redirect_uri, callback_url(conn))
|> OAuth.authorize_url!()
redirect!(conn, authorize_url)
end
defp env_present?(env) do
if Keyword.has_key?(env, :adfs_url)
&& Keyword.has_key?(env, :adfs_metadata_url)
&& Keyword.has_key?(env, :client_id)
&& Keyword.has_key?(env, :resource_identifier) do
env
|> Keyword.take([:adfs_url, :adfs_metadata_url, :client_id, :resource_identifier])
|> Keyword.values()
|> Enum.all?(&(byte_size(&1 || <<>>) > 0))
else
false
end
end
end
|
lib/ueberauth/adfs.ex
| 0.732496 | 0.596316 |
adfs.ex
|
starcoder
|
defmodule ExBin do
@moduledoc """
Documentation for ExBin.
"""
@doc """
Creates a stream for fetching bytes within a binary. Ideal for large
volumes of bytes, or large binaries.
## Examples
iex> ExBin.byte_stream(<<0x01, 0x02, 0x03>>) |> Enum.take(2)
[1, 2]
"""
def byte_stream(binary) when is_binary(binary) do
Stream.resource(
fn -> binary end,
fn bin ->
case bin do
"" ->
{:halt, bin}
bin ->
<<head, rest::binary>> = bin
{[head], rest}
end
end,
fn bin -> bin end
)
end
def byte_at(<<>>, index) when index >= 0 do
{:error, :index_out_of_bounds}
end
def byte_at(binary, 0) when is_binary(binary) do
<<byte>> <> _ = binary
byte
end
@doc """
Retrives the byte at index `index` (zero-based) from `binary`.
If the binary is empty, or already exhausted, byte_at will
return `{:error, :index_out_of_bounds}`.
## Examples
iex> ExBin.byte_at(<<0x0b, 0xc4, 0x3f>>, 1)
196
iex> ExBin.byte_at(<<0x0b, 0xc4, 0x3f>>, 7)
{:error, :index_out_of_bounds}
"""
def byte_at(binary, index) when is_binary(binary) and index > 0 do
try do
ignored = index * 8
<<_::size(ignored), target::size(8), _::binary>> = binary
target
rescue
_ in MatchError ->
{:error, :index_out_of_bounds}
end
end
@doc """
Converts the given `bitstr` into a list of bits (integers).
## Examples:
iex> ExBin.bits(<<0xa5, 0x93>>)
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1]
"""
def bits(bitstr) when is_bitstring(bitstr) do
extract(bitstr, [])
end
defp extract(<<b::size(1), bits::bitstring>>, acc) when is_bitstring(bits) do
extract(bits, [b | acc])
end
defp extract(<<>>, acc), do: acc |> Enum.reverse()
@doc """
Creates a stream for fetching bits from a bitstring in a lazily evaluated manner.
## Examples
iex> ExBin.bit_stream(<<0xa5,0x93>>) |> Enum.take(16)
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1]
"""
def bit_stream(bitstr) when is_bitstring(bitstr) do
Stream.resource(
fn -> bitstr end,
fn bin ->
case bin do
"" ->
{:halt, bin}
bin ->
<<b::size(1), bits::bitstring>> = bin
{[b], bits}
end
end,
fn bin -> bin end
)
end
def bit_at(<<>>, index) when index >= 0 do
{:error, :index_out_of_bounds}
end
def bit_at(bitstr, 0) when is_bitstring(bitstr) do
<<head::size(1), _::bitstring>> = bitstr
head
end
@doc """
Returns the bit at a specific index. If the index exceeds the length
of the bitstring, `{:error, :index_out_of_bounds}` is returned.
## Examples
iex> ExBin.bit_at(<<0b1011 :: size(4)>>, 2)
1
iex> ExBin.bit_at(<<0b1011 :: size(4)>>, 10)
{:error, :index_out_of_bounds}
"""
def bit_at(bitstr, index) when is_bitstring(bitstr) and index > 0 do
try do
<<_::size(index), result::size(1), _::bitstring>> = bitstr
result
rescue
_ in MatchError ->
{:error, :index_out_of_bounds}
end
end
@doc """
Returns a "slice" of bits from a given bitstring,
from `range.first` (inclusive) to `range.last` (exclusive).
If the range exceeds the length of the bitstring,
the bits from `range.first` to the end of the bitstring
will be returned.
## Examples
iex> ExBin.bit_slice(<<0b10101::5>>, 0..3)
<<5::size(3)>>
iex> ExBin.bit_slice(<<0b10101::5>>, 2..10)
<<5::size(3)>>
"""
def bit_slice(bitstr, range) when is_bitstring(bitstr) do
to = range.last
from = range.first
dist = to - from
try do
<<_::size(from), res::size(dist), _::bitstring>> = bitstr
<<res::size(dist)>>
rescue
_ in MatchError ->
<<_::size(from), res::bitstring>> = bitstr
res
end
end
@doc """
Calculates the parity bit value of a given bitstring.
Defaults to a even parity bit type which results in 0
if the number of 1's in the bitstring are even.
For odd parity bit type, set `is_even` to `false`;
this will result in the inverse behavior.
## Examples
iex> ExBin.parity(<<0b1001::4>>, true)
0
iex> ExBin.parity(<<0b1001::4>>, false)
1
"""
@spec parity(bitstring, boolean) :: 0 | 1
def parity(bitstr, is_even \\ true) do
parity_bit = bit_stream(bitstr)
|> Enum.count(&(&1 == 1))
|> rem(2)
case {is_even, parity_bit} do
{true, bit} -> bit
{false, bit} -> (bit * -1) + 1
end
end
@doc """
Splits a bitstring into chunks of `chunk_size` bits each.
## Examples
iex> ExBin.chunks(<<0b1011000101111::size(13)>>, 3) |> Enum.to_list
[[1, 0, 1], [1, 0, 0], [0, 1, 0], [1, 1, 1], [1]]
iex> ExBin.chunks(<<0b10010111::size(8)>>, 4) |> Enum.to_list
[[1, 0, 0, 1], [0, 1, 1, 1]]
"""
@spec chunks(bitstring, pos_integer) :: Enumerable.t()
def chunks(bitstr, chunk_size) when is_bitstring(bitstr) and is_number(chunk_size) and chunk_size > 0 do
bit_stream(bitstr)
|> Stream.chunk_every(chunk_size)
end
end
|
lib/ex_bin.ex
| 0.812793 | 0.564939 |
ex_bin.ex
|
starcoder
|
defmodule Remedy.Consumer do
@moduledoc """
Consumer process for gateway event handling.
## Consuming Dispatch Events
To handle events, Remedy uses a GenStage implementation.
Remedy defines the `producer` and `producer_consumer` in the GenStage design.
To consume the events you must create at least one `consumer` process.
Remedy uses a ConsumerSupervisor to dispatch events, meaning your handlers
will each be run in their own separate task.
The full list of dispatch events and their inner payload is described in the type specs within this module.
- Regular payloads are delivered after casting to their schema and ratified against the cache.
- Irregular payloads are classified as those which do not directly map to a standard discord object. They undergo extensive manipulation prior to updating the cache. They are described under the `DISPATCH` section of the documentation.
## Example
It is recommended that you supervise your consumers. First we set up a supervisor module for our consumers.
```elixir
#example_supervisor.ex
defmodule MyApp.ExampleSupervisor do
use Supervisor
def start_link(args) do
Supervisor.start_link(__MODULE__, args, name: __MODULE__)
end
@impl true
def init(_init_arg) do
children = [ExampleConsumer]
Supervisor.init(children, strategy: :one_for_one)
end
end
```
You can then set up your consumer module.
```elixir
#example_consumer.ex
defmodule ExampleConsumer do
use Remedy.Consumer
alias Remedy.Api
def start_link do
Consumer.start_link(__MODULE__)
end
def handle_event({:MESSAGE_CREATE, %Message{content: content} = msg, _ws_state}) do
case content do
"!sleep" ->
Api.create_message(msg.channel_id, "Going to sleep...")
Process.sleep(3000)
"!ping" ->
Api.create_message(msg.channel_id, "pyongyang!")
"!raise" ->
raise "No problems here!"
_ ->
:ignore
end
end
def handle_event(_event) do
:noop
end
end
```
"""
use ConsumerSupervisor
alias Remedy.Gateway.{EventBuffer, WSState}
alias Remedy.Schema.{
Channel,
Guild,
Integration,
Interaction,
Member,
Message,
MessageDeleteBulk,
MessageReactionRemoveAll,
MessageReactionRemoveEmoji,
Ready,
ThreadListSync,
ThreadMember,
TypingStart,
User,
VoiceState,
WebhooksUpdate
}
@callback handle_event(event) :: any
@callback handle_event(any()) :: :noop
@type event ::
channel_create
| channel_delete
| channel_pins_update
| channel_update
| guild_available
| guild_ban_add
| guild_ban_remove
| guild_create
| guild_delete
| guild_emojis_update
| guild_integrations_update
| guild_member_add
| guild_member_remove
| guild_member_update
| guild_members_chunk
| guild_role_create
| guild_role_delete
| guild_role_update
| guild_unavailable
| guild_update
| integration_create
| integration_update
| integration_delete
| interaction_create
| message_create
| message_delete_bulk
| message_delete
| message_reaction_add
| message_reaction_remove_all
| message_reaction_remove_emoji
| message_reaction_remove
| message_update
| presence_update
| ready
| thread_create
| thread_delete
| thread_list_sync
| thread_member_update
| thread_members_update
| thread_update
| typing_start
| user_update
| webhooks_update
@typedoc """
Sent when a new channel is created.
[Read More](https://discord.com/developers/docs/topics/gateway#channel-create)
"""
@type channel_create ::
{:CHANNEL_CREATE, Channel.t(), WSState.t()}
@typedoc """
Sent when a channel is updated.
This is not sent when the field `:last_message_id` is altered. To keep track of the `:last_message_id` changes, you must listen for `t:message_create/0` events.
[Read More](https://discord.com/developers/docs/topics/gateway#channel-update)
"""
@type channel_update ::
{:CHANNEL_UPDATE, Channel.t(), WSState.t()}
@typedoc """
Sent when a channel relevant to the current user is deleted.
[Read More](https://discord.com/developers/docs/topics/gateway#channel-delete)
"""
@type channel_delete ::
{:CHANNEL_DELETE, Channel.t(), WSState.t()}
@typedoc """
Sent when a message is pinned or unpinned in a text channel.
This is not sent when a pinned message is deleted.
[Read More](https://discord.com/developers/docs/topics/gateway#channel-pins-update)
"""
@type channel_pins_update ::
{:CHANNEL_PINS_UPDATE, Channel.t(), WSState.t()}
## Is this real?
@typep guild_available ::
{:GUILD_AVAILABLE, Guild.t(), WSState.t()}
@typedoc """
Sent when a user is banned from a guild.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-ban-add)
"""
@type guild_ban_add ::
{:GUILD_BAN_ADD, Ban.t(), WSState.t()}
@typedoc """
Sent when a user is unbanned from a guild.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-ban-remove)
"""
@type guild_ban_remove ::
{:GUILD_BAN_REMOVE, Ban.t(), WSState.t()}
@typedoc """
This event can be sent in three different scenarios:
1. When a user is initially connecting, to lazily load and backfill information for all unavailable guilds sent in the Ready event. Guilds that are unavailable due to an outage will send a Guild Delete event.
2. When a Guild becomes available again to the client.
3. When the current user joins a new Guild.
The inner payload is a guild object, with all the extra fields specified.
> Note: If your bot does not have the `:GUILD_PRESENCES` Gateway Intent, or if the guild has over 75k members, members and presences returned in this event will only contain your bot and users in voice channels.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-create)
"""
@type guild_create :: {:GUILD_CREATE, Guild.t(), WSState.t()}
@typedoc """
Sent when a guild becomes or was already unavailable due to:
1. An outage
2. The user leaves or is removed from a guild.
The inner payload is an unavailable guild object. If the unavailable field is not set, the user was removed from the guild.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-delete)
"""
@type guild_delete :: {:GUILD_DELETE, Guild.t(), WSState.t()}
@typedoc """
Sent when a guild's emojis have been updated.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-emojis-update)
"""
@type guild_emojis_update :: {:GUILD_EMOJIS_UPDATE, Guild.t(), WSState.t()}
@typedoc """
Sent when a guild integration is updated.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-integrations-update)
"""
@type guild_integrations_update ::
{:GUILD_INTEGRATIONS_UPDATE, Guild.t(), WSState.t()}
@typedoc """
Sent when a new user joins a guild.
## Intents
- `:GUILD_MEMBERS`
The inner payload is a guild member object with an extra guild_id.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-member-add)
"""
@type guild_member_add ::
{:GUILD_MEMBER_ADD, Member.t(), WSState.t()}
@typedoc """
Sent when a user is removed from a guild.
## Intents
- `:GUILD_MEMBERS`
[Read More](https://discord.com/developers/docs/topics/gateway#guild-member-remove)
"""
@type guild_member_remove ::
{:GUILD_MEMBER_REMOVE, Member.t(), WSState.t()}
@typedoc """
Sent when a guild member is updated.
## Intents
- `:GUILD_MEMBERS`
This will also fire when the user object of a guild member changes.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-member-update)
"""
@type guild_member_update ::
{:GUILD_MEMBER_UPDATE, Member.t(), Member.t(), WSState.t()}
@typedoc """
Sent in response to Guild Request Members.
> Note: While this event *can* be consumed if you so desire, it is kind of pointless, and is used internally for the cache.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-members-chunk)
"""
@type guild_members_chunk ::
{:GUILD_MEMBERS_CHUNK, GuildMembersChunk.t(), WSState.t()}
@typedoc """
Sent when a guild role is created.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-role-create)
"""
@type guild_role_create ::
{:GUILD_ROLE_CREATE, Role.t(), WSState.t()}
@typedoc """
Sent when a guild role is deleted.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-role-delete)
"""
@type guild_role_delete ::
{:GUILD_ROLE_DELETE, Role.t(), WSState.t()}
@typedoc """
Sent when a guild role is updated.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-role-update)
"""
@type guild_role_update ::
{:GUILD_ROLE_UPDATE, Role.t(), WSState.t()}
## does this exist?
@typep guild_unavailable ::
{:GUILD_UNAVAILABLE, Guild.t(), WSState.t()}
@typedoc """
Sent when a guild is updated.
The inner payload is a guild object.
[Read More](https://discord.com/developers/docs/topics/gateway#guild-update)
"""
@type guild_update ::
{:GUILD_UPDATE, Guild.t(), WSState.t()}
@typedoc """
Sent when an integration is created.
[Read More](https://discord.com/developers/docs/topics/gateway#integration-create)
"""
@type integration_create ::
{:INTEGRATION_CREATE, Integration.t(), WSState.t()}
@typedoc """
Sent when an integration is updated.
[Read More](https://discord.com/developers/docs/topics/gateway#integration-update)
"""
@type integration_update ::
{:INTEGRATION_UPDATE, Integration.t(), WSState.t()}
@typedoc """
Sent when an integration is deleted.
[Read More](https://discord.com/developers/docs/topics/gateway#integration-delete)
"""
@type integration_delete ::
{:INTEGRATION_DELETE, Integration.t(), WSState.t()}
@typedoc """
Sent when a user triggers an `Application Command`
Inner payload is an Interaction.
[Read More](https://discord.com/developers/docs/topics/gateway#interaction-create)
"""
@type interaction_create ::
{:INTERACTION_CREATE, Interaction.t(), WSState.t()}
@typedoc """
Sent when a message is created.
The inner payload is a message object.
[Read More](https://discord.com/developers/docs/topics/gateway#message-create)
"""
@type message_create ::
{:MESSAGE_CREATE, Message.t(), WSState.t()}
@typedoc """
Sent when multiple messages are deleted at once.
[Read More](https://discord.com/developers/docs/topics/gateway#message-delete-bulk)
"""
@type message_delete_bulk ::
{:MESSAGE_DELETE_BULK, MessageDeleteBulk.t(), WSState.t()}
@typedoc """
Sent when a message is deleted.
[Read More](https://discord.com/developers/docs/topics/gateway#message-delete)
"""
@type message_delete ::
{:MESSAGE_DELETE, Message.t(), WSState.t()}
@typedoc """
Sent when a user adds a reaction to a message.
[Read More](https://discord.com/developers/docs/topics/gateway#message-reaction-add)
"""
@type message_reaction_add ::
{:MESSAGE_REACTION_ADD, Reaction.t(), WSState.t()}
@typedoc """
Sent when a user explicitly removes all reactions from a message.
[Read More](https://discord.com/developers/docs/topics/gateway#message-reaction-remove-all)
"""
@type message_reaction_remove_all ::
{:MESSAGE_REACTION_REMOVE_ALL, MessageReactionRemoveAll.t(), WSState.t()}
@typedoc """
Sent when a bot removes all instances of a given emoji from the reactions of a message.
[Read More](https://discord.com/developers/docs/topics/gateway#message-reaction-remove-emoji)
"""
@type message_reaction_remove_emoji ::
{:MESSAGE_REACTION_REMOVE_EMOJI, MessageReactionRemoveEmoji.t(), WSState.t()}
@typedoc """
Sent when a user removes a reaction from a message.
[Read More](https://discord.com/developers/docs/topics/gateway#message-reaction-remove)
"""
@type message_reaction_remove ::
{:MESSAGE_REACTION_REMOVE, MessageReactionRemove.t(), WSState.t()}
@typedoc """
Sent when a message is updated.
> Note: Unlike creates, message updates may contain only a subset of the full message object payload (but will always contain an id and channel_id).
[Read More](https://discord.com/developers/docs/topics/gateway#message-update)
"""
@type message_update ::
{:MESSAGE_UPDATE, Message.t(), WSState.t()}
@typedoc """
This event is sent when a user's presence or info, such as name or avatar, is updated.
## Intents
- `:GUILD_PRESENCES`
> Note: The user object within this event can be partial, the only field which must be sent is the id field, everything else is optional. Along with this limitation, no fields are required, and the types of the fields are not validated. Your client should expect any combination of fields and types within this event.
"""
@type presence_update :: {:PRESENCE_UPDATE, User.t(), WSState.t()}
@type ready :: {:READY, Ready.t(), WSState.t()}
@typedoc """
Sent when a thread is created or when the user is added to a thread.
When being added to an existing private thread, includes a thread member object.
[Read More](https://discord.com/developers/docs/topics/gateway#channel-delete)
"""
@type thread_create :: {:THREAD_CREATE, Thread.t(), WSState.t()}
@typedoc """
Sent when a thread relevant to the current user is deleted.
The inner payload is a subset of the channel object, containing just the id, guild_id, parent_id, and type fields.
[Read More](https://discord.com/developers/docs/topics/gateway#thread-delete)
"""
@type thread_delete ::
{:THREAD_DELETE, Thread.t(), WSState.t()}
@typedoc """
Sent when the current user gains access to a channel.
[Read More](https://discord.com/developers/docs/topics/gateway#thread-list-sync-thread-list-sync-event-fields)
"""
@type thread_list_sync ::
{:THREAD_LIST_SYNC, ThreadListSync.t(), WSState.t()}
@typedoc """
Sent when the thread member object for the current user is updated.
The inner payload is a thread member object. This event is documented for completeness, but unlikely to be used by most bots. For bots, this event largely is just a signal that you are a member of the thread. See the threads docs for more details.
"""
@type thread_member_update ::
{:THREAD_MEMBER_UPDATE, ThreadMember.t(), WSState.t()}
@typedoc """
Sent when anyone is added to or removed from a thread.
If the current user does not have the `GUILD_MEMBERS` Gateway Intent, then this event will only be sent if the current user was added to or removed from the thread.
"""
@type thread_members_update ::
{:THREAD_MEMBERS_UPDATE, ThreadMember.t(), WSState.t()}
@typedoc """
Sent when a thread is updated.
The inner payload is a channel object. This is not sent when the field `:last_message_id` is altered. To keep track of the `:last_message_id` changes, you must listen for `t:message_create/0` events.
"""
@type thread_update ::
{:THREAD_UPDATE, Thread.t(), WSState.t()}
@typedoc """
Sent when a user begins typing in a channel.
"""
@type typing_start ::
{:TYPING_START, TypingStart.t(), WSState.t()}
@typedoc """
Sent when a user is updated.
"""
@type user_update ::
{:USER_UPDATE, User.t(), WSState.t()}
@typedoc """
Sent when a user's voice state is updated.
"""
@type voice_state_update ::
{:VOICE_STATE_UPDATE, VoiceState.t(), WSState.t()}
@typedoc """
Sent when a webhook is updated.
"""
@type webhooks_update ::
{:WEBHOOKS_UPDATE, WebhooksUpdate.t(), WSState.t()}
@type voice_server_update :: {:VOICE_SERVER_UPDATE, VoiceServerUpdate.t(), WSState.t()}
@type voice_ready :: {:VOICE_READY, VoiceReady.t(), VoiceWSState.t()}
@type voice_speaking_update :: {:VOICE_SPEAKING_UPDATE, SpeakingUpdate.t(), VoiceWSState.t()}
defmacro __using__(opts) do
quote location: :keep do
@behaviour Remedy.Consumer
alias Remedy.Consumer
def start_link(event) do
Task.start_link(fn -> __MODULE__.handle_event(event) end)
end
def child_spec(_arg) do
spec = %{id: __MODULE__, start: {__MODULE__, :start_link, []}}
Supervisor.child_spec(spec, unquote(Macro.escape(opts)))
end
def handle_event(_event) do
:ok
end
defoverridable handle_event: 1, child_spec: 1
end
end
@spec start_link(any, keyword) :: :ignore | {:error, any} | {:ok, pid}
@doc false
def start_link(mod, opts \\ []) do
{mod_and_opts, cs_opts} =
case Keyword.pop(opts, :name) do
{nil, mod_opts} -> {[mod, mod_opts], []}
{cs_name, mod_opts} -> {[mod, mod_opts], [name: cs_name]}
end
ConsumerSupervisor.start_link(__MODULE__, mod_and_opts, cs_opts)
end
@doc false
def init([mod, opts]) do
default = [strategy: :one_for_one, subscribe_to: [EventBuffer]]
opts = Keyword.merge(default, opts)
child_spec = [%{id: mod, start: {mod, :start_link, []}, restart: :transient}]
ConsumerSupervisor.init(child_spec, opts)
end
end
|
lib/remedy/consumer.ex
| 0.8575 | 0.710013 |
consumer.ex
|
starcoder
|
defmodule Enphex do
@moduledoc """
Enphex is an Elixir wrapper for [Enphase API](https://developer.enphase.com/docs)
All the API calls defined here follow the [API documentation](https://developer.enphase.com/docs)
Please refer to the documentation.
You can pass a map of URL parameters as the `params` value.
Currently, `Enphex` requires you to specify `user_id` in your configuration as the authentication flow
is not implemented in `Enphex` yet. For plain API access, however, this is sufficient.
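For example, date-bounded stats can be requested by passing URL parameters. The
parameter names below follow the Enphase API documentation and, like the system id,
are illustrative:
    Enphex.energy_lifetime(67, %{"start_date" => "2020-01-01", "end_date" => "2020-01-31"})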
"""
@type system_id :: pos_integer | binary
@type params :: map | keyword
use Enphex.Api
alias Enphex.Parser
@doc """
Returns a time series of energy produced on the system over its lifetime.
"""
@spec energy_lifetime(system_id, params) :: Parser.response()
def energy_lifetime(system_id, params \\ %{}) do
do_get("systems/#{system_id}/energy_lifetime", params)
end
@doc """
Returns a listing of all active Envoys currently deployed on the system.
"""
@spec envoys(system_id, params) :: Parser.response()
def envoys(system_id, params \\ %{}) do
do_get("systems/#{system_id}/envoys", params)
end
@doc """
Returns a list of systems for which the user can make API requests.
Pagination and limits must currently be handled by the user through the params.
"""
@spec systems(params) :: Parser.response()
def systems(params \\ %{}) do
do_get("systems", params)
end
@doc """
Returns a listing of active devices on the given system.
A device is considered active if it has not been retired in Enlighten.
“Active” does not imply that the device is currently reporting, producing, or measuring energy.
"""
@spec inventory(system_id, params) :: Parser.response()
def inventory(system_id, params \\ %{}) do
do_get("systems/#{system_id}/inventory", params)
end
@doc """
Returns the energy production of the system for the month starting on the given date.
The start date must be at least one month ago.
"""
@spec monthly_production(system_id, params) :: Parser.response()
def monthly_production(system_id, params \\ %{}) do
do_get("systems/#{system_id}/monthly_production", params)
end
@doc """
Returns performance statistics as measured by the revenue-grade meters installed on the specified system.
"""
@spec rgm_stats(system_id, params) :: Parser.response()
def rgm_stats(system_id, params \\ %{}) do
do_get("systems/#{system_id}/rgm_stats", params)
end
@doc """
Returns performance statistics for the specified system as reported by microinverters installed on the system.
"""
@spec stats(system_id, params) :: Parser.response()
def stats(system_id, params \\ %{}) do
do_get("systems/#{system_id}/stats", params)
end
@doc """
Returns summary information for the specified system.
"""
@spec summary(system_id, params) :: Parser.response()
def summary(system_id, params \\ %{}) do
do_get("systems/#{system_id}/summary", params)
end
end
|
lib/enphex.ex
| 0.828835 | 0.515803 |
enphex.ex
|
starcoder
|
defmodule Options do
@moduledoc false
@doc """
List of the available option names for new pool creation.
"""
@spec names :: [
:fifo
| :max_active
| :max_idle
| :max_idle_time
| :max_wait
| :min_idle
| :test_on_borrow
| :test_on_return
| :when_exhausted_action
]
defmacro names do
[:max_active, :max_idle, :min_idle, :test_on_borrow, :test_on_return, :fifo, :when_exhausted_action, :max_wait, :max_idle_time]
end
end
defmodule ResourcePool do
@moduledoc """
Facade for resource pool.
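A typical lifecycle sketch (the factory module name below is illustrative; it must
implement the `ResourceFactory` behaviour):
    {:ok, _pool} = ResourcePool.new(:my_pool, MyApp.ResourceFactory, [])
    resource = ResourcePool.borrow(:my_pool)
    # ... use the resource ...
    ResourcePool.return(:my_pool, resource)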
"""
require Options
@doc """
Creates and runs new generic server for ResourcePool with registered name `pool_name`. The new resource pool will use
`factory_module` as a resource factory and `resource_metadata` as a metadata to create a new resource.
"""
@spec new((atom | pid), module(), list()) :: :ignore | {:error, any} | {:ok, pid}
def new(pool_name, factory_module, resource_metadata) do
new(pool_name, factory_module, resource_metadata, [])
end
@doc """
Creates and runs new generic server for ResourcePool with registered name `pool_name`. The new resource pool will use
`factory_module` as a resource factory and `resource_metadata` as a metadata to create a new resource.
`options` defines behaviour of the pool.
The available options are listed below; a usage sketch follows the list:
* `max_active: integer()` - defines the maximum number of resource instances that can be allocated by the pool at a given time.
If non-positive, there is no limit to the number of instances that can be managed by the pool at one time.
When `max_active` is reached, the pool is said to be exhausted.
The default setting for this parameter is 8.
* `max_idle: integer()` defines the maximum number of objects that can sit idle in the pool at any time.
If negative, there is no limit to the number of objects that may be idle at one time.
The default setting for this parameter equals `max_active`.
* `min_idle: integer()` defines the minimum number of "sleeping" instances in the pool. Default value is 0.
* `test_on_borrow: boolean()` If true the pool will attempt to validate each resource before it is returned from the borrow function
(Using the provided resource factory's validate function).
Instances that fail to validate will be dropped from the pool, and a different object will
be borrowed. The default setting for this parameter is `false.`
* `test_on_return: boolean()` If true the pool will attempt to validate each resource instance before it is returned to the pool in the
return function (Using the provided resource factory's validate function). Objects that fail to validate
will be dropped from the pool. The default setting for this option is `false.`
* `fifo: boolean()` The pool can act as a LIFO queue with respect to idle resource instances
always returning the most recently used resource from the pool,
or as a FIFO queue, where borrow always returns the oldest instance from the idle resource list.
`fifo` determines whether or not the pool returns idle objects in
first-in-first-out order. The default setting for this parameter is `false.`
* `when_exhausted_action: (:fail | :block | :grow)` specifies the behaviour of the `borrow` function when the pool is exhausted:
* `:fail` will return an error.
* `:block` will block until a new or idle object is available. If a positive `max_wait`
value is supplied, then `borrow` will block for at most that many milliseconds,
after which an error will be returned. If `max_wait` is non-positive,
the `borrow` function will block infinitely.
* `:grow` will create a new object and return it (essentially making `max_active` meaningless.)
The default `when_exhausted_action:` setting is `:block` and
the default `max_wait:` setting is `:infinity`. By default, therefore, `borrow` will
block infinitely until an idle instance becomes available.
* `max_wait: (integer() | infinity)` The maximum amount of time to wait when the `borrow` function
is invoked, the pool is exhausted (the maximum number
of "active" resource instances has been reached) and `when_exhausted_action:` equals `:block`.
* `max_idle_time: (integer() | infinity)` The maximum amount of time an resource instance may sit idle in the pool,
with the extra condition that at least `min_idle` amount of object remain in the pool.
When infinity, no instances will be evicted from the pool due to maximum idle time limit.
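## Example
A pool created with a few of these options (the factory module name is illustrative):
    ResourcePool.new(:my_pool, MyApp.ResourceFactory, [],
      max_active: 10, when_exhausted_action: :block, max_wait: 5_000)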
"""
@spec new((atom | pid), module(), list(), list()) :: :ignore | {:error, any} | {:ok, pid}
def new(pool_name, factory_module, resource_metadata, options) do
failed_options =
for {key, _} <- options, not Enum.member?(Options.names(), key) do
key
end
case failed_options do
[] ->
case factory?(factory_module) do
true ->
GenServer.start_link(ResourcePool.GenServer,
{options, factory_module, resource_metadata}, [timeout: 300000, name: pool_name])
{:error, _} = er -> er
end
t ->
{:error, "Wrong options: " <> Enum.join(Enum.map(t, fn(key) -> Atom.to_string(key) end), ", ")}
end
end
# Check `factory_module` if it implements resource_factory behaviour?
defp factory?(factory_module) do
module_info = try do
factory_module.__info__(:attributes)
rescue _ -> []
end
case module_info do
[] -> {:error, :factory_does_not_exist}
_ ->
behaviours = Keyword.get(module_info, :behaviour, [])
case Enum.member?(behaviours, ResourceFactory) do
true -> true
false -> {:error, :not_factory}
end
end
end
@doc """
Borrows resource from pool. Returns resource `pid` for client use.
"""
@spec borrow(atom | pid) :: pid | {:error, any}
def borrow(pool_name) do
case GenServer.call(pool_name, :borrow, 300000) do
{:ok, resource} -> resource
{:error, _} = r -> r
{:wait, max_wait} ->
recv =
receive do
{:ok, pid} -> pid
after max_wait -> {:error, :pool_timeout}
end
GenServer.call(pool_name, {:ack_borrow, recv}, 300000)
# flush message that probably came to the mailbox after timeout
receive do
{:ok, _} -> :ok
after 0 -> :ok
end
recv
end
end
@doc """
The function sends `resource` to the pool's `idle` container after client does not need it any more.
"""
@spec return(atom | pid, pid) :: :ok
def return(pool_name, resource) do
GenServer.cast(pool_name, {:return, resource, self()})
end
@doc """
Adds one more resource to pool (as an idle resource).
"""
@spec add(atom | pid) :: :ok
def add(pool_name) do
GenServer.cast(pool_name, :add)
end
@doc """
Invalidates resource - makes it ready to dispose.
"""
@spec invalidate(atom | pid, pid) :: :ok
def invalidate(pool_name, resource) do
GenServer.call(pool_name, {:invalidate, resource}, 300000)
end
@doc """
Returns number of active (busy) resources in pool.
"""
@spec get_num_active(atom | pid) :: integer
def get_num_active(pool_name) do
GenServer.call(pool_name, :get_num_active, 300000)
end
@doc """
Returns number of idle (ready to use) resources in pool.
"""
@spec get_num_idle(atom | pid) :: integer
def get_num_idle(pool_name) do
GenServer.call(pool_name, :get_num_idle, 300000)
end
@doc """
Returns total number of resources in pool as a tuple {active, idle}.
"""
@spec get_number(atom | pid) :: {integer, integer}
def get_number(pool_name) do
GenServer.call(pool_name, :get_number, 300000)
end
@doc """
Disposes all resources from the pool.
"""
@spec clear(atom | pid) :: :ok
def clear(pool_name) do
GenServer.cast(pool_name, :clear)
end
@doc """
Disposes all resources from the pool and close the pool (shut down generic server).
"""
@spec close(atom | pid) :: :ok
def close(pool_name) do
GenServer.cast(pool_name, :close)
end
end
|
lib/resource_pool.ex
| 0.886224 | 0.558929 |
resource_pool.ex
|
starcoder
|
defimpl String.Chars, for: Protocol.Dns do
@doc """
Prints a DNS packet to a human readable string
"""
@spec to_string(Protocol.Dns.t) :: String.t
def to_string(dns) do
"""
DNS:
#{dns.header}
Length: #{byte_size(dns.data)}
Parsed:
Questions:
#{dns.parsed |> elem(0) |> Enum.map(&String.Chars.to_string/1) |> Enum.join("\n ")}
Answers:
#{dns.parsed |> elem(1) |> Enum.map(&String.Chars.to_string/1) |> Enum.join("\n ")}
Authorities:
#{dns.parsed |> elem(2) |> Enum.map(&String.Chars.to_string/1) |> Enum.join("\n ")}
Additionals:
#{dns.parsed |> elem(3) |> Enum.map(&String.Chars.to_string/1) |> Enum.join("\n ")}
Raw: #{ExPcap.Binaries.to_raw(dns.data)}
""" |> String.trim
end
end
defimpl String.Chars, for: Protocol.Dns.Header do
@doc """
Prints a DNS packet header to a human readable string
"""
@spec to_string(Protocol.Dns.Header.t) :: String.t
def to_string(dns) do
"""
id: #{ExPcap.Binaries.to_string(dns.id)} #{ExPcap.Binaries.to_hex(dns.id)}
qr: #{ExPcap.Binaries.to_string(dns.qr)} #{Protocol.Dns.Header.qr_name(dns.qr)}
opcode: #{ExPcap.Binaries.to_string(dns.opcode)} #{Protocol.Dns.Header.opcode_name(dns.opcode)}
aa: #{ExPcap.Binaries.to_string(dns.aa)} #{Protocol.Dns.Header.aa_name(dns.aa)}
tc: #{ExPcap.Binaries.to_string(dns.tc)} #{Protocol.Dns.Header.tc_name(dns.tc)}
rd: #{ExPcap.Binaries.to_string(dns.rd)} #{Protocol.Dns.Header.rd_name(dns.rd)}
ra: #{ExPcap.Binaries.to_string(dns.ra)} #{Protocol.Dns.Header.ra_name(dns.ra)}
z: #{ExPcap.Binaries.to_string(dns.z)} #{Protocol.Dns.Header.z_name(dns.z)}
rcode: #{ExPcap.Binaries.to_string(dns.rcode)} #{Protocol.Dns.Header.rcode_name(dns.rcode)}
qdcnt: #{ExPcap.Binaries.to_string(dns.qdcnt)}
ancnt: #{ExPcap.Binaries.to_string(dns.ancnt)}
nscnt: #{ExPcap.Binaries.to_string(dns.nscnt)}
arcnt: #{ExPcap.Binaries.to_string(dns.arcnt)}
""" |> String.trim
end
end
defimpl PayloadType, for: Protocol.Dns do
@doc """
Returns the parser that will parse the body of the DNS packet
"""
@spec payload_parser(Protocol.Dns.t) :: PayloadType.t
def payload_parser(_dns) do
nil
# case dns.header.qr do
# <<0 :: size(1)>> -> Protocol.Dns.Question
# <<1 :: size(1)>> -> Protocol.Dns.ResourceRecord
# end
end
end
defimpl PayloadParser, for: Protocol.Dns do
@doc """
Parses the body of the DNS packet
"""
@spec from_data(binary) :: any
def from_data(data) do
data |> Protocol.Dns.from_data
end
end
defmodule Protocol.Dns.Header do
@moduledoc """
A parsed DNS packet header
"""
defstruct id: <<>>,
qr: <<>>,
opcode: <<>>,
aa: <<>>,
tc: <<>>,
rd: <<>>,
ra: <<>>,
z: <<>>,
rcode: <<>>,
qdcnt: <<>>,
ancnt: <<>>,
nscnt: <<>>,
arcnt: <<>>
@type t :: %Protocol.Dns.Header{
id: binary,
qr: bitstring,
opcode: bitstring,
aa: bitstring,
tc: bitstring,
rd: bitstring,
ra: bitstring,
z: bitstring,
rcode: bitstring,
qdcnt: non_neg_integer,
ancnt: non_neg_integer,
nscnt: non_neg_integer,
arcnt: non_neg_integer
}
@doc """
Is this a query or a response?
"""
@spec qr_name(binary) :: :QUERY | :ANSWER
def qr_name(qr) do
case qr do
<<0 :: size(1)>> -> :QUERY
<<1 :: size(1)>> -> :ANSWER
end
end
@doc """
Is this response authoritative?
"""
@spec aa_name(binary) :: :NOT_AUTHORITATIVE | :AUTHORITATIVE | :""
def aa_name(aa) do
case aa do
<<0 :: size(1)>> -> :NOT_AUTHORITATIVE
<<1 :: size(1)>> -> :AUTHORITATIVE
_ -> :""
end
end
@doc """
Is this response truncated?
"""
@spec tc_name(binary) :: :NOT_TRUNCATED | :TRUNCATED | :""
def tc_name(tc) do
case tc do
<<0 :: size(1)>> -> :NOT_TRUNCATED
<<1 :: size(1)>> -> :TRUNCATED
_ -> :""
end
end
@doc """
Is recursion desired?
"""
@spec rd_name(binary) :: :NO_RECURSION_DESIRED | :RECURSION_DESIRED | :""
def rd_name(rd) do
case rd do
<<0 :: size(1)>> -> :NO_RECURSION_DESIRED
<<1 :: size(1)>> -> :RECURSION_DESIRED
_ -> :""
end
end
@doc """
Is recursion available?
"""
@spec ra_name(binary) :: :NO_RECURSION_AVAILABLE | :RECURSION_AVAILABLE | :""
def ra_name(ra) do
case ra do
<<0 :: size(1)>> -> :NO_RECURSION_AVAILABLE
<<1 :: size(1)>> -> :RECURSION_AVAILABLE
_ -> :""
end
end
@doc """
The first bit is reserved.
The second bit indicates whether the response was authenticated.
The third bit indicates whether the data was authenticated.
"""
@spec z_name(binary) :: atom
def z_name(z) do
case z do
<<0b000 :: size(3)>> -> :"RESERVED - NOT AUTHENTICATED - NON AUTHENTICATED DATA"
<<0b001 :: size(3)>> -> :"RESERVED - NOT AUTHENTICATED - AUTHENTICATED DATA"
<<0b010 :: size(3)>> -> :"RESERVED - AUTHENTICATED - NON AUTHENTICATED DATA"
<<0b011 :: size(3)>> -> :"RESERVED - AUTHENTICATED - AUTHENTICATED DATA"
<<0b100 :: size(3)>> -> :"RESERVED - NOT AUTHENTICATED - NON AUTHENTICATED DATA"
<<0b101 :: size(3)>> -> :"RESERVED - NOT AUTHENTICATED - AUTHENTICATED DATA"
<<0b110 :: size(3)>> -> :"RESERVED - AUTHENTICATED - NON AUTHENTICATED DATA"
<<0b111 :: size(3)>> -> :"RESERVED - AUTHENTICATED - AUTHENTICATED DATA"
end
end
@doc """
What is the op code of this DNS packet?
"""
@spec opcode_name(non_neg_integer) :: :QUERY | :STATUS | :NOTIFY | :UPDATE | :""
def opcode_name(opcode) do
case opcode do
0 -> :QUERY
2 -> :STATUS
4 -> :NOTIFY
5 -> :UPDATE
_ -> :""
end
end
@doc """
What is the r code of this DNS packet?
"""
@spec rcode_name(non_neg_integer) :: atom
def rcode_name(rcode) do
case rcode do
0 -> :NOERROR
1 -> :FORMERR
2 -> :SERVFAIL
3 -> :NXDOMAIN
4 -> :NOTIMPL
5 -> :REFUSED
6 -> :YXDOMAIN
7 -> :YXRRSET
8 -> :NXRRSET
9 -> :NOTAUTH
10 -> :NOTZONE
16 -> :BADVERS_OR_BADSIG
17 -> :BADKEY
18 -> :BADTIME
19 -> :BADMODE
20 -> :BADNAME
21 -> :BADALG
22 -> :BADTRUNC
_ -> :""
end
end
end
defmodule Protocol.Dns do
@moduledoc """
A parsed DNS packet
"""
@bytes_in_header 12
defstruct header: %Protocol.Dns.Header{},
parsed: {
[%Protocol.Dns.Question{}], # questions
[%Protocol.Dns.ResourceRecord{}], # answers
[%Protocol.Dns.ResourceRecord{}], # authorities
[%Protocol.Dns.ResourceRecord{}], # additionals
<<>> # leftover bytes
},
data: <<>>
@type t :: %Protocol.Dns{
header: Protocol.Dns.Header.t,
parsed: {
[Protocol.Dns.Question.t], # questions
[Protocol.Dns.ResourceRecord.t], # answers
[Protocol.Dns.ResourceRecord.t], # authorities
[Protocol.Dns.ResourceRecord.t], # additionals
binary
},
data: binary
}
@doc """
Parses a DNS header
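## Example
A sketch of parsing a minimal 12-byte query header (only a few of the resulting
`Protocol.Dns.Header` fields are shown):
    header = Protocol.Dns.header(<<0x12, 0x34, 0x01, 0x00, 0, 1, 0, 0, 0, 0, 0, 0>>)
    header.id     #=> <<0x12, 0x34>>
    header.qdcnt  #=> 1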
"""
@spec header(binary) :: Protocol.Dns.Header.t
def header(data) do
<<
id :: bytes-size(2),
qr :: bits-size(1),
opcode :: bits-size(4),
aa :: bits-size(1),
tc :: bits-size(1),
rd :: bits-size(1),
ra :: bits-size(1),
z :: bits-size(3),
rcode :: bits-size(4),
qdcnt :: unsigned-integer-size(16),
ancnt :: unsigned-integer-size(16),
nscnt :: unsigned-integer-size(16),
arcnt :: unsigned-integer-size(16),
_payload :: binary
>> = data
%Protocol.Dns.Header{
id: id,
qr: qr,
opcode: ExPcap.Binaries.to_uint4(opcode),
aa: aa,
tc: tc,
rd: rd,
ra: ra,
z: z,
rcode: ExPcap.Binaries.to_uint4(rcode),
qdcnt: qdcnt,
ancnt: ancnt,
nscnt: nscnt,
arcnt: arcnt
}
end
@doc """
Returns a parsed DNS packet
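## Example
Typically fed the payload of a captured UDP packet (`udp_payload` below is a
placeholder binary):
    dns = Protocol.Dns.from_data(udp_payload)
    {questions, answers, authorities, additionals, _rest} = dns.parsed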
"""
@spec from_data(binary) :: Protocol.Dns.t
def from_data(data) do
<< _header :: bytes-size(@bytes_in_header), payload :: binary >> = data
header = header(data)
dns = Protocol.Dns.ResourceRecord.read_dns(header, data, payload)
%Protocol.Dns{
header: header,
parsed: dns,
data: payload
}
end
end
|
lib/protocol/dns.ex
| 0.613468 | 0.44559 |
dns.ex
|
starcoder
|
defmodule Linear do
@doc """
Apply a linear rule to a signed formula.
Returns a list of conclusions.
## Examples
iex> Linear.apply_linear({:t, {:not, :p}})
[{:f, :p}]
iex> Linear.apply_linear({:f, {:not, :p}})
[{:t, :p}]
iex> Linear.apply_linear({:t, {:p, :and, :q}})
[{:t, :p}, {:t, :q}]
"""
def apply_linear(signed_formula)
def apply_linear({:t, {:not, formula}}) do
[{:f, formula}]
end
def apply_linear({:f, {:not, formula}}) do
[{:t, formula}]
end
def apply_linear({:t, {left, :and, right}}) do
[{:t, left}, {:t, right}]
end
def apply_linear({:f, {left, :or, right}}) do
[{:f, left}, {:f, right}]
end
def apply_linear({:f, {left, :implies, right}}) do
[{:t, left}, {:f, right}]
end
def apply_linear(_) do
[]
end
defp apply_all_linear_recursively_aux([], result) do
result
end
defp apply_all_linear_recursively_aux(formulas, result) do
apply_all_linear_recursively_aux(
apply_all_linear_once(formulas),
result ++ formulas
)
end
@doc """
Apply all linear rules recursively to a list of signed formulas.
Returns a list containing the signed formulas and the conclusions.
## Examples
iex> Linear.apply_all_linear_recursively([{:t, {:not, :p}}])
[t: {:not, :p}, f: :p]
iex> Linear.apply_all_linear_recursively([{:t, {:not, {:not, :a}}}, {:t, {:c, :and, {:d, :and, :g}}}, {:f, {:u, :and, :u}}])
[
t: {:not, {:not, :a}},
t: {:c, :and, {:d, :and, :g}},
f: {:u, :and, :u},
f: {:not, :a},
t: :c,
t: {:d, :and, :g},
t: :a,
t: :d,
t: :g
]
"""
def apply_all_linear_recursively(signed_formulas) do
apply_all_linear_recursively_aux(signed_formulas, [])
end
@doc """
Apply all linear rules to a list of signed formulas.
Returns a list containing the conclusions.
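## Examples
For instance (the atoms are arbitrary placeholders):
    iex> Linear.apply_all_linear_once([{:t, {:not, :p}}, {:f, {:a, :or, :b}}])
    [f: :p, f: :a, f: :b]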
"""
def apply_all_linear_once(formulas) do
formulas |> Enum.map(&apply_linear/1) |> List.flatten() |> Enum.uniq()
end
def apply_linear_rules(proof = %Proof{}) do
%{proof | formulas: Enum.uniq(apply_all_linear_recursively(proof.formulas))}
end
end
|
lib/linear.ex
| 0.874573 | 0.793506 |
linear.ex
|
starcoder
|
defmodule MailerLite.Stats do
@moduledoc """
Account statitistics.
"""
@typedoc """
MailerLite stats object
"""
@type stats :: %{subscribed: non_neg_integer,
unsubscribed: non_neg_integer,
campaigns: non_neg_integer,
sent_emails: non_neg_integer,
open_rate: float,
click_rate: float,
bounce_rate: float}
@endpoint "https://api.mailerlite.com/api/v2/stats"
@doc ~S"""
Gets basic stats for the account, subscriber count, click rates etc.
[MailerLite API reference](https://developers.mailerlite.com/reference#stats)
## Example requests
MailerLite.Stats.get
## Example response
{:ok, %{bounce_rate: 0.05,
campaigns: 4,
click_rate: 0.05,
open_rate: 0.1,
sent_emails: 2,
subscribed: 10187,
unsubscribed: 1}}
## Tests
iex> {:ok, response} = MailerLite.Stats.get
iex> is_map(response)
true
"""
@spec get() :: {:ok, stats} | {:error, atom}
def get do
do_get(:now)
end
@doc ~S"""
The same as `MailerLite.Stats.get` but accepts an `integer` UNIX timestamp for retrieving stats for a specific time in the past.
[MailerLite API reference](https://developers.mailerlite.com/reference#stats)
## Example requests
MailerLite.Stats.get(1491855902)
## Example response
{:ok, %{bounce_rate: 0.05,
campaigns: 4,
click_rate: 0.05,
open_rate: 0.1,
sent_emails: 2,
subscribed: 10187,
unsubscribed: 1}}
## Tests
iex> {:ok, response} = MailerLite.Stats.get(1491855902)
iex> is_map(response)
true
iex> MailerLite.Stats.get("time")
{:error, :invalid_argument}
"""
@spec get(MailerLite.unix_timestamp) :: {:ok, stats} | {:error, atom}
def get(unix_timestamp) when is_integer(unix_timestamp) do
do_get(unix_timestamp)
end
def get(_unix_timestamp), do: {:error, :invalid_argument}
defp do_get(unix_timestamp) do
url = case unix_timestamp do
:now -> @endpoint
_ -> @endpoint <> "?timestamp=" <> Integer.to_string(unix_timestamp)
end
MailerLite.get(url)
end
end
|
lib/stats.ex
| 0.832475 | 0.421969 |
stats.ex
|
starcoder
|
defmodule OT.Server do
@moduledoc """
A safe API for interacting with operations and the data they operate against.
"""
use GenServer
@typedoc """
A map containing OT-related information.
This map must contain at least three keys (see the example after this list):
- `type`: A string representing the OT type, which will be used to find the
appropriate OT module.
- `version`: A non-negative integer representing the current `t:version/0` of
the datum.
- `content`: The contents of the datum that `t:operation/0`s will be applied
to.
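For instance, a text datum could look like
`%{type: "text", version: 0, content: "Hello"}` (illustrative values).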
"""
@type datum :: %{required(:type) => String.t,
required(:version) => non_neg_integer,
required(:content) => any,
any => any}
@typedoc """
A piece of information that can uniquely identify a `t:datum/0`.
"""
@type datum_id :: any
@typedoc """
A list of units of work performed against a single piece of data (a
`t:datum/0`).
"""
@type operation :: [any]
@typedoc """
A non-negative integer representing an operation or `t:datum/0` version.
"""
@type version :: non_neg_integer
@typedoc """
A tuple representing an `t:operation/0` and its `t:version/0`.
"""
@type operation_info :: {operation, version}
@doc false
def start_link(_) do
GenServer.start_link(__MODULE__, [])
end
@doc """
Submit an operation.
## Example
iex> {:ok, pid} = OT.Server.start_link([])
iex> :ets.insert(:ot_data,
...> {"id", %{id: "id", content: "Hllo, ", type: "text", version: 0}})
iex> OT.Server.submit_operation(pid, "id", {[1, %{i: "e"}], 1})
iex> OT.Server.submit_operation(pid, "id", {[6, %{i: "world."}], 1})
{:ok, {[7, %{i: "world."}], 2}}
iex> OT.Server.get_datum(pid, "id")
{:ok, %{id: "id", content: "Hello, world.", type: "text", version: 2}}
If the operation succeeds, a tuple will be returned with the operation and
its version. Otherwise, an error will be returned.
"""
@spec submit_operation(pid, any, {OT.Operation.t, pos_integer}, any) ::
{:ok, {OT.Operation.t, pos_integer}} | {:error, any}
def submit_operation(pid, datum_id, {op, vsn}, meta \\ nil) do
GenServer.call(pid, {:submit_operation, datum_id, {op, vsn}, meta})
end
@doc """
Get a datum.
This will call the configured adapter's `c:OT.Server.Adapter.get_datum/1`
function and return that value.
## Example
iex> {:ok, pid} = OT.Server.start_link([])
iex> :ets.insert(:ot_data, {"id", %{id: "id"}})
iex> OT.Server.get_datum(pid, "id")
{:ok, %{id: "id"}}
If the datum is found, it will be returned. Otherwise, an error is returned.
Also, note that this function does get called in a worker, so shares worker
bandwidth with `submit_operation/3`.
"""
@spec get_datum(pid, any) :: {:ok, any} | {:error, any}
def get_datum(pid, id) do
GenServer.call(pid, {:get_datum, id})
end
@impl true
def handle_call(call_args, _from, state) do
[command | args] = Tuple.to_list(call_args)
result = apply(OT.Server.Impl, command, args)
{:reply, result, state}
end
end
|
lib/ot/server.ex
| 0.906867 | 0.658361 |
server.ex
|
starcoder
|
defmodule Cocktail.Validation do
@moduledoc false
alias Cocktail.Validation.{
Day,
HourOfDay,
Interval,
MinuteOfHour,
ScheduleLock,
SecondOfMinute,
TimeOfDay,
TimeRange
}
@type validation_key ::
:base_sec
| :base_min
| :base_hour
| :base_wday
| :day
| :hour_of_day
| :minute_of_hour
| :second_of_minute
| :time_of_day
| :time_range
| :interval
@type validations_map :: %{validation_key => t}
@type t ::
ScheduleLock.t()
| Interval.t()
| Day.t()
| HourOfDay.t()
| MinuteOfHour.t()
| SecondOfMinute.t()
| TimeOfDay.t()
| TimeRange.t()
@spec build_validations(Cocktail.rule_options()) :: validations_map
def build_validations(options) do
{frequency, options} = Keyword.pop(options, :frequency)
{interval, options} = Keyword.pop(options, :interval, 1)
frequency
|> build_basic_interval_validations(interval)
|> apply_options(options)
end
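# Illustrative example (option values are hypothetical):
#   build_validations(frequency: :daily, interval: 2, hours: [9, 17])
# returns a map with :base_sec, :base_min, :interval and :hour_of_day entries,
# because the :hours option replaces the :base_hour lock with an HourOfDay validation.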
@spec build_basic_interval_validations(Cocktail.frequency(), pos_integer) :: validations_map
defp build_basic_interval_validations(:weekly, interval) do
%{
base_sec: ScheduleLock.new(:second),
base_min: ScheduleLock.new(:minute),
base_hour: ScheduleLock.new(:hour),
base_wday: ScheduleLock.new(:wday),
interval: Interval.new(:weekly, interval)
}
end
defp build_basic_interval_validations(:daily, interval) do
%{
base_sec: ScheduleLock.new(:second),
base_min: ScheduleLock.new(:minute),
base_hour: ScheduleLock.new(:hour),
interval: Interval.new(:daily, interval)
}
end
defp build_basic_interval_validations(:hourly, interval) do
%{
base_sec: ScheduleLock.new(:second),
base_min: ScheduleLock.new(:minute),
interval: Interval.new(:hourly, interval)
}
end
defp build_basic_interval_validations(:minutely, interval) do
%{
base_sec: ScheduleLock.new(:second),
interval: Interval.new(:minutely, interval)
}
end
defp build_basic_interval_validations(:secondly, interval) do
%{
interval: Interval.new(:secondly, interval)
}
end
@spec apply_options(validations_map, Cocktail.rule_options()) :: validations_map
defp apply_options(map, []), do: map
defp apply_options(map, [{:days, days} | rest]) when length(days) > 0 do
map
|> Map.delete(:base_wday)
|> Map.put(:day, Day.new(days))
|> apply_options(rest)
end
defp apply_options(map, [{:hours, hours} | rest]) when length(hours) > 0 do
map
|> Map.delete(:base_hour)
|> Map.put(:hour_of_day, HourOfDay.new(hours))
|> apply_options(rest)
end
defp apply_options(map, [{:minutes, minutes} | rest]) when length(minutes) > 0 do
map
|> Map.delete(:base_min)
|> Map.put(:minute_of_hour, MinuteOfHour.new(minutes))
|> apply_options(rest)
end
defp apply_options(map, [{:seconds, seconds} | rest]) when length(seconds) > 0 do
map
|> Map.delete(:base_sec)
|> Map.put(:second_of_minute, SecondOfMinute.new(seconds))
|> apply_options(rest)
end
defp apply_options(map, [{:times, times} | rest]) when length(times) > 0 do
map
|> Map.delete(:base_sec)
|> Map.delete(:base_min)
|> Map.delete(:base_hour)
|> Map.put(:time_of_day, TimeOfDay.new(times))
|> apply_options(rest)
end
defp apply_options(map, [{:time_range, time_range} | rest]) do
map
|> Map.delete(:base_sec)
|> Map.delete(:base_min)
|> Map.delete(:base_hour)
|> Map.put(:time_range, TimeRange.new(time_range))
|> apply_options(rest)
end
# unhandled option, just discard and continue
defp apply_options(map, [{_, _} | rest]), do: map |> apply_options(rest)
end
|
lib/cocktail/validation.ex
| 0.812904 | 0.485295 |
validation.ex
|
starcoder
|
defmodule SvgBuilder.Element do
@moduledoc """
Create and manipulate basic SVG elements.
Elements are represented the same way as XmlBuilder elements: a
tuple with three values, the tag name, attributes and child elements.
## Example
iex> Element.element(:text, %{}, "A text element")
{:text, %{}, "A text element"}
"""
@type element_t() ::
:a
| :altGlyph
| :altGlyphDef
| :altGlyphItem
| :animate
| :animateColor
| :animateMotion
| :animateTransform
| :circle
| :clipPath
| :"color-profile"
| :cursor
| :defs
| :desc
| :ellipse
| :feBlend
| :feColorMatrix
| :feComponentTransfer
| :feComposite
| :feConvolveMatrix
| :feDiffuseLighting
| :feDisplacementMap
| :feDistantLight
| :feFlood
| :feFuncA
| :feFuncB
| :feFuncG
| :feFuncR
| :feGaussianBlur
| :feImage
| :feMerge
| :feMergeNode
| :feMorphology
| :feOffset
| :fePointLight
| :feSpecularLighting
| :feSpotLight
| :feTile
| :feTurbulence
| :filter
| :font
| :"font-face"
| :"font-face-format"
| :"font-face-name"
| :"font-face-src"
| :"font-face-uri"
| :foreignObject
| :g
| :glyph
| :glyphRef
| :hkern
| :image
| :line
| :linearGradient
| :marker
| :mask
| :metadata
| :"missing-glyph"
| :mpath
| :path
| :pattern
| :polygon
| :polyline
| :radialGradient
| :rect
| :script
| :set
| :stop
| :style
| :svg
| :switch
| :symbol
| :text
| :textPath
| :title
| :tref
| :tspan
| :use
| :view
| :vkern
@type t() :: {element_t, map, [__MODULE__.t() | binary]}
@doc """
Create an element.
Returns: element
## Example
iex> Element.element(:text, %{}, ["A text element"])
{:text, %{}, [{nil, nil, "A text element"}]}
"""
@spec element(atom, map, [t | binary]) :: t
def element(name, attributes, children) do
XmlBuilder.element(name, attributes, children)
end
@doc """
Creates a group of elements,
Returns: a group element
## Example
iex> Element.group([Shape.rect(1,1,1,1), Shape.rect(2,2,1,1)])
{:g, %{},
[
{:rect, %{height: "1", width: "1", x: "1", y: "1"}, []},
{:rect, %{height: "1", width: "1", x: "2", y: "2"}, []}
]}
"""
@spec group([t]) :: t
def group(elements \\ []) do
element(:g, %{}, elements)
end
@doc """
Adds an attribute to an element.
If the value is nil, then no attribute will be added.
## Example
iex> Shape.rect(1,1,2,2) |> Element.add_attribute(:id, "test")
{:rect, %{height: "2", id: "test", width: "2", x: "1", y: "1"}, []}
"""
@spec add_attribute(t, atom, any) :: t
def add_attribute(element, _, nil) do
element
end
def add_attribute({type, attrs, children}, name, value) do
{type, Map.put(attrs, name, value), children}
end
@doc """
Add multiple attributes to an element.
The attributes must be a map.
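## Example
An illustrative call (attribute values chosen arbitrarily):
    iex> Element.add_attributes({:rect, %{x: "1"}, []}, %{height: "2", width: "2"})
    {:rect, %{height: "2", width: "2", x: "1"}, []}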
"""
@spec add_attributes(t, map) :: t
def add_attributes({type, attrs, children}, attributes) do
{type, Map.merge(attrs, attributes), children}
end
@doc """
Remove an attribute from an element.
## Example
iex> {:rect, %{foo: 123, bar: 1234}, []} |> Element.remove_attribute(:foo)
{:rect, %{bar: 1234}, []}
"""
@spec remove_attribute(t, atom) :: t
def remove_attribute({type, attrs, children}, name) do
{type, Map.delete(attrs, name), children}
end
@doc """
Sets the `id` attribute on an element.
"""
@spec id(t, binary) :: t
def id(element, id) do
add_attribute(element, :id, id)
end
@doc """
Sets the `xml:base` attribute on an element.
"""
@spec xml_base(t, binary) :: t
def xml_base(element, xml_base) do
add_attribute(element, :"xml:base", xml_base)
end
@doc """
Sets the `class` attribute on an element.
The class can refer to any CSS class used by the SVG.
"""
@spec class(t, binary) :: t
def class(element, class) do
add_attribute(element, :class, class)
end
@doc """
Sets the `style` attribute on an element.
The style value is a CSS type style string.
"""
@spec style(t, binary) :: t
def style(element, style) do
add_attribute(element, :style, style)
end
@doc """
Add a child to the element.
Children are prepended to the list of children.
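## Example
A minimal illustration:
    iex> Element.group() |> Element.add_child({:rect, %{}, []})
    {:g, %{}, [{:rect, %{}, []}]}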
"""
@spec add_child(t, t) :: t
def add_child({type, attrs, children}, child_element) do
{type, attrs, [child_element | children]}
end
end
|
lib/element.ex
| 0.888569 | 0.710892 |
element.ex
|
starcoder
|
defmodule SNTP do
@moduledoc """
SNTP v4 client [RFC4330](https://tools.ietf.org/html/rfc4330) for Elixir
"""
alias SNTP.{Retriever, RetrieverError, Socket, Timestamp}
@doc """
Returns the system time in milliseconds.
If the `SNTP.Retriever` is running then it will return the adjusted system time.
## Examples
iex> SNTP.now()
System.system_time(1000)
"""
@spec now() :: pos_integer()
def now() do
case :ets.info(:sntp) do
:undefined -> System.system_time(1000)
_ ->
[lastest: %{t: offset, is_valid?: is_valid?}] = :ets.lookup(:sntp, :lastest)
case is_valid? do
false -> System.system_time(1000)
true -> System.system_time(1000) + Kernel.round(offset)
end
end
end
@doc """
Returns the latest retrieved offset from the `SNTP.Retriever`
## Examples
iex> SNTP.offset()
{:ok, 12}
iex> SNTP.offset()
{:error, {SNTP.RetrieverError, "SNTP Retriever is not started"}}
"""
@spec offset() :: {:ok, number()} | {:error, {Exception.t(), binary()}}
def offset() do
case :ets.info(:sntp) do
:undefined -> {:error, {RetrieverError, "SNTP Retriever is not started"}}
_ ->
[lastest: %{t: offset}] = :ets.lookup(:sntp, :lastest)
{:ok, offset}
end
end
@doc """
Starts the `SNTP.Retriever`
* `options` is an `Enumerable.t()` with these keys:
* `auto_start` is a `boolean()`, defaults to `true`
* `retreive_every` is a `non_neg_integer()`, defaults to `86400000` (every 24 hours)
* `host` is `binary() | charlist()`, defaults to `'pool.ntp.org'`
* `port` is a `non_neg_integer()` between `0..65535`, defaults to `123`
* `timeout` is a `non_neg_integer()`, defaults to `:infinity`
* `resolve_reference` is a `boolean()`, defaults to `false`
## Examples
iex> SNTP.start()
{:ok, #PID<0.000.0>}
"""
@spec start(Enumerable.t()) :: {:ok, pid()}
defdelegate start(config \\ []), to: Retriever, as: :start_link
@doc """
Stops the `SNTP.Retriever`
## Examples
iex> SNTP.stop()
:ok
"""
@spec stop(pid()) :: :ok
defdelegate stop(pid \\ GenServer.whereis(Retriever)), to: Retriever
@doc """
Sends a new NTP request on an `SNTP.Socket` and gracefully closes the socket.
Returns `{:ok, %SNTP.Timestamp{}}` or `{:error, reason}`
* `options` is an `Enumerable.t()` with these keys:
* `host` is `binary() | charlist()`, defaults to `'pool.ntp.org'`
* `port` is a `non_neg_integer()` between `0..65535`, defaults to `123`
* `timeout` is a `non_neg_integer()`, defaults to `:infinity`
* `resolve_reference` is a `boolean()`, defaults to `false`
## Examples
iex> {:ok, timestamp} = SNTP.time()
iex> timestamp.is_valid?
true
iex> SNTP.time(host: 'ntp.exnet.com', port: 123, timeout: 100)
{:error, [timeout: "Server Timeout after 100"]}
"""
@spec time(Enumerable.t()) :: {:ok, integer()} | {:error, term()}
def time(options \\ []) do
options
|> Socket.new()
|> Socket.send()
|> Socket.close()
|> parse()
end
defp parse(%Socket{errors: []} = socket) do
socket
|> Timestamp.parse()
|> parse()
end
defp parse(%Socket{errors: errors}) do
{:error, errors}
end
defp parse(timestamp) do
case timestamp.is_valid? do
false -> {:error, timestamp.errors}
true -> {:ok, timestamp}
end
end
end
|
lib/sntp.ex
| 0.912221 | 0.540621 |
sntp.ex
|
starcoder
|
defmodule Day14.Table do
@moduledoc """
Functions for creating and working with a table of chemical reactions.
A chemical reaction table is represented as a digraph where each vertex is a
material. The graph has edges from the output of a reaction to each input
required for the reaction, with labels for the quantity of each component.
This structure supports being able to determine the amount of ore needed to
create different amounts of fuel (or other materials).
"""
alias Day14.Reaction, as: Reaction
@typedoc """
A table of reactions.
"""
@opaque t :: :digraph.graph()
@doc """
Create a new reaction table from a string of lines describing each reaction.
"""
@spec from_string(String.t()) :: t
def from_string(str) do
String.split(str, "\n") |> Enum.map(&Reaction.from_string/1) |> new
end
@doc """
Create a new reaction table from a list of reactions.
"""
@spec new(list(Reaction.t())) :: t
def new(reactions) do
g = :digraph.new([:acyclic])
:digraph.add_vertex(g, "ORE")
Enum.each(reactions, fn {_, {n, name}} ->
:digraph.add_vertex(g, name, n)
end)
Enum.each(reactions, fn {ins, {_, name_out}} ->
Enum.each(ins, fn {n_in, name_in} ->
[:"$e" | _] = :digraph.add_edge(g, name_out, name_in, n_in)
end)
end)
g
end
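# For example, the reaction "7 A, 1 B => 1 C" is stored as vertex "C" with label 1
# and edges C -> A (label 7) and C -> B (label 1).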
defp sorted_materials(table) do
:digraph_utils.topsort(table)
end
@doc ~S|
Calculate the amount of ore required to produce a certain amount of a material.
## Examples
iex> table = Day14.Table.from_string(String.trim("""
...> 10 ORE => 10 A
...> 1 ORE => 1 B
...> 7 A, 1 B => 1 C
...> 7 A, 1 C => 1 D
...> 7 A, 1 D => 1 E
...> 7 A, 1 E => 1 FUEL
...> """))
iex> Day14.Table.required_ore(table, {1, "FUEL"})
31
|
@spec required_ore(t, Reaction.component()) :: number
def required_ore(table, component)
def required_ore(table, {amount, material}) do
required_ore(table, sorted_materials(table), %{material => amount})
end
defp required_ore(_, ["ORE" | _], %{"ORE" => n}), do: n
defp required_ore(table, [mat | rest], reqs) do
case Map.get(reqs, mat, 0) do
0 ->
required_ore(table, rest, reqs)
amount ->
ratio = ceil(amount / output_amount(table, mat))
required_ore(
table,
rest,
Enum.reduce(reagents(table, mat), Map.delete(reqs, mat), fn {in_mat, n_in}, reqs ->
new_amount = ratio * n_in
Map.update(reqs, in_mat, new_amount, &(&1 + new_amount))
end)
)
end
end
defp output_amount(table, mat) do
{_, amount} = :digraph.vertex(table, mat)
amount
end
defp reagents(table, mat) do
:digraph.out_edges(table, mat)
|> Enum.map(fn e ->
{_, _, input, amount} = :digraph.edge(table, e)
{input, amount}
end)
end
@doc """
Determines the maximum amount of fuel that can be made by starting with the
given amount of ore.
"""
@spec fuel_possible(t, number) :: number
def fuel_possible(table, ore) do
fuel_possible(table, ore, floor(ore / required_ore(table, {1, "FUEL"})))
end
defp fuel_possible(table, max_ore, lower_bound) do
ore = required_ore(table, {lower_bound, "FUEL"})
cond do
ore < max_ore -> fuel_possible(table, max_ore, lower_bound + 50000)
true -> fuel_possible(table, max_ore, lower_bound - 50000, lower_bound)
end
end
defp fuel_possible(table, max_ore, lower_bound, upper_bound) do
ore = required_ore(table, {lower_bound, "FUEL"})
cond do
ore > max_ore -> lower_bound - 1
true -> fuel_possible(table, max_ore, lower_bound + 1, upper_bound)
end
end
end
|
aoc2019_elixir/apps/day14/lib/table.ex
| 0.903736 | 0.839471 |
table.ex
|
starcoder
|
defmodule Mox do
@moduledoc """
Mox is a library for defining concurrent mocks in Elixir.
The library follows the principles outlined in
["Mocks and explicit contracts"](http://blog.plataformatec.com.br/2015/10/mocks-and-explicit-contracts/),
summarized below:
1. No ad-hoc mocks. You can only create mocks based on behaviours
2. No dynamic generation of modules during tests. Mocks are preferably defined
in your `test_helper.exs` or in a `setup_all` block and not per test
3. Concurrency support. Tests using the same mock can still use `async: true`
4. Rely on pattern matching and function clauses for asserting on the
input instead of complex expectation rules
## Example
As an example, imagine that your library defines a calculator behaviour:
defmodule MyApp.Calculator do
@callback add(integer(), integer()) :: integer()
@callback mult(integer(), integer()) :: integer()
end
If you want to mock the calculator behaviour during tests, the first step
is to define the mock, usually in your `test_helper.exs`:
Mox.defmock(MyApp.CalcMock, for: MyApp.Calculator)
Now in your tests, you can define expectations and verify them:
use ExUnit.Case, async: true
import Mox
# Make sure mocks are verified when the test exits
setup :verify_on_exit!
test "invokes add and mult" do
MyApp.CalcMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:mult, fn x, y -> x * y end)
assert MyApp.CalcMock.add(2, 3) == 5
assert MyApp.CalcMock.mult(2, 3) == 6
end
In practice, you will have to pass the mock to the system under the test.
If the system under test relies on application configuration, you should
also set it before the tests starts to keep the async property. Usually
in your config files:
config :my_app, :calculator, MyApp.CalcMock
Or in your `test_helper.exs`:
Application.put_env(:my_app, :calculator, MyApp.CalcMock)
All expectations are defined based on the current process. This
means multiple tests using the same mock can still run concurrently
unless the Mox is set to global mode. See the "Multi-process collaboration"
section.
## Multiple behaviours
Mox supports defining mocks for multiple behaviours.
Suppose your library also defines a scientific calculator behaviour:
defmodule MyApp.ScientificCalculator do
@callback exponent(integer(), integer()) :: integer()
end
You can mock both the calculator and scientific calculator behaviour:
Mox.defmock(MyApp.SciCalcMock, for: [MyApp.Calculator, MyApp.ScientificCalculator])
## Compile-time requirements
If the mock needs to be available during the project compilation, for
instance because you get undefined function warnings, then instead of
defining the mock in your `test_helper.exs`, you should instead define
it under `test/support/mocks.ex`:
Mox.defmock(MyApp.CalcMock, for: MyApp.Calculator)
Then you need to make sure that files in `test/support` get compiled
with the rest of the project. Edit your `mix.exs` file to add the
`test/support` directory to compilation paths:
def project do
[
...
elixirc_paths: elixirc_paths(Mix.env),
...
]
end
defp elixirc_paths(:test), do: ["test/support", "lib"]
defp elixirc_paths(_), do: ["lib"]
## Multi-process collaboration
Mox supports multi-process collaboration via two mechanisms:
1. explicit allowances
2. global mode
The allowance mechanism can still run tests concurrently while
the global one doesn't. We explore both next.
### Explicit allowances
An allowance permits a child process to use the expectations and stubs
defined in the parent process while still being safe for async tests.
test "invokes add and mult from a task" do
MyApp.CalcMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:mult, fn x, y -> x * y end)
parent_pid = self()
Task.async(fn ->
MyApp.CalcMock |> allow(parent_pid, self())
assert MyApp.CalcMock.add(2, 3) == 5
assert MyApp.CalcMock.mult(2, 3) == 6
end)
|> Task.await
end
Note: if you're running on Elixir 1.8.0 or greater and your concurrency comes
from a `Task` then you don't need to add explicit allowances. Instead
`$callers` is used to determine the process that actually defined the
expectations.
### Global mode
Mox supports global mode, where any process can consume mocks and stubs
defined in your tests. To manually switch to global mode use:
set_mox_global()
which can be done as a setup callback:
setup :set_mox_global
setup :verify_on_exit!
test "invokes add and mult from a task" do
MyApp.CalcMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:mult, fn x, y -> x * y end)
Task.async(fn ->
assert MyApp.CalcMock.add(2, 3) == 5
assert MyApp.CalcMock.mult(2, 3) == 6
end)
|> Task.await
end
The global mode must always be explicitly set per test. By default
mocks run on `private` mode.
You can also automatically choose global or private mode depending on
if your tests run in async mode or not. In such case Mox will use
private mode when `async: true`, global mode otherwise:
setup :set_mox_from_context
### Blocking on expectations
If your mock is called in a different process than the test process,
in some cases there is a chance that the test will finish executing
before it has a chance to call the mock and meet the expectations.
Imagine this:
test "calling a mock from a different process" do
expect(MyApp.CalcMock, :add, fn x, y -> x + y end)
spawn(fn -> MyApp.CalcMock.add(4, 2) end)
verify!()
end
The test above has a race condition because there is a chance that the
`verify!/0` call will happen before the spawned process calls the mock.
In most cases, you don't control the spawning of the process so you can't
simply monitor the process to know when it dies in order to avoid this
race condition. In those cases, the way to go is to "sync up" with the
process that calls the mock by sending a message to the test process
from the expectation and using that to know when the expectation has been
called.
test "calling a mock from a different process" do
parent = self()
ref = make_ref()
expect(MyApp.CalcMock, :add, fn x, y ->
send(parent, {ref, :add})
x + y
end)
spawn(fn -> MyApp.CalcMock.add(4, 2) end)
assert_receive {^ref, :add}
verify!()
end
This way, we'll wait until the expectation is called before calling
`verify!/0`.
"""
defmodule UnexpectedCallError do
defexception [:message]
end
defmodule VerificationError do
defexception [:message]
end
@doc """
Sets the Mox to private mode, where mocks can be set and
consumed by the same process unless other processes are
explicitly allowed.
setup :set_mox_private
"""
def set_mox_private(_context \\ %{}), do: Mox.Server.set_mode(self(), :private)
@doc """
Sets the Mox to global mode, where mocks can be consumed
by any process.
setup :set_mox_global
An ExUnit case where tests use Mox in global mode cannot be
`async: true`.
"""
def set_mox_global(context \\ %{}) do
if Map.get(context, :async) do
raise "Mox cannot be set to global mode when the ExUnit case is async. " <>
"If you want to use Mox in global mode, remove \"async: true\" when using ExUnit.Case"
else
Mox.Server.set_mode(self(), :global)
end
end
@doc """
Chooses the Mox mode based on context. When `async: true` is used
the mode is `:private`, otherwise `:global` is chosen.
setup :set_mox_from_context
"""
def set_mox_from_context(%{async: true} = _context), do: set_mox_private()
def set_mox_from_context(_context), do: set_mox_global()
@doc """
Defines a mock with the given name `:for` the given behaviour(s).
Mox.defmock(MyMock, for: MyBehaviour)
With multiple behaviours:
Mox.defmock(MyMock, for: [MyBehaviour, MyOtherBehaviour])
## Skipping optional callbacks
By default, functions are created for all callbacks, including all optional
callbacks. But if for some reason you want to skip optional callbacks, you can
provide the list of callback names to skip (along with their arities) as
`:skip_optional_callbacks`:
Mox.defmock(MyMock, for: MyBehaviour, skip_optional_callbacks: [on_success: 2])
This will define a new mock (`MyMock`) that has a defined function for each
callback on `MyBehaviour` except for `on_success/2`. Note: you can only skip
optional callbacks, not required callbacks.
You can also pass `true` to skip all optional callbacks, or `false` to keep
the default of generating functions for all optional callbacks.
"""
def defmock(name, options) when is_atom(name) and is_list(options) do
behaviours =
case Keyword.fetch(options, :for) do
{:ok, mocks} -> List.wrap(mocks)
:error -> raise ArgumentError, ":for option is required on defmock"
end
skip_optional_callbacks = Keyword.get(options, :skip_optional_callbacks, [])
compile_header = generate_compile_time_dependency(behaviours)
callbacks_to_skip = validate_skip_optional_callbacks!(behaviours, skip_optional_callbacks)
mock_funs = generate_mock_funs(behaviours, callbacks_to_skip)
define_mock_module(name, behaviours, compile_header ++ mock_funs)
name
end
defp validate_behaviour!(behaviour) do
cond do
not Code.ensure_compiled?(behaviour) ->
raise ArgumentError,
"module #{inspect(behaviour)} is not available, please pass an existing module to :for"
not function_exported?(behaviour, :behaviour_info, 1) ->
raise ArgumentError,
"module #{inspect(behaviour)} is not a behaviour, please pass a behaviour to :for"
true ->
behaviour
end
end
defp generate_compile_time_dependency(behaviours) do
for behaviour <- behaviours do
validate_behaviour!(behaviour)
quote do
unquote(behaviour).module_info(:module)
end
end
end
defp generate_mock_funs(behaviours, callbacks_to_skip) do
for behaviour <- behaviours,
{fun, arity} <- behaviour.behaviour_info(:callbacks),
{fun, arity} not in callbacks_to_skip do
args = 0..arity |> Enum.to_list() |> tl() |> Enum.map(&Macro.var(:"arg#{&1}", Elixir))
quote do
def unquote(fun)(unquote_splicing(args)) do
Mox.__dispatch__(__MODULE__, unquote(fun), unquote(arity), unquote(args))
end
end
end
end
defp validate_skip_optional_callbacks!(behaviours, skip_optional_callbacks) do
all_optional_callbacks =
for behaviour <- behaviours,
{fun, arity} <- behaviour.behaviour_info(:optional_callbacks) do
{fun, arity}
end
case skip_optional_callbacks do
false ->
[]
true ->
all_optional_callbacks
skip_list when is_list(skip_list) ->
for callback <- skip_optional_callbacks, callback not in all_optional_callbacks do
raise ArgumentError,
"all entries in :skip_optional_callbacks must be an optional callback in one " <>
"of the behaviours specified in :for. #{inspect(callback)} was not in the " <>
"list of all optional callbacks: #{inspect(all_optional_callbacks)}"
end
skip_list
_ ->
raise ArgumentError, ":skip_optional_callbacks is required to be a list or boolean"
end
end
defp define_mock_module(name, behaviours, body) do
info =
quote do
def __mock_for__ do
unquote(behaviours)
end
end
Module.create(name, [info | body], Macro.Env.location(__ENV__))
end
@doc """
Expects the `name` in `mock` with arity given by `code`
to be invoked `n` times.
If you're calling your mock from an asynchronous process and want
to wait for the mock to be called, see the "Blocking on expectations"
section in the module documentation.
When `expect/4` is invoked, any previously declared `stub` for the same `name` and arity will
be removed. This ensures that `expect` will fail if the function is called more than `n` times.
If a `stub/3` is invoked **after** `expect/4` for the same `name` and arity, the stub will be
used after all expectations are fulfilled.
## Examples
To expect `MyMock.add/2` to be called once:
expect(MyMock, :add, fn x, y -> x + y end)
To expect `MyMock.add/2` to be called five times:
expect(MyMock, :add, 5, fn x, y -> x + y end)
To expect `MyMock.add/2` to not be called:
expect(MyMock, :add, 0, fn x, y -> :ok end)
`expect/4` can also be invoked multiple times for the same
name/arity, allowing you to give different behaviours on each
invocation:
MyMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:add, fn x, y -> x * y end)
"""
def expect(mock, name, n \\ 1, code)
when is_atom(mock) and is_atom(name) and is_integer(n) and n >= 0 and is_function(code) do
calls = List.duplicate(code, n)
add_expectation!(mock, name, code, {n, calls, nil})
mock
end
@doc """
Allows the `name` in `mock` with arity given by `code` to
be invoked zero or many times.
Unlike expectations, stubs are never verified.
If expectations and stubs are defined for the same function
and arity, the stub is invoked only after all expectations are
fulfilled.
## Examples
To allow `MyMock.add/2` to be called any number of times:
stub(MyMock, :add, fn x, y -> x + y end)
`stub/3` will overwrite any previous calls to `stub/3`.
"""
def stub(mock, name, code)
when is_atom(mock) and is_atom(name) and is_function(code) do
add_expectation!(mock, name, code, {0, [], code})
mock
end
@doc """
Stubs all functions described by the shared behaviours in the `mock` and `module`.
## Examples
defmodule Calculator do
@callback add(integer(), integer()) :: integer()
@callback mult(integer(), integer()) :: integer()
end
defmodule TestCalculator do
@behaviour Calculator
def add(a, b), do: a + b
def mult(a, b), do: a * b
end
defmock(CalcMock, for: Calculator)
stub_with(CalcMock, TestCalculator)
This is the same as calling `stub/3` for each behaviour in `CalcMock`:
stub(CalcMock, :add, &TestCalculator.add/2)
stub(CalcMock, :mult, &TestCalculator.mult/2)
"""
def stub_with(mock, module) when is_atom(mock) and is_atom(module) do
mock_behaviours = mock.__mock_for__()
behaviours =
case module_behaviours(module) do
[] ->
raise ArgumentError, "#{inspect(module)} does not implement any behaviour"
behaviours ->
case Enum.filter(behaviours, &(&1 in mock_behaviours)) do
[] ->
raise ArgumentError,
"#{inspect(module)} and #{inspect(mock)} do not share any behaviour"
common ->
common
end
end
for behaviour <- behaviours,
{fun, arity} <- behaviour.behaviour_info(:callbacks) do
stub(mock, fun, :erlang.make_fun(module, fun, arity))
end
mock
end
defp module_behaviours(module) do
module.module_info(:attributes)
|> Keyword.get_values(:behaviour)
|> List.flatten()
end
defp add_expectation!(mock, name, code, value) do
validate_mock!(mock)
arity = :erlang.fun_info(code)[:arity]
key = {mock, name, arity}
unless function_exported?(mock, name, arity) do
raise ArgumentError, "unknown function #{name}/#{arity} for mock #{inspect(mock)}"
end
case Mox.Server.add_expectation(self(), key, value) do
:ok ->
:ok
{:error, {:currently_allowed, owner_pid}} ->
inspected = inspect(self())
raise ArgumentError, """
cannot add expectations/stubs to #{inspect(mock)} in the current process (#{inspected}) \
because the process has been allowed by #{inspect(owner_pid)}. \
You cannot define expectations/stubs in a process that has been allowed
"""
{:error, {:not_global_owner, global_pid}} ->
inspected = inspect(self())
raise ArgumentError, """
cannot add expectations/stubs to #{inspect(mock)} in the current process (#{inspected}) \
because Mox is in global mode and the global process is #{inspect(global_pid)}. \
Only the process that set Mox to global can set expectations/stubs in global mode
"""
end
end
@doc """
Allows other processes to share expectations and stubs
defined by owner process.
## Examples
To allow `child_pid` to call any stubs or expectations defined for `MyMock`:
allow(MyMock, self(), child_pid)
`allow/3` also accepts named process or via references:
allow(MyMock, self(), SomeChildProcess)
"""
def allow(mock, owner_pid, allowed_via) when is_atom(mock) and is_pid(owner_pid) do
allowed_pid = GenServer.whereis(allowed_via)
if allowed_pid == owner_pid do
raise ArgumentError, "owner_pid and allowed_pid must be different"
end
case Mox.Server.allow(mock, owner_pid, allowed_pid) do
:ok ->
mock
{:error, {:already_allowed, actual_pid}} ->
raise ArgumentError, """
cannot allow #{inspect(allowed_pid)} to use #{inspect(mock)} from #{inspect(owner_pid)} \
because it is already allowed by #{inspect(actual_pid)}.
If you are seeing this error message, it is because you are either \
setting up allowances from different processes or your tests have \
async: true and you found a race condition where two different tests \
are allowing the same process
"""
{:error, :expectations_defined} ->
raise ArgumentError, """
cannot allow #{inspect(allowed_pid)} to use #{inspect(mock)} from #{inspect(owner_pid)} \
because the process has already defined its own expectations/stubs
"""
{:error, :in_global_mode} ->
# Already allowed
mock
end
end
@doc """
Verifies the current process after it exits.
If you want to verify expectations for all tests, you can use
`verify_on_exit!/1` as a setup callback:
setup :verify_on_exit!
"""
def verify_on_exit!(_context \\ %{}) do
pid = self()
Mox.Server.verify_on_exit(pid)
ExUnit.Callbacks.on_exit(Mox, fn ->
verify_mock_or_all!(pid, :all)
Mox.Server.exit(pid)
end)
end
@doc """
Verifies that all expectations set by the current process
have been called.
"""
def verify! do
verify_mock_or_all!(self(), :all)
end
@doc """
Verifies that all expectations in `mock` have been called.
"""
def verify!(mock) do
validate_mock!(mock)
verify_mock_or_all!(self(), mock)
end
defp verify_mock_or_all!(pid, mock) do
pending = Mox.Server.verify(pid, mock)
messages =
for {{module, name, arity}, total, pending} <- pending do
mfa = Exception.format_mfa(module, name, arity)
called = total - pending
" * expected #{mfa} to be invoked #{times(total)} but it was invoked #{times(called)}"
end
if messages != [] do
raise VerificationError,
"error while verifying mocks for #{inspect(pid)}:\n\n" <> Enum.join(messages, "\n")
end
:ok
end
defp validate_mock!(mock) do
cond do
not Code.ensure_compiled?(mock) ->
raise ArgumentError, "module #{inspect(mock)} is not available"
not function_exported?(mock, :__mock_for__, 0) ->
raise ArgumentError, "module #{inspect(mock)} is not a mock"
true ->
:ok
end
end
@doc false
def __dispatch__(mock, name, arity, args) do
all_callers = [self() | caller_pids()]
case Mox.Server.fetch_fun_to_dispatch(all_callers, {mock, name, arity}) do
:no_expectation ->
mfa = Exception.format_mfa(mock, name, arity)
raise UnexpectedCallError,
"no expectation defined for #{mfa} in #{format_process()} with args #{inspect(args)}"
{:out_of_expectations, count} ->
mfa = Exception.format_mfa(mock, name, arity)
raise UnexpectedCallError,
"expected #{mfa} to be called #{times(count)} but it has been " <>
"called #{times(count + 1)} in process #{format_process()}"
{:ok, fun_to_call} ->
apply(fun_to_call, args)
end
end
defp times(1), do: "once"
defp times(n), do: "#{n} times"
defp format_process do
callers = caller_pids()
"process #{inspect(self())}" <>
if Enum.empty?(callers) do
""
else
" (or in its callers #{inspect(callers)})"
end
end
# Find the pid of the actual caller
defp caller_pids do
case Process.get(:"$callers") do
nil -> []
pids when is_list(pids) -> pids
end
end
end
|
lib/mox.ex
| 0.861786 | 0.679664 |
mox.ex
|
starcoder
|
defmodule RepoCache.Log do
@moduledoc """
Responsible for logging hit/miss rates for the different caches.
Maintains an ETS table of hit/miss rates, which looks like:
{{repo_module, function_name}, hit_count, miss_count}
Every 60 seconds, we reset the counts and log the rates, along with the
sizes of the tables.
"""
use GenServer
require Logger
@table __MODULE__
def start_link([]) do
GenServer.start_link(__MODULE__, [])
end
def log(hit_or_miss, mod, name) do
do_log({mod, name}, hit_or_miss)
rescue
ArgumentError ->
:error
end
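# Illustrative call (module and function names are hypothetical):
#   log(:hit, Repo, :get_schedules) bumps the hit counter in
#   {{Repo, :get_schedules}, hits, misses}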
defp do_log(key, :hit) do
default = {key, 1, 0}
:ets.update_counter(
@table,
key,
{2, 1},
default
)
:ok
end
defp do_log(key, :miss) do
default = {key, 0, 1}
:ets.update_counter(
@table,
key,
{3, 1},
default
)
:ok
end
def all do
:ets.tab2list(@table)
end
def init([]) do
table_opts = [:named_table, :public, :set, {:write_concurrency, true}]
@table =
try do
:ets.new(@table, table_opts)
rescue
ArgumentError ->
true = :ets.delete(@table)
:ets.new(@table, table_opts)
end
schedule_log()
{:ok, []}
end
def schedule_log do
Process.send_after(self(), :output_log, 60_000)
end
def handle_info(:output_log, state) do
_ =
all()
|> reset_table()
|> output_cache_hit_rates()
|> output_sizes()
schedule_log()
{:noreply, state, :hibernate}
end
def handle_info(msg, state) do
_ = Logger.error("module=#{__MODULE__} error=unexpected_message message=#{inspect(msg)}")
{:noreply, state}
end
def reset_table(values) do
for {key, hit, miss} <- values do
# remove the previous hit/miss count from each
_ = :ets.update_counter(@table, key, [{2, -hit}, {3, -miss}])
end
values
end
def output_cache_hit_rates(values) do
for value <- values do
_ =
Logger.info(fn ->
{{mod, name}, hit, miss} = value
total = hit + miss
hit_rate =
case total do
0 -> 0.0
_ -> Float.round(hit / total, 2)
end
"repocache_report mod=#{mod} fun=#{name} hit=#{hit} miss=#{miss} total=#{total} hit_rate=#{
hit_rate
}"
end)
end
values
end
def output_sizes(values) do
modules = values |> Enum.map(&elem(elem(&1, 0), 0)) |> Enum.uniq()
for mod <- modules do
{:ok, table} = ets_table(mod)
_ =
Logger.info(fn ->
size = :ets.info(table, :size)
memory = :ets.info(table, :memory)
"repocache_report table=#{mod} size=#{size} memory=#{memory}"
end)
end
values
end
defp ets_table(mod) do
{:ok, ConCache.ets(mod)}
rescue
_ -> :error
end
end
|
apps/repo_cache/lib/log.ex
| 0.66628 | 0.468365 |
log.ex
|
starcoder
|
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
defmodule CFXXL.Client do
@moduledoc """
Handle the client to be passed as parameter when using `CFXXL` functions.
"""
@api_prefix "api/v1/cfssl"
@accepted_opts [:timeout, :recv_timeout, :proxy, :proxy_auth, :ssl]
@doc """
The struct representing a Client, it contains the endpoint and options
to be passed when making a request.
"""
defstruct endpoint: "http://localhost:8888/#{@api_prefix}", options: []
@doc """
Returns a default client
## Examples
```
iex> CFXXL.Client.new()
%CFXXL.Client{endpoint: "http://localhost:8888/api/v1/cfssl", options: []}
```
"""
def new(), do: %__MODULE__{}
@doc """
Creates a client with the given parameters.
## Arguments
* `base_url`: the base URL to reach CFSSL, without the API prefix.
* `options`: a keyword list with options passed to HTTPoison when making
a request.
## Options
* `:timeout`
* `:recv_timeout`
* `:proxy`
* `:proxy_auth`
* `:ssl`
For the documentation of the options see `HTTPoison.request/5`
## Examples
```
iex> CFXXL.Client.new("https://ca.example.com:10000", recv_timeout: 15000)
%CFXXL.Client{endpoint: "https://ca.example.com:10000/api/v1/cfssl", options: [recv_timeout: 15000]}
```
"""
def new(base_url, options \\ []) do
endpoint = if String.ends_with?(base_url, "/") do
"#{base_url}#{@api_prefix}"
else
"#{base_url}/#{@api_prefix}"
end
filtered_opts =
options
|> Enum.filter(fn {k, _} -> k in @accepted_opts end)
%__MODULE__{endpoint: endpoint, options: filtered_opts}
end
end
|
lib/cfxxl/client.ex
| 0.747339 | 0.559771 |
client.ex
|
starcoder
|
defmodule CImg do
@moduledoc """
CImg image processing extension.
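A minimal usage sketch (file names are illustrative):
    CImg.create("input.jpg")
    |> CImg.get_resize([256, 256])
    |> CImg.save("resized.jpg")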
"""
alias __MODULE__
# image object
defstruct handle: nil, shape: {}
@doc """
load the image file and create new image object.
"""
def create(fname) do
with {:ok, h, [shape]} <- CImgNIF.cimg_load(fname),
do: %CImg{handle: h, shape: shape}
end
def create(x, y, z, c, val) when is_integer(val) do
with {:ok, h, [shape]} <- CImgNIF.cimg_create(x, y, z, c, val),
do: %CImg{handle: h, shape: shape}
end
@doc "save image object to the file"
defdelegate save(cimg, fname),
to: CImgNIF, as: :cimg_save
@doc "resize the image object"
def resize(cimg, [x, y]), do: CImgNIF.cimg_resize(cimg, x, y)
@doc "get a image object resized [x, y]"
def get_resize(cimg, [x, y]) do
with {:ok, resize, [shape]} <- CImgNIF.cimg_get_resize(cimg, x, y),
do: %CImg{handle: resize, shape: shape}
end
@doc "get a image object packed into the box[x,y]"
def get_packed(cimg, [x, y], fill) do
with {:ok, packed, [shape]} <- CImgNIF.cimg_get_packed(cimg, x, y, fill),
do: %CImg{handle: packed, shape: shape}
end
defdelegate blur(cimg, sigma, boundary_conditions \\ true, is_gaussian \\ true),
to: CImgNIF, as: :cimg_blur
@doc "mirroring the image object on the axis"
def mirror(cimg, axis) when axis in [:x, :y] do
CImgNIF.cimg_mirror(cimg, axis)
end
@doc """
create new gray image object from the image object
"""
def get_gray(cimg, opt_pn \\ 0) do
with {:ok, gray, [shape]} <- CImgNIF.cimg_get_gray(cimg, opt_pn),
do: %CImg{handle: gray, shape: shape}
end
def get_crop(cimg, x0, y0, z0, c0, x1, y1, z1, c1, boundary_conditions \\ 0) do
with {:ok, crop, [shape]} <- CImgNIF.cimg_get_crop(cimg, x0, y0, z0, c0, x1, y1, z1, c1, boundary_conditions),
do: %CImg{handle: crop, shape: shape}
end
@doc "get the flat binary from the image object"
def to_flatbin(cimg, nchw \\ false, bgr \\ false) do
with {:ok, bin} <- CImgNIF.cimg_get_flatbin(cimg, nchw, bgr),
do: %{descr: "<u1", fortran_order: false, shape: {size(cimg)}, data: bin}
end
@doc "get the float32 flat binary from the image object"
def to_flatf4(cimg, nchw \\ false, bgr \\ false) do
with {:ok, bin} <- CImgNIF.cimg_get_flatf4(cimg, nchw, bgr, false),
do: %{descr: "<f4", fortran_order: false, shape: {size(cimg)}, data: bin}
end
@doc "get the normalized float32 flat binary from the image object"
def to_flatnorm(cimg, nchw \\ false, bgr \\ false) do
with {:ok, bin} <- CImgNIF.cimg_get_flatf4(cimg, nchw, bgr, true),
do: %{descr: "<f4", fortran_order: false, shape: {size(cimg)}, data: bin}
end
@doc "create new image object from byte binaries."
def create_from_u8bin(x, y, z, c, u8) when is_binary(u8) do
with {:ok, h, [shape]} <- CImgNIF.cimg_from_u8bin(x, y, z, c, u8),
do: %CImg{handle: h, shape: shape}
end
@doc "create new image object from float binaries."
def create_from_f4bin(x, y, z, c, f4) when is_binary(f4) do
with {:ok, h, [shape]} <- CImgNIF.cimg_from_f4bin(x, y, z, c, f4),
do: %CImg{handle: h, shape: shape}
end
@doc """
draw the colored box on the image object
"""
def draw_box(cimg, x0, y0, x1, y1, rgb) do
CImgNIF.cimg_draw_box(cimg, x0, y0, x1, y1, rgb)
end
defdelegate display(cimg, disp),
to: CImgNIF, as: :cimg_display
defdelegate fill(cimg, val),
to: CImgNIF, as: :cimg_fill
defdelegate draw_graph(cimg, data, color, opacity \\ 1.0, plot_type \\ 1, vertex_type \\ 1, ymin \\ 0.0, ymax \\ 0.0, pattern \\ 0xFFFFFFFF),
to: CImgNIF, as: :cimg_draw_graph
defdelegate set(val, cimg, x, y \\ 0, z \\ 0, c \\ 0),
to: CImgNIF, as: :cimg_set
defdelegate get(cimg, x, y \\ 0, z \\ 0, c \\ 0),
to: CImgNIF, as: :cimg_get
defdelegate assign(cimg, cimg_src),
to: CImgNIF, as: :cimg_assign
defdelegate draw_circle(cimg, x0, y0, radius, color, opacity \\ 1.0),
to: CImgNIF, as: :cimg_draw_circle
defdelegate draw_circle(cimg, x0, y0, radius, color, opacity, pattern),
to: CImgNIF, as: :cimg_draw_circle
defdelegate shape(cimg),
to: CImgNIF, as: :cimg_shape
defdelegate size(cimg),
to: CImgNIF, as: :cimg_size
defdelegate transfer(cimg, cimg_src, mapping, cx \\ 0, cy \\ 0, cz \\ 0),
to: CImgNIF, as: :cimg_transfer
end
defmodule CImgMap do
alias __MODULE__
# image object
defstruct handle: nil, shape: {}
@doc "load the image file and create new image object."
def create(x, y, z, c, list) when is_list(list) do
with {:ok, h, [shape]} <- CImgNIF.cimgmap_create(x, y, z, c, list),
do: %CImgMap{handle: h, shape: shape}
end
defdelegate set(val, cimgmap, x, y \\ 0, z \\ 0, c \\ 0),
to: CImgNIF, as: :cimgmap_set
defdelegate get(cimgmap, x, y \\ 0, z \\ 0, c \\ 0),
to: CImgNIF, as: :cimgmap_get
end
defmodule CImgDisplay do
alias __MODULE__
defstruct handle: nil
def create(%CImg{} = cimg, title \\ "", normalization \\ 3, is_fullscreen \\ false, is_closed \\ false) do
with {:ok, h, _} <- CImgNIF.cimgdisplay_u8(cimg, title, normalization, is_fullscreen, is_closed),
do: %CImgDisplay{handle: h}
end
defdelegate wait(cimgdisplay),
to: CImgNIF, as: :cimgdisplay_wait
defdelegate wait(cimgdisplay, milliseconds),
to: CImgNIF, as: :cimgdisplay_wait
defdelegate is_closed(cimgdisplay),
to: CImgNIF, as: :cimgdisplay_is_closed
defdelegate button(cimgdisplay),
to: CImgNIF, as: :cimgdisplay_button
defdelegate mouse_y(cimgdisplay),
to: CImgNIF, as: :cimgdisplay_mouse_y
end
defmodule CImgNIF do
@moduledoc """
NIFs entries.
"""
# loading NIF library
@on_load :load_nif
def load_nif do
nif_file = Application.app_dir(:cimg, "priv/cimg_nif")
:erlang.load_nif(nif_file, 0)
end
# stub implementations for NIFs (fallback)
def cimg_create(_x, _y, _z, _c, _v),
do: raise("NIF cimg_create/5 not implemented")
def cimg_create(_cimgu8),
do: raise("NIF cimg_create/1 not implemented")
def cimg_clear(_cimgu8),
do: raise("NIF cimg_clear/1 not implemented")
def cimg_load(_s),
do: raise("NIF cimg_load/1 not implemented")
def cimg_save(_c, _s),
do: raise("NIF cimg_save/2 not implemented")
def cimg_resize(_c, _x, _y),
do: raise("NIF cimg_resize/3 not implemented")
def cimg_get_resize(_c, _x, _y),
do: raise("NIF cimg_resize/3 not implemented")
def cimg_get_packed(_c, _x, _y, _v),
do: raise("NIF cimg_packed/4 not implemented")
def cimg_mirror(_c, _axis),
do: raise("NIF cimg_mirror/2 not implemented")
def cimg_get_gray(_c, _pn),
do: raise("NIF cimg_get_gray/2 not implemented")
def cimg_blur(_c, _s, _b, _g),
do: raise("NIF cimg_blur/4 not implemented")
def cimg_get_crop(_c, _x0, _y0, _z0, _c0, _x1, _y1, _z1, _c1, _b),
do: raise("NIF cimg_get_crop/10 not implemented")
def cimg_fill(_c, _val),
do: raise("NIF cimg_fill/2 not implemented")
def cimg_draw_graph(_c, _d, _color, _o, _p, _v, _ymin, _ymax, _pat),
do: raise("NIF cimg_draw_graph/9 not implemented")
def cimg_get_flatbin(_c, _nchw, _bgr),
do: raise("NIF cimg_get_flatbin/3 not implemented")
def cimg_get_flatf4(_c, _nchw, _bgr, _norm),
do: raise("NIF cimg_get_flatf4/4 not implemented")
def cimg_draw_box(_c, _x0, _y0, _x1, _y1, _rgb),
do: raise("NIF cimg_draw_box/6 not implemented")
def cimg_display(_cimgu8, _disp),
do: raise("NIF cimg_display/2 not implemented")
def cimg_set(_val, _cimgu8, _x, _y, _z, _c),
do: raise("NIF cimg_set/6 not implemented")
def cimg_get(_cimgu8, _x, _y, _z, _c),
do: raise("NIF cimg_get/5 not implemented")
def cimg_assign(_cimgu8, _cimgu8_src),
do: raise("NIF cimg_assign/2 not implemented")
def cimg_draw_circle(_cimgu8, _x0, _y0, _radius, _color, _opacity),
do: raise("NIF cimg_draw_circle/6 not implemented")
def cimg_draw_circle(_cimgu8, _x0, _y0, _radius, _color, _opacity, _pattern),
do: raise("NIF cimg_draw_circle/7 not implemented")
def cimg_shape(_cimgu8),
do: raise("NIF cimg_shape/1 not implemented")
def cimg_size(_cimgu8),
do: raise("NIF cimg_size/1 not implemented")
def cimg_transfer(_cimgu8, _cimgu8_src, _mapping, _cx, _cy, _cz),
do: raise("NIF cimg_transfer/6 not implemented")
def cimg_from_u8bin(_x, _y, _z, _c, _f4),
do: raise("NIF cimg_from_u8bin/5 not implemented")
def cimg_from_f4bin(_x, _y, _z, _c, _f4),
do: raise("NIF cimg_from_f4bin/5 not implemented")
def cimgmap_create(_x, _y, _z, _c, _list),
do: raise("NIF cimgmap_create/5 not implemented")
def cimgmap_set(_val, _cimgu8, _x, _y, _z, _c),
do: raise("NIF cimgmap_set/6 not implemented")
def cimgmap_get(_cimgu8, _x, _y, _z, _c),
do: raise("NIF cimgmap_get/5 not implemented")
def cimgdisplay_u8(_cimgu8, _title, _normalization, _is_fullscreen, _is_close),
do: raise("NIF cimgdisplay_u8/5 not implemented")
def cimgdisplay_wait(_disp),
do: raise("NIF cimgdisplay_wait/1 not implemented")
def cimgdisplay_wait(_disp, _milliseconds),
do: raise("NIF cimgdisplay_wait/2 not implemented")
def cimgdisplay_is_closed(_disp),
do: raise("NIF cimgdisplay_is_closed/1 not implemented")
def cimgdisplay_button(_disp),
do: raise("NIF cimgdisplay_button/1 not implemented")
def cimgdisplay_mouse_y(_disp),
do: raise("NIF cimgdisplay_mouse_y/1 not implemented")
end
|
lib/cimg.ex
| 0.806738 | 0.552238 |
cimg.ex
|
starcoder
|
defmodule BlueHeron.ErrorCode do
@moduledoc """
Defines all error codes and functions to map between error code and name.
> When a command fails, or an LMP, LL, or AMP message needs to indicate a failure, error codes
> are used to indicate the reason for the error. Error codes have a size of one octet.
Reference: Version 5.0, Vol 2, Part D, 1
"""
# Reference: Version 5.2, Vol 1, Part F, 1.3
@error_codes [
{0x00, :ok, "Success"},
{0x01, :unknown_hci_command, "Unknown HCI Command"},
{0x02, :unknown_connection_id, "Unknown Connection Identifier"},
{0x03, :hardware_failure, "Hardware Failure"},
{0x04, :page_timeout, "Page Timeout"},
{0x05, :auth_failure, "Authentication Failure"},
{0x06, :pin_or_key_missing, "PIN or Key Missing"},
{0x07, :memory_capacity_exceeded, "Memory Capacity Exceeded"},
{0x08, :connection_timeout, "Connection Timeout"},
{0x09, :connection_limit_exceeded, "Connection Limit Exceeded"},
{0x0A, :synchronous_connection_limit_to_a_device_exceeded,
"Synchronous Connection Limit To A Device Exceeded"},
{0x0B, :connection_already_exists, "Connection Already Exists"},
{0x0C, :command_disallowed, "Command Disallowed"},
{0x0D, :connection_rejected_due_to_limited_resources,
"Connection Rejected due to Limited Resources"},
{0x0E, :connection_rejected_due_to_security_reasons,
"Connection Rejected Due To Security Reasons"},
{0x0F, :connection_rejected_due_to_unacceptable_bd_addr,
"Connection Rejected due to Unacceptable BD_ADDR"},
{0x10, :connection_accept_timeout_exceeded, "Connection Accept Timeout Exceeded"},
{0x11, :unsupported_feature_or_parameter_value, "Unsupported Feature or Parameter Value"},
{0x12, :invalid_hci_command_parameters, "Invalid HCI Command Parameters"},
{0x13, :remote_user_terminated_connection, "Remote User Terminated Connection"},
{0x14, :remote_device_terminated_connection_due_to_low_resources,
"Remote Device Terminated Connection due to Low Resources"},
{0x15, :remote_device_terminated_connection_due_to_power_off,
"Remote Device Terminated Connection due to Power Off"},
{0x16, :connection_terminated_by_local_host, "Connection Terminated By Local Host"},
{0x17, :repeated_attempts, "Repeated Attempts"},
{0x18, :pairing_not_allowed, "Pairing Not Allowed"},
{0x19, :unknown_lmp_pdu, "Unknown LMP PDU"},
{0x1A, :unsupported_remote_feature, "Unsupported Remote Feature / Unsupported LMP Feature"},
{0x1B, :sco_offset_rejected, "SCO Offset Rejected"},
{0x1C, :sco_interval_rejected, "SCO Interval Rejected"},
{0x1D, :sco_air_mode_rejected, "SCO Air Mode Rejected"},
{0x1E, :invalid_lmp_parameters, "Invalid LMP Parameters / Invalid LL Parameters"},
{0x1F, :unspecified_error, "Unspecified Error"},
{0x20, :unsupported_lmp_parameter_value,
"Unsupported LMP Parameter Value / Unsupported LL Parameter Value"},
{0x21, :role_change_not_allowed, "Role Change Not Allowed"},
{0x22, :lmp_response_timeout, "LMP Response Timeout / LL Response Timeout"},
{0x23, :lmp_error_transaction_collision,
"LMP Error Transaction Collision / LL Procedure Collision"},
{0x24, :lmp_pdu_not_allowed, "LMP PDU Not Allowed"},
{0x25, :encryption_mode_not_acceptable, "Encryption Mode Not Acceptable"},
{0x26, :link_key_cannot_be_changed, "Link Key cannot be Changed"},
{0x27, :requested_qos_not_supported, "Requested QoS Not Supported"},
{0x28, :instant_passed, "Instant Passed"},
{0x29, :pairing_with_unit_key_not_supported, "Pairing With Unit Key Not Supported"},
{0x2A, :different_transaction_collision, "Different Transaction Collision"},
{0x2B, :reserved, "Reserved for Future Use (0x2B)"},
{0x2C, :qos_unacceptable_parameter, "QoS Unacceptable Parameter"},
{0x2D, :qos_rejected, "QoS Rejected"},
{0x2E, :channel_classification_not_supported, "Channel Classification Not Supported"},
{0x2F, :insufficient_security, "Insufficient Security"},
{0x30, :parameter_out_of_mandatory_range, "Parameter Out Of Mandatory Range"},
{0x31, :reserved, "Reserved for Future Use (0x31)"},
{0x32, :role_switch_pending, "Role Switch Pending"},
{0x33, :reserved, "Reserved for Future Use (0x33)"},
{0x34, :reserved_slot_violation, "Reserved Slot Violation"},
{0x35, :role_switch_failed, "Role Switch Failed"},
{0x36, :extended_inquiry_response_too_large, "Extended Inquiry Response Too Large"},
{0x37, :secure_simple_pairing_not_supported, "Secure Simple Pairing Not Supported By Host"},
{0x38, :host_busy_pairing, "Host Busy - Pairing"},
{0x39, :connection_rejected_no_suitable_channel,
"Connection Rejected due to No Suitable Channel Found"},
{0x3A, :controller_busy, "Controller Busy"},
{0x3B, :unacceptable_connection_parameters, "Unacceptable Connection Parameters"},
{0x3C, :advertising_timeout, "Advertising Timeout"},
{0x3D, :connection_terminated_due_to_mic_failure, "Connection Terminated due to MIC Failure"},
{0x3E, :connection_failed_to_be_established, "Connection Failed to be Established"},
{0x3F, :mac_connection_failed, "MAC Connection Failed"},
{0x40, :course_clock_adjustment_rejected,
"Coarse Clock Adjustment Rejected but Will Try to Adjust Using Clock Dragging"},
{0x41, :type0_submap_not_defined, "Type0 Submap Not Defined"},
{0x42, :unknown_advertising_identifier, "Unknown Advertising Identifier"},
{0x43, :limit_reached, "Limit Reached"},
{0x44, :operation_cancelled_by_host, "Operation Cancelled by Host"},
{0x45, :packet_too_long, "Packet Too Long"}
]
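@doc """
Maps an error code to its atom name, e.g. `to_atom(0x05)` gives `:auth_failure`;
unknown codes map to `:unknown`.
"""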
@spec to_atom(non_neg_integer()) :: atom()
def to_atom(code) when is_integer(code) do
case List.keyfind(@error_codes, code, 0) do
{_code, atom, _description} -> atom
nil -> :unknown
end
end
@spec to_code(non_neg_integer() | atom()) :: non_neg_integer() | :error
def to_code(status) when is_atom(status) do
case List.keyfind(@error_codes, status, 1) do
{code, _status, _description} -> code
nil -> :error
end
end
def to_code(status) when is_integer(status), do: status
@spec to_code!(non_neg_integer() | atom()) :: non_neg_integer()
def to_code!(status) do
case to_code(status) do
:error -> raise "[#{inspect(__MODULE__)}] No code for #{inspect(status)}"
code -> code
end
end
@spec to_string(atom()) :: String.t() | :error
def to_string(atom) when is_atom(atom) do
case List.keyfind(@error_codes, atom, 1) do
{_code, _status, description} -> description
nil -> :error
end
end
end
|
lib/blue_heron/error_code.ex
| 0.796094 | 0.462716 |
error_code.ex
|
starcoder
|
defmodule Day11 do
def part1(program) do
robot = operate_robot(program)
Robot.num_painted_panels(robot)
end
def part2(program) do
robot = operate_robot(program, %{{0,0} => 1})
Robot.draw_grid(robot)
end
defp operate_robot(program, initial_grid \\ %{}) do
machine = Intcode.new(program)
robot = spawn(fn -> robot(machine, initial_grid) end)
Intcode.set_sink(machine, robot)
Intcode.go(machine)
send(robot, :go)
receive do
{:halted, ^machine} ->
Intcode.terminate(machine)
send(robot, {:done, self()})
receive do
robot -> robot
end
end
end
defp robot(machine, initial_grid) do
robot = Robot.new(initial_grid)
receive do
:go ->
robot_loop(robot, machine)
end
end
defp robot_loop(robot, machine) do
color = Robot.read_color(robot)
send(machine, color)
receive do
color ->
case color do
{:done, from} ->
send(from, robot)
_ ->
receive do
turn ->
robot = Robot.move(robot, color, turn)
robot_loop(robot, machine)
end
end
end
end
# The following functions are only used by the test suite.
def test_robot(moves) do
robot = Robot.new()
robot = move_robot(robot, moves)
Robot.num_painted_panels(robot)
end
defp move_robot(robot, [color, turn | moves]) do
robot = Robot.move(robot, color, turn)
move_robot(robot, moves)
end
defp move_robot(robot, []), do: robot
end
defmodule Intcode do
def new(program) do
spawn(fn -> machine(program) end)
end
def set_sink(machine, sink) do
send(machine, {:set_sink, sink})
end
def go(machine) do
send(machine, {:go, self()})
end
def terminate(machine) do
send(machine, :terminate)
end
defp machine(input) do
memory = read_program(input)
machine_loop(memory)
end
defp machine_loop(memory) do
receive do
{:set_sink, sink} ->
memory = Map.put(memory, :sink, sink)
machine_loop(memory)
{:go, from} ->
memory = execute(memory)
send(from, {:halted, self()})
machine_loop(memory)
:terminate ->
nil
end
end
defp execute(memory, ip \\ 0) do
{opcode, modes} = fetch_opcode(memory, ip)
case opcode do
1 ->
memory = exec_arith_op(&+/2, modes, memory, ip)
execute(memory, ip + 4)
2 ->
memory = exec_arith_op(&*/2, modes, memory, ip)
execute(memory, ip + 4)
3 ->
memory = exec_input(modes, memory, ip)
execute(memory, ip + 2)
4 ->
memory = exec_output(modes, memory, ip)
execute(memory, ip + 2)
5 ->
ip = exec_if(&(&1 !== 0), modes, memory, ip)
execute(memory, ip)
6 ->
ip = exec_if(&(&1 === 0), modes, memory, ip)
execute(memory, ip)
7 ->
memory = exec_cond(&(&1 < &2), modes, memory, ip)
execute(memory, ip + 4)
8 ->
memory = exec_cond(&(&1 === &2), modes, memory, ip)
execute(memory, ip + 4)
9 ->
memory = exec_inc_rel_base(modes, memory, ip)
execute(memory, ip + 2)
99 ->
memory
end
end
defp exec_arith_op(op, modes, memory, ip) do
[in1, in2] = read_operand_values(memory, ip + 1, modes, 2)
out_addr = read_out_address(memory, div(modes, 100), ip + 3)
result = op.(in1, in2)
write(memory, out_addr, result)
end
defp exec_input(modes, memory, ip) do
out_addr = read_out_address(memory, modes, ip + 1)
receive do
value ->
write(memory, out_addr, value)
end
end
defp exec_output(modes, memory, ip) do
[value] = read_operand_values(memory, ip + 1, modes, 1)
sink = Map.fetch!(memory, :sink)
send(sink, value)
memory
end
defp exec_if(op, modes, memory, ip) do
[value, new_ip] = read_operand_values(memory, ip + 1, modes, 2)
case op.(value) do
true -> new_ip
false -> ip + 3
end
end
defp exec_cond(op, modes, memory, ip) do
[operand1, operand2] = read_operand_values(memory, ip + 1, modes, 2)
out_addr = read_out_address(memory, div(modes, 100), ip + 3)
result = case op.(operand1, operand2) do
true -> 1
false -> 0
end
write(memory, out_addr, result)
end
defp exec_inc_rel_base(modes, memory, ip) do
[offset] = read_operand_values(memory, ip + 1, modes, 1)
base = get_rel_base(memory) + offset
Map.put(memory, :rel_base, base)
end
defp read_operand_values(_memory, _addr, _modes, 0), do: []
defp read_operand_values(memory, addr, modes, n) do
operand = read(memory, addr)
operand = case rem(modes, 10) do
0 -> read(memory, operand)
1 -> operand
2 -> read(memory, operand + get_rel_base(memory))
end
[operand | read_operand_values(memory, addr + 1, div(modes, 10), n - 1)]
end
defp read_out_address(memory, modes, addr) do
out_addr = read(memory, addr)
case modes do
0 -> out_addr
2 -> get_rel_base(memory) + out_addr
end
end
defp fetch_opcode(memory, ip) do
opcode = read(memory, ip)
modes = div(opcode, 100)
opcode = rem(opcode, 100)
{opcode, modes}
end
defp get_rel_base(memory) do
Map.get(memory, :rel_base, 0)
end
defp read(memory, addr) do
Map.get(memory, addr, 0)
end
defp write(memory, addr, value) do
Map.put(memory, addr, value)
end
defp read_program(input) do
String.split(input, ",")
|> Stream.map(&String.to_integer/1)
|> Stream.with_index
|> Stream.map(fn {code, index} -> {index, code} end)
|> Map.new
end
end
defmodule Robot do
defstruct position: {0, 0}, direction: {0, 1}, grid: %{}
def new(grid \\ %{}) do
%Robot{grid: grid}
end
def read_color(robot) do
Map.get(robot.grid, robot.position, 0)
end
def move(robot, color, turn) do
pos = robot.position
grid = Map.put(robot.grid, pos, color)
{dx, dy} = robot.direction
dir = case turn do
0 -> {-dy, dx}
1 -> {dy, -dx}
end
pos = vec_add(pos, dir)
%{robot | grid: grid, direction: dir, position: pos}
end
def num_painted_panels(robot) do
map_size(robot.grid)
end
def draw_grid(robot) do
IO.write("\n")
grid = robot.grid
{{min_col, _}, {max_col, _}} =
grid
|> Map.keys
|> Enum.min_max_by(&(elem(&1, 0)))
grid
|> Enum.group_by(fn {{_col, row}, _color} -> row end)
|> Enum.sort_by(fn {row, _} -> -row end)
|> Enum.map(fn {_, row} -> draw_row(row, min_col..max_col) end)
|> Enum.intersperse("\n")
|> IO.write
IO.write("\n")
end
defp draw_row(row, col_range) do
row = Enum.map(row, fn {{col, _row}, color} -> {col, color} end)
|> Map.new
col_range
|> Enum.map(fn col ->
case Map.get(row, col, 0) do
0 -> ?\s
1 -> ?\*
end
end)
end
defp vec_add({x1, y1}, {x2, y2}), do: {x1 + x2, y1 + y2}
end
|
day11/lib/day11.ex
| 0.586523 | 0.555496 |
day11.ex
|
starcoder
|
defmodule Pharams do
@moduledoc """
Functions and macros for validating requests to Phoenix
Controllers.
"""
alias Pharams.Utils
@doc """
Takes a nested struct data structure and turns it into a map with
atoms as keys.
"""
def schema_to_atom_map(map) when is_map(map) do
map
|> Map.delete(:__struct__)
|> Map.delete(:__meta__)
|> Enum.map(fn
{key, %{__struct__: _} = value} ->
{key, schema_to_atom_map(value)}
{key, val} when is_list(val) ->
{key,
Enum.map(val, fn entry ->
schema_to_atom_map(entry)
end)}
key_val ->
key_val
end)
|> Map.new()
end
def schema_to_atom_map(val) do
val
end
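# Illustrative example of the conversion described above (the %UserParams{} and
# %AddressParams{} schema structs are hypothetical):
#
#   %UserParams{name: "Jane", address: %AddressParams{city: "Oslo"}}
#   |> Pharams.schema_to_atom_map()
#   #=> %{name: "Jane", address: %{city: "Oslo"}}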
@doc """
Takes a nested struct data structure and turns it into a map with
strings as keys
"""
def schema_to_string_map(map) when is_map(map) do
map
|> Map.delete(:__struct__)
|> Map.delete(:__meta__)
|> Enum.map(fn
{key, %{__struct__: _} = value} ->
{Atom.to_string(key), schema_to_string_map(value)}
{key, val} when is_list(val) ->
{Atom.to_string(key),
Enum.map(val, fn entry ->
schema_to_string_map(entry)
end)}
{key, val} ->
{Atom.to_string(key), val}
end)
|> Map.new()
end
def schema_to_string_map(val) do
val
end
@doc """
Go through the map of values and remove all fields which are nil
"""
def drop_nil_fields(map) when is_map(map) do
map
|> Enum.reduce(
%{},
fn {key, val}, acc ->
val = drop_nil_fields(val)
if empty?(val) do
acc
else
Map.put(acc, key, val)
end
end
)
end
def drop_nil_fields(list) when is_list(list) do
list
|> Enum.reduce([], fn elem, acc ->
elem = drop_nil_fields(elem)
if empty?(elem) do
acc
else
[elem | acc]
end
end)
|> Enum.reverse()
end
def drop_nil_fields(val), do: val
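# Illustrative example of the pruning described above (plain data, no schema needed):
#
#   Pharams.drop_nil_fields(%{name: "Jane", age: nil, tags: [], address: %{city: nil}})
#   #=> %{name: "Jane"}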
defp empty?(val), do: val in [nil, %{}, "", []]
defmacro __using__(opts) do
key_type = Keyword.get(opts, :key_type, :atom)
drop_nil_fields = Keyword.get(opts, :drop_nil_fields, false)
error_module = Keyword.get(opts, :view_module, Pharams.ErrorView)
error_template = Keyword.get(opts, :view_template, "errors.json")
error_status = Keyword.get(opts, :error_status, :unprocessable_entity)
quote do
import Pharams, only: [pharams: 2, pharams: 3]
# TODO: This is a bit hacky, move these over to module attributes
def pharams_key_type, do: unquote(key_type)
def pharams_drop_nil_fields?, do: unquote(drop_nil_fields)
def pharams_error_view_module, do: unquote(error_module)
def pharams_error_view_template, do: unquote(error_template)
def pharams_error_status, do: unquote(error_status)
end
end
defp generate_plug(validation_module, controller_module) do
quote do
@moduledoc false
use Phoenix.Controller
import Plug.Conn
import Ecto.Changeset
def init(opts), do: opts
def call(conn, opts \\ []) do
validator = unquote(validation_module)
controller = unquote(controller_module)
# Get the individual route options, or fallback to controller options
key_type = Keyword.get(opts, :key_type, controller.pharams_key_type())
drop_nil_fields = Keyword.get(opts, :drop_nil_fields, controller.pharams_drop_nil_fields?())
error_view_module = Keyword.get(opts, :view_module, controller.pharams_error_view_module())
error_view_template = Keyword.get(opts, :view_template, controller.pharams_error_view_template())
error_status = Keyword.get(opts, :error_status, controller.pharams_error_status())
changeset =
validator
|> struct()
|> validator.changeset(conn.params)
if changeset.valid? do
new_params =
changeset
|> apply_changes()
|> convert_key_type(key_type)
|> prune_empty_fields(drop_nil_fields)
%{conn | params: new_params}
else
conn
|> put_status(error_status)
|> put_view(error_view_module)
|> render(error_view_template, changeset)
|> halt()
end
end
defp convert_key_type(data, :atom = _key_type), do: Pharams.schema_to_atom_map(data)
defp convert_key_type(data, :string = _key_type), do: Pharams.schema_to_string_map(data)
defp convert_key_type(_data, _invalid_key_type) do
raise "Pharams: Invalid key_type. Valid options are :string and :atom"
end
defp prune_empty_fields(data, false = _drop_nil_fields), do: data
defp prune_empty_fields(data, true = _drop_nil_fields), do: Pharams.drop_nil_fields(data)
defp prune_empty_fields(data, _invalid_drop_nil_fields) do
raise "Pharams: Invalid drop_nil_fields. Valid options are true and false"
end
end
end
defp generate_validation({:__block__, [], block_contents}, caller) do
root_field_declarations = Utils.generate_basic_field_schema_definitions(block_contents, caller)
root_fields = Utils.get_all_basic_fields(block_contents)
root_required_fields = Utils.get_required_basic_fields(block_contents)
root_validations = Utils.generate_basic_field_validations(block_contents, caller)
root_group_declarations = Utils.generate_group_field_schema_definitions(block_contents, caller)
root_sub_schema_casts = Utils.generate_group_field_schema_casts(block_contents, nil)
group_schema_changesets = Utils.generate_group_field_schema_changesets(block_contents, nil, caller)
module =
[
"@moduledoc false",
"",
"use Ecto.Schema",
"import Ecto.Changeset",
"",
"@primary_key false",
"embedded_schema do",
root_field_declarations,
root_group_declarations,
"end",
"",
"def changeset(schema, params) do",
"schema",
"|> cast(params, #{inspect(root_fields)})",
"|> validate_required(#{inspect(root_required_fields)})",
root_validations,
root_sub_schema_casts,
"end",
"",
group_schema_changesets
]
|> List.flatten()
formatted_module =
module
|> Enum.join("\n")
|> Code.format_string!()
module =
(module ++
[
"",
"def dump do",
"\"\"\"",
"#{formatted_module}",
"\"\"\"",
"end"
])
|> Enum.join("\n")
Code.string_to_quoted!(module)
end
defp generate_validation(ast, caller) do
generate_validation({:__block__, [], [ast]}, caller)
end
@doc """
This macro provides the ability to define validation schemas for use in Phoenix controllers
## Example
```elixir
use Pharams, view_module: Pharams.ErrorView, view_template: "errors.json", error_status: :unprocessable_entity
pharams :index do
required :terms_conditions, :boolean
required :password, :string
required :password_confirmation, :string
optional :age, :integer
end
def index(conn, params) do
# You will only get into this function if the request
# parameters have passed the above validator. The params
# variable is now just a map with atoms as keys.
render(conn, "index.html")
end
```
"""
defmacro pharams(controller_action, do: block) do
camel_action =
controller_action
|> Atom.to_string()
|> Macro.camelize()
calling_module = __CALLER__.module
# Create validation module
validation_module_name = Module.concat([calling_module, PharamsValidator, camel_action])
validation_module_ast = generate_validation(block, __CALLER__)
Module.create(validation_module_name, validation_module_ast, Macro.Env.location(__ENV__))
# Create plug module
plug_module_name = Module.concat([calling_module, PharamsPlug, camel_action])
plug_module_ast = generate_plug(validation_module_name, calling_module)
Module.create(plug_module_name, plug_module_ast, Macro.Env.location(__ENV__))
# Insert the validation plug into the controller
quote do
plug(unquote(plug_module_name) when var!(action) == unquote(controller_action))
end
end
defmacro pharams(controller_action, opts, do: block) do
camel_action =
controller_action
|> Atom.to_string()
|> Macro.camelize()
calling_module = __CALLER__.module
# Create validation module
validation_module_name = Module.concat([calling_module, PharamsValidator, camel_action])
validation_module_ast = generate_validation(block, __CALLER__)
Module.create(validation_module_name, validation_module_ast, Macro.Env.location(__ENV__))
# Create plug module
plug_module_name = Module.concat([calling_module, PharamsPlug, camel_action])
plug_module_ast = generate_plug(validation_module_name, calling_module)
Module.create(plug_module_name, plug_module_ast, Macro.Env.location(__ENV__))
# Insert the validation plug into the controller
quote do
plug(
unquote(plug_module_name),
unquote(opts) when var!(action) == unquote(controller_action)
)
end
end
end
|
lib/pharams.ex
| 0.529263 | 0.439507 |
pharams.ex
|
starcoder
|
defmodule Bouncer.Token do
@moduledoc """
A library of functions used to work with session data.
"""
alias Phoenix.Token
def adapter, do: Application.get_env(:bouncer, :adapter)
@doc """
Generates a token, uses it as a key to save user data to the store, and
associates it with the user's ID.
"""
def generate(conn, namespace, user, ttl) do
token = Token.sign(conn, namespace, user["id"])
case adapter().save(user, token, ttl) do
{:ok, ^token} ->
case adapter().add(user["id"], token) do
{:ok, _} -> {:ok, token}
response -> response
end
response -> response
end
end
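# Sketch of typical usage (the "user" namespace and the one-day TTL are illustrative values):
#
#   {:ok, token} = Bouncer.Token.generate(conn, "user", %{"id" => 42}, 86_400)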
@doc """
Verifies that a given token is valid. Returns data retrieved from the store
using the token as a key if token is valid.
"""
def verify(conn, namespace, token) do
case validate([conn, namespace, token]) do
^token -> adapter().get(token)
false -> {:error, "Invalid token"}
end
end
@doc """
Validates a token against a given namespace. Returns the token if valid and
false otherwise.
"""
def validate([conn, namespace, token]) do
case Token.verify(conn, namespace, token) do
{:ok, _} -> token
_ -> false
end
end
@doc """
Gets rid of any existing tokens given a namespace and user ID. Generates and
returns a new token.
"""
def regenerate(conn, namespace, user, ttl) do
delete_all(conn, namespace, user["id"])
generate(conn, namespace, user, ttl)
end
@doc """
Deletes all tokens of a given namespace and disassociates them with the given
user's ID.
"""
def delete_all(conn, namespace, id) do
{_, tokens} = adapter().all(id)
tokens
|> Enum.map(&([conn, namespace, &1]))
|> Enum.filter_map(&validate/1, fn ([_, _, token]) -> token end)
|> delete(id)
end
@doc """
Deletes token(s) and disassociates them with the given user's ID.
"""
def delete(token, id) do
adapter().delete(token)
adapter().remove(id, token)
end
end
|
lib/bouncer/token.ex
| 0.784814 | 0.585901 |
token.ex
|
starcoder
|
defmodule ISBN do
@moduledoc """
Documentation for ISBN.
"""
@isbn13_multipliers [1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3]
@isbn10_multipliers [1, 2, 3, 4, 5, 6, 7, 8, 9]
@doc """
Checks if the given string is a valid ISBN.
Works with both ISBN-10 and ISBN-13. Allows hyphens and spaces in the string.
## Examples
iex> ISBN.valid?("9971502100")
true
iex> ISBN.valid?("978-03-0640-615-7")
true
"""
def valid?(isbn) when is_binary(isbn) do
last_digit = isbn |> String.trim() |> String.last()
digits =
isbn
|> remove_spaces_and_dashes()
|> String.slice(0..-2)
|> String.codepoints()
|> Enum.map(&String.to_integer/1)
reveal_verifier(digits) == last_digit
end
def valid?(_), do: false
@doc """
Returns ISBN formatted.
## Examples
iex> ISBN.format("9992158107")
"99-9215-810-7"
iex> ISBN.format("9992158106")
nil
"""
def format(isbn) when not is_binary(isbn), do: nil
def format(isbn) do
case valid?(isbn) do
true ->
isbn
|> remove_spaces_and_dashes()
|> String.codepoints()
|> do_format()
_ ->
nil
end
end
defp do_format(digits) when length(digits) == 10 do
digits
|> List.insert_at(2, "-")
|> List.insert_at(7, "-")
|> List.insert_at(11, "-")
|> Enum.join()
end
defp do_format(digits) when length(digits) == 13 do
digits
|> List.insert_at(3, "-")
|> List.insert_at(6, "-")
|> List.insert_at(11, "-")
|> List.insert_at(15, "-")
|> Enum.join()
end
defp do_format(_isbn), do: nil
defp reveal_verifier(digits) when length(digits) == 9 do
acc = calculate(digits, @isbn10_multipliers)
rem = rem(acc, 11)
if rem == 10, do: "X", else: Integer.to_string(rem)
end
defp reveal_verifier(digits) when length(digits) == 12 do
acc = calculate(digits, @isbn13_multipliers)
rem = rem(acc, 10)
if rem == 0, do: "0", else: Integer.to_string(10 - rem)
end
defp reveal_verifier(_isbn), do: nil
defp calculate(digits, multipliers) do
multipliers
|> Enum.zip(digits)
|> Enum.reduce(0, fn {multiplier, digit}, acc ->
acc + multiplier * digit
end)
end
defp remove_spaces_and_dashes(isbn) do
isbn
|> String.trim()
|> String.replace("-", "")
|> String.replace(" ", "")
end
end
|
lib/isbn.ex
| 0.804713 | 0.429788 |
isbn.ex
|
starcoder
|
defmodule Metatags do
@moduledoc """
Metatags is used to provide an easy api to print out context-specific
metatags.
"""
alias Phoenix.HTML.Tag
@sitename Application.get_env(:metatags, :sitename)
@default_meta_tags Application.get_env(:metatags, :default_tags, %{})
@separator Application.get_env(:metatags, :separator, "-")
@doc false
def init(_opts), do: nil
@doc false
def call(conn, _opts) do
conn
|> Map.put(:metadata, @default_meta_tags)
end
@doc """
Puts a key and a value in the metadata store
example:
```
iex> %{metadata: %{}} |> Metatags.put("title", "Welcome!")
%{metadata: %{"title" => "Welcome!"}}
```
"""
@spec put(map, atom, String.t | map) :: struct
def put(conn, key, value) when is_atom(key) do
put(conn, Atom.to_string(key), value)
end
@spec put(map, String.t, String.t | map) :: struct
def put(conn, key, value) do
metadata = conn.metadata
|> Map.put(key, value)
conn
|> Map.put(:metadata, metadata)
end
@doc """
Turns the metadata stored on the conn into HTML tags.
"""
@spec print_tags(map) :: Phoenix.HTML.Safe.t()
def print_tags(%{metadata: metadata}) do
metadata
|> Enum.reduce([], fn {key, value}, acc ->
[print_tag(metadata, key, value) | acc]
end)
end
def print_tags(_map), do: nil
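# Illustrative example: assuming the application config sets the sitename to "MySite"
# and keeps the default "-" separator, a conn holding
# %{metadata: %{"title" => "Home", "description" => "Hi"}} renders roughly
#
#   <title>Home - MySite</title>
#   <meta name="description" content="Hi">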
defp print_tag(metadata, prefix, %{} = map) do
map
|> Enum.reduce([], fn {key, value}, acc ->
[print_tag(metadata, "#{prefix}:#{key}", value) | acc]
end)
end
defp print_tag(_, "title", value) when is_nil(value), do: Tag.content_tag :title, do: @sitename
defp print_tag(metadata, key, value) when is_atom(key) do
print_tag(metadata, Atom.to_string(key), value)
end
defp print_tag(_, "title", value) do
suffix = if @sitename, do: [@separator, @sitename], else: []
Tag.content_tag :title, do: Enum.join([value] ++ suffix, " ")
end
defp print_tag(metadata, "keywords" = key, value) when is_list(value) do
print_tag(metadata, key, Enum.join(value, ", "))
end
defp print_tag(metadata, "og:title" = key, value) do
Tag.tag :meta, name: key, content: value || metadata["title"]
end
defp print_tag(metadata, "og:url" = key, value) do
Tag.tag :meta, name: key, content: value || Map.get(metadata, "canonical", nil)
end
defp print_tag(_, key, value), do: Tag.tag :meta, name: key, content: value
end
|
lib/metatags.ex
| 0.800185 | 0.732974 |
metatags.ex
|
starcoder
|
defmodule Recase.NameCase do
@moduledoc """
Module to convert strings to `Name Case`.
This module should not be used directly.
## Examples
iex> Recase.to_name "mccarthy o'donnell"
"<NAME>"
Read about `Name Case` here:
https://metacpan.org/pod/Lingua::EN::NameCase
"""
import Recase.Replace
@spec convert(String.t()) :: String.t()
def convert(value) when is_binary(value) do
value
|> String.downcase()
|> replace(~r|\b\w|, fn first_char_of_word ->
String.upcase(first_char_of_word)
end)
|> replace(~r|\'\w\b|, fn apostophe_ess ->
String.downcase(apostophe_ess)
end)
|> replace_irish()
|> replace(~r|\bVon\b|, "von")
|> replace(~r|\bVan(?=\s+\w)|, "van")
|> replace(~r|\bAp\b|, "ap")
|> replace(~r|\bAl(?=\s+\w)|, "al")
|> replace(~r|\bEl\b|, "el")
|> replace(~r|\bLa\b|, "la")
|> replace(~r|\bBen(?=\s+\w)|, "ben")
|> replace(~r/\b(Bin|Binti|Binte)\b/, fn bin_prefix ->
String.downcase(bin_prefix)
end)
|> replace(~r|\bD([aeiou])\b|, fn da_prefix ->
String.downcase(da_prefix)
end)
|> replace(~r|\bD([ao]s)\b|, fn das_prefix ->
String.downcase(das_prefix)
end)
|> replace(~r|\bDell([ae])\b|, fn dell_prefix ->
String.downcase(dell_prefix)
end)
|> replace(~r|\bDe([lr])\b|, fn del_prefix ->
String.downcase(del_prefix)
end)
|> replace(~r|\bL([eo])\b|, fn le_prefix -> String.downcase(le_prefix) end)
|> replace_roman_numerals()
|> replace(~r|\b([YEI])\b|, fn conjunction ->
String.downcase(conjunction)
end)
end
defp replace_roman_numerals(string) do
replace(
string,
~r/\b ( (?: [Xx]{1,3} | [Xx][Ll] | [Ll][Xx]{0,3} )? (?: [Ii]{1,3} | [Ii][VvXx] | [Vv][Ii]{0,3} )? ) \b /x,
fn numeral -> String.upcase(numeral) end
)
end
defp replace_irish(string) do
replace(string, ~r|\b(Mc)([A-Za-z]+)|, fn _, mc_prefix, rest_of_word ->
mc_prefix <> String.capitalize(rest_of_word)
end)
|> replace(
~r|\b(Ma?c)([A-Za-z]{2,}[^aciozj])\b|,
fn _, mc_prefix, rest_of_word ->
mc_prefix <> String.capitalize(rest_of_word)
end
)
|> replace(~r/\bMacEdo/, "Macedo")
|> replace(~r/\bMacEvicius/, "Macevicius")
|> replace(~r/\bMacHado/, "Machado")
|> replace(~r/\bMacHar/, "Machar")
|> replace(~r/\bMacHin/, "Machin")
|> replace(~r/\bMacHlin/, "Machlin")
|> replace(~r/\bMacIas/, "Macias")
|> replace(~r/\bMacIulis/, "Maciulis")
|> replace(~r/\bMacKie/, "Mackie")
|> replace(~r/\bMacKle/, "Mackle")
|> replace(~r/\bMacKlin/, "Macklin")
|> replace(~r/\bMacKmin/, "Mackmin")
|> replace(~r/\bMacQuarie/, "Macquarie")
|> replace(~r/\bMacmurdo/, "MacMurdo")
end
end
|
lib/recase/cases/name_case.ex
| 0.687105 | 0.42674 |
name_case.ex
|
starcoder
|
defmodule Guards do
# Uses the sentinel values of
# :guard
# :empty
# :locked
# Is this space valid for visiting, on the board + empty.
defp is_valid({_board, rows, cols}, {r, c}) when r < 0 or c < 0 or r >= rows or c >= cols do
false
end
defp is_valid({board, _rows, _cols}, {r, c}) do
value(board, {r, c}) == :empty
end
# Returns the value of a space on the board
def value(board, {r, c}) when is_map(board) do
Map.get(Map.get(board, r), c)
end
# Converts :guard to 0, for next value calsulations
def int_value(:guard), do: 0
def int_value(x), do: x
# All potential neighbors, could be off the board or invalid.
def neighbors({r, c}) do
[{r - 1, c}, {r + 1, c}, {r, c - 1}, {r, c + 1}]
end
# Remove points that are off the board or already assigned.
def filter_neighbors(points, {board, rows, cols}) do
List.foldl(points, [],
fn {r, c}, acc ->
if is_valid({board, rows, cols}, {r, c}), do: acc ++ [{r, c}], else: acc
end)
end
# Searches the board for location of :guard cells
def find_guards(rtn, _row, []), do: rtn
def find_guards(rtn, row, [{col, :guard}|tail]) do
find_guards(rtn ++ [{row, col}], row, tail)
end
def find_guards(rtn, row, [{_col, _}|tail]) do
find_guards(rtn, row, tail)
end
def find_guards([]), do: []
def find_guards([{row, cols} | rows]) do
find_guards([], row, Map.to_list(cols)) ++ find_guards(rows)
end
def find_guards(board) when is_map(board) do
find_guards(Map.to_list(board))
end
# Updates a space by updating the whole board
defp update_space(board, {r, c}, value) do
Map.update!(board, r, fn map -> Map.update!(map, c, fn _ -> value end) end)
end
defp visit_neighbors([], acc, board, _) do
{acc, board}
end
defp visit_neighbors([{r, c} | tail], acc, board, val) do
case value(board, {r, c}) do
:empty -> visit_neighbors(tail, acc ++ [{r, c}], update_space(board, {r, c}, val), val)
_ -> visit_neighbors(tail, acc, board, val)
end
end
# Queue is empty
defp search([], {board, _, _}), do: board
# Process the first item in the queue
defp search([{r, c} | tail], {board, rows, cols}) do
# Visit neighbors 1 level deep.
{visited, board} = neighbors({r, c})
|> filter_neighbors({board, rows, cols})
|> visit_neighbors([], board, int_value(value(board, {r, c})) + 1)
# Append newly visited items to the end of the queue, pass updated board.
search(tail ++ visited, {board, rows, cols})
end
# Search the board and fill in :empty spaces
def search({board, rows, cols}) do
# Seed the BFS search w/ the location of all the guards.
find_guards(board)
|> search({board, rows, cols})
end
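# Illustrative example (a 1x3 board with a guard on the left):
#
#   board = %{0 => %{0 => :guard, 1 => :empty, 2 => :empty}}
#   Guards.search({board, 1, 3})
#   #=> %{0 => %{0 => :guard, 1 => 1, 2 => 2}}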
def print_row([]) do
end
def print_row([row|tail]) do
IO.inspect(Map.values(row))
print_row(tail)
end
def print(board) do
Map.values(board) |> print_row()
end
end
|
lib/guards.ex
| 0.700075 | 0.767951 |
guards.ex
|
starcoder
|
defmodule Ecto.Adapters.Mnesia.Read do
@moduledoc false
require Qlc
alias Ecto.Adapters.Mnesia.Record
alias Ecto.Adapters.Mnesia.Table
alias Ecto.Query.BooleanExpr
alias Ecto.Query.QueryExpr
alias Ecto.Query.SelectExpr
@order_mapping %{
asc: :ascending,
desc: :descending
}
def query(select, joins, sources, wheres) do
fn params ->
context = %{sources: sources, params: params}
[where] = wheres
to_read(where.expr, context)
end
end
defp to_read(
{:==, [], [{{:., [], [{:&, [], [source_index]}, field]}, [], []}, {:^, [], [index]}]},
context
) do
value = Enum.at(context[:params], index)
to_read({:==, [], [{{:., [], [{:&, [], [source_index]}, field]}, [], []}, value]}, context)
end
defp to_read(
{:==, [], [{{:., [], [{:&, [], [source_index]}, :id]}, [], []}, value]},
%{sources: sources, params: params}
) do
[{source, schema}] = sources
:mnesia.read(source, value)
end
defp to_read(
{:==, [], [{{:., [], [{:&, [], [source_index]}, field]}, [], []}, value]},
%{sources: sources, params: params}
) do
[{source, schema}] = sources
# IO.inspect(field, label: "field")
# IO.inspect(value, label: "value")
# IO.inspect(sources, label: "sources")
:mnesia.all_keys(source) |> IO.inspect(label: "all keys")
:mnesia.index_read(source, value, field)
# :mnesia.select(source, [{"$1", [{:==, field, value}], ["$_"]}])
# |> IO.inspect(label: "select result")
end
@spec sort(list(%QueryExpr{}), %SelectExpr{}, list(tuple())) ::
(query_handle :: :qlc.query_handle() -> query_handle :: :qlc.query_handle())
def sort([], _select, _sources) do
fn query -> query end
end
def sort(order_bys, select, sources) do
fn query ->
Enum.reduce(order_bys, query, fn
%QueryExpr{expr: expr}, query1 ->
Enum.reduce(expr, query1, fn {order, field_expr}, query2 ->
field = field(field_expr, sources)
field_index = Enum.find_index(fields(select, sources), fn e -> e == field end)
Qlc.keysort(query2, field_index, order: @order_mapping[order])
end)
end)
end
end
defp field({{_, _, [{:&, [], [source_index]}, field]}, [], []}, sources) do
case Enum.at(sources, source_index) do
source -> Record.Attributes.to_erl_var(field, source)
end
end
defp fields(%SelectExpr{fields: fields}, sources) do
Enum.flat_map(sources, fn _source ->
Enum.map(fields, &field(&1, sources))
|> Enum.reject(&is_nil(&1))
end)
end
defp fields(:all, [{_table_name, schema} = source | _t]) do
schema.__schema__(:fields)
|> Enum.map(&Record.Attributes.to_erl_var(&1, source))
end
defp fields(_, [{_table_name, schema} = source | _t]) do
schema.__schema__(:fields)
|> Enum.map(&Record.Attributes.to_erl_var(&1, source))
end
end
|
lib/ecto/adapters/mnesia/read.ex
| 0.583559 | 0.443902 |
read.ex
|
starcoder
|
defmodule Aoc2019Day12 do
def solve1(input, steps \\ 10) do
[a, b, c, d] = parse_input(input)
do_n_steps(a, b, c, d, steps) |> Enum.map(&total_energy_for_a_moon/1) |> Enum.sum()
end
def parse_input(input) do
input
|> String.trim()
|> String.split("\n", trim: true)
|> Enum.map(&parse/1)
|> Enum.map(fn {x, y, z} -> {{x, y, z}, {0, 0, 0}} end)
end
defp parse(s) do
d = Regex.named_captures(~r/x=(?<x>-?\d+), y=(?<y>-?\d+), z=(?<z>-?\d+)>/, s)
{String.to_integer(d["x"]), String.to_integer(d["y"]), String.to_integer(d["z"])}
end
defp calculate_new_velocity(v, x1, x2) when x1 > x2, do: v - 1
defp calculate_new_velocity(v, x1, x2) when x1 == x2, do: v
defp calculate_new_velocity(v, x1, x2) when x1 < x2, do: v + 1
def apply_gravity(moons) do
moons
|> Enum.map(fn {{x, y, z}, {vx, vy, vz}} ->
newvel =
Enum.reduce(moons, {vx, vy, vz}, fn {{x2, y2, z2}, _}, {vx, vy, vz} ->
{calculate_new_velocity(vx, x, x2), calculate_new_velocity(vy, y, y2),
calculate_new_velocity(vz, z, z2)}
end)
{{x, y, z}, newvel}
end)
end
def do_n_steps(a, b, c, d, n \\ 1) do
1..n
|> Enum.reduce([a, b, c, d], fn _, [e, f, g, h] ->
do_one_step(e, f, g, h)
# |> IO.inspect(label: "step")
end)
end
defp do_one_step(a, b, c, d) do
[a, b, c, d]
|> apply_gravity
|> Enum.map(&apply_velocity/1)
end
defp apply_velocity({{x, y, z}, {vx, vy, vz}}) do
{{x + vx, y + vy, z + vz}, {vx, vy, vz}}
end
@doc """
The total energy for a single moon is its potential energy multiplied by its kinetic energy.
"""
def total_energy_for_a_moon({pos, vel}) do
sum_of_absolute_xyz(pos) * sum_of_absolute_xyz(vel)
end
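# Worked example (see the energy definitions in the comments below): a moon at
# {1, -2, 3} with velocity {0, 4, -1} has potential energy 6 and kinetic energy 5.
#
#   total_energy_for_a_moon({{1, -2, 3}, {0, 4, -1}})
#   #=> 30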
@doc """
A moon's potential energy is the sum of the absolute values of its x, y, and z position coordinates.
A moon's kinetic energy is the sum of the absolute values of its velocity coordinates.
"""
defp sum_of_absolute_xyz({x, y, z}) do
abs(x) + abs(y) + abs(z)
end
def steps_to_exactly_match(moons, stop_condition) do
steps_to_exactly_match(moons, moons, 0, stop_condition)
end
defp dimension_match?([e, f, g, h], [a0, b0, c0, d0], get_a_dimension) do
get_a_dimension.([e, f, g, h]) == get_a_dimension.([a0, b0, c0, d0])
end
defp x_dimension_match?(ms1, ms2) do
dimension_match?(ms1, ms2, &get_x/1)
end
defp y_dimension_match?(ms1, ms2) do
dimension_match?(ms1, ms2, &get_y/1)
end
defp z_dimension_match?(ms1, ms2) do
dimension_match?(ms1, ms2, &get_z/1)
end
defp get_x(moons) do
moons
|> Enum.map(fn {{x, _y, _z}, {vx, _vy, _vz}} -> {x, vx} end)
end
defp get_y(moons) do
moons
|> Enum.map(fn {{_x, y, _z}, {_vx, vy, _vz}} -> {y, vy} end)
end
defp get_z(moons) do
moons
|> Enum.map(fn {{_x, _y, z}, {_vx, _vy, vz}} -> {z, vz} end)
end
defp steps_to_exactly_match([a, b, c, d], [a0, b0, c0, d0], steps, stop_condition) do
[e, f, g, h] = do_n_steps(a, b, c, d, 1)
if stop_condition.([e, f, g, h], [a0, b0, c0, d0]) do
steps + 1
else
steps_to_exactly_match([e, f, g, h], [a0, b0, c0, d0], steps + 1, stop_condition)
end
end
def steps_to_exactly_match([a, b, c, d]) do
x_cycle = steps_to_exactly_match([a, b, c, d], &x_dimension_match?/2)
y_cycle = steps_to_exactly_match([a, b, c, d], &y_dimension_match?/2)
z_cycle = steps_to_exactly_match([a, b, c, d], &z_dimension_match?/2)
lcm(lcm(x_cycle, y_cycle), z_cycle)
end
defp lcm(a, b) do
div(a * b, gcd(a, b))
end
defp gcd(a, 0) do
a
end
defp gcd(a, b) do
gcd(b, rem(a, b))
end
def solve2(input) do
[a, b, c, d] = parse_input(input)
steps_to_exactly_match([a, b, c, d])
end
end
|
lib/aoc2019_day12.ex
| 0.698946 | 0.650342 |
aoc2019_day12.ex
|
starcoder
|
defmodule ExWire.Packet.Capability.Par.Transactions do
@moduledoc """
Par Wire Packet for communicating new transactions.
```
**Transactions** [`+0x02`: `P`, [`nonce`: `P`, `receivingAddress`: `B_20`, `value`: `P`, ...], ...]
Specify (a) transaction(s) that the peer should make sure is included on
its transaction queue. The items in the list (following the first item 0x12)
are transactions in the format described in the main Ethereum specification.
Nodes must not resend the same transaction to a peer in the same session. This
packet must contain at least one (new) transaction.
```
"""
require Logger
@behaviour ExWire.Packet
@type t :: %__MODULE__{
transactions: [any()]
}
defstruct [
:transactions
]
@doc """
Returns the relative message id offset for this message.
This will help determine what its message ID is relative to other Packets in the same Capability.
"""
@impl true
@spec message_id_offset() :: 2
def message_id_offset do
0x02
end
@doc """
Given a Transactions packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.Capability.Par.Transactions{
...> transactions: [
...> [1, 2, 3],
...> [4, 5, 6]
...> ]
...> }
...> |> ExWire.Packet.Capability.Par.Transactions.serialize
[ [1, 2, 3], [4, 5, 6] ]
"""
@impl true
@spec serialize(t) :: ExRLP.t()
def serialize(packet = %__MODULE__{}) do
# TODO: Serialize accurately
packet.transactions
end
@doc """
Given an RLP-encoded Transactions packet from Eth Wire Protocol,
decodes into a Transactions struct.
## Examples
iex> ExWire.Packet.Capability.Par.Transactions.deserialize([ [1, 2, 3], [4, 5, 6] ])
%ExWire.Packet.Capability.Par.Transactions{
transactions: [
[1, 2, 3],
[4, 5, 6],
]
}
"""
@impl true
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
# TODO: Deserialize from proper struct
%__MODULE__{
transactions: rlp
}
end
@doc """
Handles a Transactions message. We should try to add the transaction
to a queue and process it. Or, right now, do nothing.
## Examples
iex> %ExWire.Packet.Capability.Par.Transactions{transactions: []}
...> |> ExWire.Packet.Capability.Par.Transactions.handle()
:ok
"""
@impl true
@spec handle(ExWire.Packet.packet()) :: :ok
def handle(packet = %__MODULE__{}) do
_ =
Logger.debug(fn ->
"[Packet] Peer sent #{Enum.count(packet.transactions)} transaction(s)."
end)
:ok
end
end
|
apps/ex_wire/lib/ex_wire/packet/capability/par/transactions.ex
| 0.838614 | 0.852752 |
transactions.ex
|
starcoder
|
defmodule TimeZoneInfo.IanaParser.Helper do
@moduledoc false
import NimbleParsec
@seconds_per_hour 3600
@seconds_per_minute 60
@op %{
">=" => :ge,
"<=" => :le
}
@default_time_standard :wall
@time_standard %{
"w" => @default_time_standard,
"s" => :standard,
"g" => :gmt,
"u" => :utc,
"z" => :zulu
}
@month %{
"Jan" => 1,
"Feb" => 2,
"Mar" => 3,
"Apr" => 4,
"May" => 5,
"Jun" => 6,
"Jul" => 7,
"Aug" => 8,
"Sep" => 9,
"Oct" => 10,
"Nov" => 11,
"Dec" => 12
}
@day %{
"Mon" => 1,
"Tue" => 2,
"Wed" => 3,
"Thu" => 4,
"Fri" => 5,
"Sat" => 6,
"Sun" => 7
}
def word(combinator \\ empty()) do
concat(combinator, do_word())
end
def word(combinator, tag) do
unwrap_and_tag(combinator, do_word(), tag)
end
defp do_word do
[{:not, ?\t}, {:not, ?\n}, {:not, ?#}, {:not, ?\s}, {:not, ?\r}]
|> utf8_char
|> repeat()
|> reduce({:reduce_word, []})
end
def reduce_word(data) do
data
|> IO.iodata_to_binary()
|> String.trim()
|> case do
"" -> nil
"-" -> nil
x -> x
end
end
def choice_map(combinator \\ empty(), map) when is_map(map) do
choice_map =
map
|> Map.keys()
|> Enum.map(&string/1)
|> choice()
|> reduce({:reduce_choice_map, [map]})
concat(combinator, choice_map)
end
def reduce_choice_map([key], map), do: Map.fetch!(map, key)
def choice_map_with_default(combinator \\ empty(), map, default) when is_map(map) do
choice_map_with_default =
map
|> Map.keys()
|> Enum.map(&string/1)
|> choice()
|> optional()
|> reduce({:reduce_choice_map_with_default, [map, default]})
concat(combinator, choice_map_with_default)
end
def reduce_choice_map_with_default([], _map, default), do: default
def reduce_choice_map_with_default([key], map, _default), do: Map.fetch!(map, key)
def month(combinator \\ empty(), tag \\ nil) do
month = choice_map(@month)
if tag == nil do
concat(combinator, month)
else
unwrap_and_tag(combinator, month, tag)
end
end
def on(combinator \\ empty(), tag \\ nil) do
day = integer(min: 1) |> unwrap_and_tag(:day)
last_day = string("last") |> ignore() |> choice_map(@day) |> unwrap_and_tag(:last)
day_from = choice_map(@day) |> choice_map(@op) |> integer(min: 1) |> tag(:day_from)
on = choice([last_day, day, day_from]) |> reduce({:reduce_on, []})
if tag == nil do
concat(combinator, on)
else
unwrap_and_tag(combinator, on, tag)
end
end
def reduce_on(day: day) do
day
end
def reduce_on(last: day) do
[last_day_of_week: day]
end
def reduce_on(day_from: [day_of_week, op, day]) do
[day: day, op: op, day_of_week: day_of_week]
end
def until(combinator \\ empty()) do
until =
int()
|> whitespace()
|> optional(month())
|> whitespace()
|> optional(on())
|> whitespace()
|> optional(time())
|> reduce({:reduce_until, []})
unwrap_and_tag(combinator, until, :until)
end
def reduce_until(data) do
case data do
[yr, mo, dy, {hr, 0, 0}] -> {yr, mo, dy, hr}
[yr, mo, dy, {hr, min, 0}] -> {yr, mo, dy, hr, min}
[yr, mo, dy, {hr, min, sec}] -> {yr, mo, dy, hr, min, sec}
list -> List.to_tuple(list)
end
end
def time_standard(combinator \\ empty()) do
unwrap_and_tag(
combinator,
choice_map_with_default(@time_standard, @default_time_standard),
:time_standard
)
end
def seconds do
seconds(empty(), nil)
end
def seconds(combinator \\ empty(), tag) do
next = ascii_char([?:]) |> optional() |> ignore() |> integer(min: 1)
time =
whitespace()
|> optional(ascii_char([?-]))
|> integer(min: 1)
|> repeat(next)
|> reduce({:reduce_seconds, []})
if tag == nil do
concat(combinator, time)
else
unwrap_and_tag(combinator, time, tag)
end
end
def time(combinator \\ empty(), tag \\ nil) do
next = ascii_char([?:]) |> optional() |> ignore() |> integer(min: 1)
time =
whitespace()
|> optional(ascii_char([?-]))
|> integer(min: 1)
|> repeat(next)
|> reduce({:reduce_time, []})
if tag == nil do
concat(combinator, time)
else
unwrap_and_tag(combinator, time, tag)
end
end
def reduce_time(data) do
case data do
[hour] ->
{hour, 0, 0}
[hour, minute] ->
{hour, minute, 0}
[hour, minute, second] ->
{hour, minute, second}
end
end
def reduce_seconds([?- | data]) do
to_seconds(data) * -1
end
def reduce_seconds(data) do
to_seconds(data)
end
def to_seconds(data) do
case data do
[hour] ->
hour * @seconds_per_hour
[hour, minute] ->
hour * @seconds_per_hour + minute * @seconds_per_minute
[hour, minute, second] ->
hour * @seconds_per_hour + minute * @seconds_per_minute + second
end
end
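# Worked examples of the conversion above:
#
#   to_seconds([2])        #=> 7200
#   to_seconds([1, 30])    #=> 5400
#   to_seconds([0, 1, 30]) #=> 90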
def collect(combinator), do: reduce(combinator, {:reduce_collect, []})
def reduce_collect(data) when is_list(data) do
data
|> Enum.group_by(
fn {tag, _data} -> tag end,
fn {_tag, data} -> data end
)
|> Enum.into(%{}, &reduce_collect/1)
end
def reduce_collect({:rule, data}) do
rules =
data
|> Enum.group_by(
fn [{:name, name} | _] -> name end,
fn [_ | data] -> data end
)
{:rules, rules}
end
def reduce_collect({:link, data}) do
links = Enum.into(data, %{}, fn [to: to, from: from] -> {from, to} end)
{:links, links}
end
def reduce_collect({:zone, data}) do
zones =
Enum.into(data, %{}, fn [name: name, states: states] ->
states =
Enum.map(states, fn state ->
case state[:until] == nil do
true ->
List.insert_at(state, 3, {:until, nil})
false ->
state
end
end)
{name, states}
end)
{:zones, zones}
end
def rules(combinator) do
rules = choice([seconds(), word()]) |> unwrap_and_tag(:rules)
concat(combinator, rules)
end
def to_year(combinator) do
only = string("only") |> replace(:only)
max = string("max") |> replace(:max)
unwrap_and_tag(combinator, choice([only, max, integer(min: 1)]), :to)
end
def unset(combinator \\ empty(), tag) do
unset = [?-] |> utf8_char() |> replace(nil)
unwrap_and_tag(combinator, unset, tag)
end
def int(combinator \\ empty(), tag \\ nil) do
int = integer(min: 1)
if tag == nil do
concat(combinator, int)
else
unwrap_and_tag(combinator, int, tag)
end
end
def close(combinator) do
concat(
combinator,
[string("\n"), string("\r\n")] |> choice() |> optional() |> ignore()
)
end
def close(combinator, tag) do
combinator
|> concat([string("\n"), string("\r\n")] |> choice() |> optional() |> ignore())
|> reduce({:reduce_close, []})
|> unwrap_and_tag(tag)
end
def reduce_close(data), do: data
def whitespace(combinator \\ empty()) do
whitespace = [?\s, ?\t] |> ascii_char() |> repeat() |> ignore()
concat(combinator, whitespace)
end
def seperator(combinator \\ empty()) do
seperator = [?\s, ?\t] |> ascii_char() |> times(min: 1) |> ignore()
concat(combinator, seperator)
end
def text(combinator) do
text = [{:not, ?\n}] |> utf8_char() |> repeat()
concat(combinator, text)
end
def format(combinator) do
format = reduce(word(), {:reduce_format, []})
unwrap_and_tag(combinator, format, :format)
end
def reduce_format([format]) do
cond do
String.contains?(format, "%s") ->
{:template, format}
String.contains?(format, "/") ->
{:choice, format |> String.split("/")}
true ->
{:string, format}
end
end
def zone_state(zone_state), do: zone_state
def record(tag), do: ignore(string(tag) |> whitespace())
end
|
lib/time_zone_info/iana_parser/helper.ex
| 0.565059 | 0.445771 |
helper.ex
|
starcoder
|
defmodule <%= inspect schema.module %>Token do
use Ecto.Schema
import Ecto.Query
@hash_algorithm :sha256
@rand_size 32
# It is very important to keep the reset password token expiry short,
# since someone with access to the email may take over the account.
@reset_password_validity_in_days 1
@confirm_validity_in_days 7
@change_email_validity_in_days 7
@session_validity_in_days 60
<%= if schema.binary_id do %>
@primary_key {:id, :binary_id, autogenerate: true}
@foreign_key_type :binary_id<% end %>
schema "<%= schema.table %>_tokens" do
field :token, :binary
field :context, :string
field :sent_to, :string
belongs_to :<%= schema.singular %>, <%= inspect schema.module %>
timestamps(updated_at: false)
end
@doc """
Generates a token that will be stored in a signed place,
such as session or cookie. As they are signed, those
tokens do not need to be hashed.
"""
def build_session_token(<%= schema.singular %>) do
token = :crypto.strong_rand_bytes(@rand_size)
{token, %<%= inspect schema.module %>Token{token: token, context: "session", <%= schema.singular %>_id: <%= schema.singular %>.id}}
end
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the <%= schema.singular %> found by the token.
"""
def verify_session_token_query(token) do
query =
from token in token_and_context_query(token, "session"),
join: <%= schema.singular %> in assoc(token, :<%= schema.singular %>),
where: token.inserted_at > ago(@session_validity_in_days, "day"),
select: <%= schema.singular %>
{:ok, query}
end
@doc """
Builds a token with a hashed counter part.
The non-hashed token is sent to the <%= schema.singular %> email while the
hashed part is stored in the database, to avoid reconstruction.
The token is valid for a week as long as <%= schema.singular %>s don't change
their email.
"""
def build_email_token(<%= schema.singular %>, context) do
build_hashed_token(<%= schema.singular %>, context, <%= schema.singular %>.email)
end
defp build_hashed_token(<%= schema.singular %>, context, sent_to) do
token = :crypto.strong_rand_bytes(@rand_size)
hashed_token = :crypto.hash(@hash_algorithm, token)
{Base.url_encode64(token, padding: false),
%<%= inspect schema.module %>Token{
token: hashed_token,
context: context,
sent_to: sent_to,
<%= schema.singular %>_id: <%= schema.singular %>.id
}}
end
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the <%= schema.singular %> found by the token.
"""
def verify_email_token_query(token, context) do
case Base.url_decode64(token, padding: false) do
{:ok, decoded_token} ->
hashed_token = :crypto.hash(@hash_algorithm, decoded_token)
days = days_for_context(context)
query =
from token in token_and_context_query(hashed_token, context),
join: <%= schema.singular %> in assoc(token, :<%= schema.singular %>),
where: token.inserted_at > ago(^days, "day") and token.sent_to == <%= schema.singular %>.email,
select: <%= schema.singular %>
{:ok, query}
:error ->
:error
end
end
defp days_for_context("confirm"), do: @confirm_validity_in_days
defp days_for_context("reset_password"), do: @reset_password_validity_in_days
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the <%= schema.singular %> token record.
"""
def verify_change_email_token_query(token, context) do
case Base.url_decode64(token, padding: false) do
{:ok, decoded_token} ->
hashed_token = :crypto.hash(@hash_algorithm, decoded_token)
query =
from token in token_and_context_query(hashed_token, context),
where: token.inserted_at > ago(@change_email_validity_in_days, "day")
{:ok, query}
:error ->
:error
end
end
@doc """
Returns the given token with the given context.
"""
def token_and_context_query(token, context) do
from <%= inspect schema.module %>Token, where: [token: ^token, context: ^context]
end
@doc """
Gets all tokens for the given <%= schema.singular %> for the given contexts.
"""
def <%= schema.singular %>_and_contexts_query(<%= schema.singular %>, :all) do
from t in <%= inspect schema.module %>Token, where: t.<%= schema.singular %>_id == ^<%= schema.singular %>.id
end
def <%= schema.singular %>_and_contexts_query(<%= schema.singular %>, [_ | _] = contexts) do
from t in <%= inspect schema.module %>Token, where: t.<%= schema.singular %>_id == ^<%= schema.singular %>.id and t.context in ^contexts
end
end
|
priv/templates/phx.gen.auth/schema_token.ex
| 0.528047 | 0.464841 |
schema_token.ex
|
starcoder
|
defmodule Membrane.H264.FFmpeg.Decoder do
@moduledoc """
Membrane element that decodes video in H264 format. It is backed by a decoder from FFmpeg.
The element expects the data for each frame (Access Unit) to be received in a separate buffer,
so the parser (`Membrane.H264.FFmpeg.Parser`) may be required in a pipeline before the
decoder (e.g. when input is read from `Membrane.File.Source`).
"""
use Membrane.Filter
require Membrane.Logger
alias __MODULE__.Native
alias Membrane.Buffer
alias Membrane.H264
alias Membrane.H264.FFmpeg.Common
alias Membrane.RawVideo
@no_pts -9_223_372_036_854_775_808
def_options use_shm?: [
type: :boolean,
description:
"If true, native decoder will use shared memory (via `t:Shmex.t/0`) for storing frames",
default: false
]
def_input_pad :input,
demand_unit: :buffers,
demand_mode: :auto,
caps: {H264, stream_format: :byte_stream, alignment: :au}
def_output_pad :output,
demand_mode: :auto,
caps: {RawVideo, pixel_format: one_of([:I420, :I422]), aligned: true}
@impl true
def handle_init(opts) do
state = %{decoder_ref: nil, caps_changed: false, use_shm?: opts.use_shm?}
{:ok, state}
end
@impl true
def handle_stopped_to_prepared(_ctx, state) do
case Native.create() do
{:ok, decoder_ref} ->
{:ok, %{state | decoder_ref: decoder_ref}}
{:error, reason} ->
{{:error, reason}, state}
end
end
@impl true
def handle_process(:input, buffer, ctx, state) do
%{decoder_ref: decoder_ref, use_shm?: use_shm?} = state
dts = if(buffer.dts, do: Common.to_h264_time_base_truncated(buffer.dts), else: @no_pts)
pts = if(buffer.pts, do: Common.to_h264_time_base_truncated(buffer.pts), else: @no_pts)
case Native.decode(
buffer.payload,
pts,
dts,
use_shm?,
decoder_ref
) do
{:ok, pts_list_h264_base, frames} ->
bufs = wrap_frames(pts_list_h264_base, frames)
in_caps = ctx.pads.input.caps
{caps, state} = update_caps_if_needed(state, in_caps)
{{:ok, caps ++ bufs}, state}
{:error, reason} ->
{{:error, reason}, state}
end
end
@impl true
def handle_caps(:input, _caps, _ctx, state) do
# only redeclaring decoder - new caps will be generated in handle_process, after decoding key_frame
case Native.create() do
{:ok, decoder_ref} ->
{:ok, %{state | decoder_ref: decoder_ref, caps_changed: true}}
{:error, reason} ->
{{:error, reason}, state}
end
end
@impl true
def handle_end_of_stream(:input, _ctx, state) do
with {:ok, best_effort_pts_list, frames} <-
Native.flush(state.use_shm?, state.decoder_ref),
bufs <- wrap_frames(best_effort_pts_list, frames) do
actions = bufs ++ [end_of_stream: :output, notify: {:end_of_stream, :input}]
{{:ok, actions}, state}
else
{:error, reason} -> {{:error, reason}, state}
end
end
@impl true
def handle_prepared_to_stopped(_ctx, state) do
{:ok, %{state | decoder_ref: nil}}
end
defp wrap_frames([], []), do: []
defp wrap_frames(pts_list, frames) do
Enum.zip(pts_list, frames)
|> Enum.map(fn {pts, frame} ->
%Buffer{pts: Common.to_membrane_time_base_truncated(pts), payload: frame}
end)
|> then(&[buffer: {:output, &1}])
end
defp update_caps_if_needed(%{caps_changed: true, decoder_ref: decoder_ref} = state, in_caps) do
{[caps: {:output, generate_caps(in_caps, decoder_ref)}], %{state | caps_changed: false}}
end
defp update_caps_if_needed(%{caps_changed: false} = state, _in_caps) do
{[], state}
end
defp generate_caps(input_caps, decoder_ref) do
{:ok, width, height, pix_fmt} = Native.get_metadata(decoder_ref)
framerate =
case input_caps do
nil -> {0, 1}
%H264{framerate: in_framerate} -> in_framerate
end
%RawVideo{
aligned: true,
pixel_format: pix_fmt,
framerate: framerate,
height: height,
width: width
}
end
end
|
lib/membrane_h264_ffmpeg/decoder.ex
| 0.898572 | 0.436622 |
decoder.ex
|
starcoder
|
defmodule Axon.Schedules do
@moduledoc """
Parameter Schedules.
Parameter schedules are often used to anneal hyperparameters
such as the learning rate during the training process. Schedules
provide a mapping from the current time step to a learning rate
or another hyperparameter.
Choosing a good learning rate and consequently a good learning
rate schedule is typically a process of trial and error. Learning
rates should be relatively small such that the learning curve
does not oscillate violently during the training process, but
not so small that learning proceeds too slowly. Using a
schedule slowly decreases oscillations during the training
process such that, as the model converges, training also
becomes more stable.
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
import Nx.Defn
import Axon.Shared
@doc ~S"""
Exponential decay schedule.
$$\gamma(t) = \gamma_0 * r^{\frac{t}{k}}$$
## Options
* `:init_value` - initial value. $\gamma$ in above formulation.
Defaults to `1.0e-2`
* `:decay_rate` - rate of decay. $r$ in above formulation.
Defaults to `0.95`
* `:transition_steps` - steps per transition. $k$ in above
formulation. Defaults to `10`
* `:transition_begin` - step to begin transition. Defaults to `0`
* `:staircase` - discretize outputs. Defaults to `false`
"""
def exponential_decay(opts \\ []) do
&apply_exponential_decay(&1, opts)
end
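# Illustrative usage (results are Nx tensors; values shown are approximate):
#
#   schedule = Axon.Schedules.exponential_decay(init_value: 1.0e-2, decay_rate: 0.5, transition_steps: 10)
#   schedule.(0)   # ~1.0e-2
#   schedule.(10)  # ~5.0e-3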
defnp apply_exponential_decay(step, opts \\ []) do
opts =
keyword!(opts,
init_value: 1.0e-2,
decay_rate: 0.95,
transition_steps: 10,
transition_begin: 0,
staircase: false
)
init_value = opts[:init_value]
rate = opts[:decay_rate]
staircase? = to_predicate(opts[:staircase])
k = opts[:transition_steps]
start = opts[:transition_begin]
t = Nx.subtract(step, start)
p =
if staircase? do
t
|> Nx.divide(k)
|> Nx.floor()
else
t
|> Nx.divide(k)
end
decayed_value =
rate
|> Nx.power(p)
|> Nx.multiply(init_value)
Nx.select(
Nx.less_equal(t, 0),
init_value,
decayed_value
)
end
@doc ~S"""
Cosine decay schedule.
$$\gamma(t) = \gamma_0 * (1 - \alpha)*(\frac{1}{2}(1 + \cos{\pi \frac{t}{k}})) + \alpha$$
## Options
* `:init_value` - initial value. $\gamma_0$ in above formulation.
Defaults to `1.0e-2`
* `:decay_steps` - number of steps to apply decay for.
$k$ in above formulation. Defaults to `10`
* `:alpha` - minimum value of multiplier adjusting learning rate.
$\alpha$ in above formulation. Defaults to `0.0`
## References
* [SGDR: Stochastic Gradient Descent with Warm Restarts](https://openreview.net/forum?id=Skq89Scxx¬eId=Skq89Scxx)
"""
def cosine_decay(opts \\ []) do
&apply_cosine_decay(&1, opts)
end
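# Illustrative usage (results are Nx tensors; values shown are approximate):
#
#   schedule = Axon.Schedules.cosine_decay(init_value: 1.0e-2, decay_steps: 10)
#   schedule.(0)   # ~1.0e-2
#   schedule.(10)  # ~0.0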
defnp apply_cosine_decay(step, opts \\ []) do
opts = keyword!(opts, init_value: 1.0e-2, decay_steps: 10, alpha: 0.0)
init_value = opts[:init_value]
decay_steps = opts[:decay_steps]
alpha = opts[:alpha]
step
|> Nx.min(decay_steps)
|> Nx.divide(decay_steps)
|> Nx.multiply(3.1415926535897932384626433832795028841971)
|> Nx.cos()
|> Nx.add(1)
|> Nx.divide(2)
|> Nx.multiply(1 - alpha)
|> Nx.add(alpha)
|> Nx.multiply(init_value)
end
@doc ~S"""
Constant schedule.
$$\gamma(t) = \gamma_0$$
## Options
* `:init_value` - initial value. $\gamma_0$ in above formulation.
Defaults to `1.0e-2`
"""
def constant(opts \\ []) do
&apply_constant(&1, opts)
end
defnp apply_constant(_step, opts \\ []) do
opts = keyword!(opts, init_value: 0.01)
Nx.tensor(opts[:init_value])
end
@doc ~S"""
Polynomial schedule.
$$\gamma(t) = (\gamma_0 - \gamma_n) * (1 - \frac{t}{k})^p$$
## Options
* `:init_value` - initial value. $\gamma_0$ in above formulation.
Defaults to `1.0e-2`
* `:end_value` - end value of annealed scalar. $\gamma_n$ in above formulation.
Defaults to `1.0e-3`
* `:power` - power of polynomial. $p$ in above formulation. Defaults to `2`
* `:transition_steps` - number of steps over which annealing takes place.
$k$ in above formulation. Defaults to `10`
"""
def polynomial_decay(opts \\ []) do
&apply_polynomial_decay(&1, opts)
end
defnp apply_polynomial_decay(step, opts \\ []) do
opts =
keyword!(opts,
init_value: 1.0e-2,
end_value: 1.0e-3,
power: 2,
transition_steps: 10,
transition_begin: 0
)
init_value = opts[:init_value]
end_value = opts[:end_value]
start = opts[:transition_begin]
k = opts[:transition_steps]
p = opts[:power]
step
|> Nx.subtract(start)
|> Nx.clip(0, k)
|> Nx.divide(k)
|> Nx.negate()
|> Nx.add(1)
|> Nx.power(p)
|> Nx.multiply(Nx.subtract(init_value, end_value))
|> Nx.add(end_value)
end
end
|
lib/axon/schedules.ex
| 0.860852 | 0.964422 |
schedules.ex
|
starcoder
|
defmodule Wasmex do
@moduledoc """
Wasmex is an Elixir library for executing WebAssembly binaries.
WASM functions can be executed like this:
```elixir
{:ok, bytes } = File.read("wasmex_test.wasm")
{:ok, instance } = Wasmex.start_link(bytes)
{:ok, [42]} == Wasmex.call_function(instance, "sum", [50, -8])
```
Memory can be read/written using `Wasmex.Memory`:
```elixir
offset = 7
index = 4
value = 42
{:ok, memory} = Wasmex.Instance.memory(instance, :uint8, offset)
Wasmex.Memory.set(memory, index, value)
IO.puts Wasmex.Memory.get(memory, index) # 42
```
"""
use GenServer
# Client
@doc """
Starts a GenServer which compiles and instantiates a WASM module from the given bytes and imports map.
```elixir
imports = %{
env: %{
add_ints: {:fn, [:i32, :i32], [:i32], fn (_context, a, b) -> a + b end},
}
}
{:ok, bytes } = File.read("wasmex_test.wasm")
{:ok, instance } = Wasmex.start_link(%{bytes: bytes, imports: imports})
{:ok, [42]} == Wasmex.call_function(instance, "sum", [50, -8])
```
The imports are given as a map of namespaces.
In the example above, we import the `"env"` namespace.
Each namespace is, again, a map listing imports.
Under the name `add_ints`, we imported a function which is represented with a tuple of:
1. the import type: `:fn` (a function),
1. the functions parameter types: `[:i32, :i32]`,
1. the functions return types: `[:i32]`, and
1. a function reference: `fn (_context, a, b, c) -> a + b end`
When the WASM code executes the `add_ints` imported function, the execution context is forwarded to
the given function reference.
The first param is always the call context (a Map containing e.g. the instances memory).
All other params are regular parameters as specified by the parameter type list.
Valid parameter/return types are:
- `:i32` a 32 bit integer
- `:i64` a 64 bit integer
- `:f32` a 32 bit float
- `:f64` a 64 bit float
The return type must always be one value. (There are preparations to enable WASM to return multiple
values from a function call. We prepared the API for this future by specifying an array of return types.)
"""
def start_link(%{bytes: bytes, imports: imports}) when is_binary(bytes) do
GenServer.start_link(__MODULE__, %{bytes: bytes, imports: stringify_keys(imports)})
end
@doc """
Starts a GenServer which compiles and instantiates a WASM module from the given bytes.
"""
def start_link(bytes) when is_binary(bytes) do
start_link(%{bytes: bytes, imports: %{}})
end
@doc """
Returns whether a function export with the given `name` exists in the WebAssembly instance.
"""
def function_exists(pid, name) do
GenServer.call(pid, {:exported_function_exists, stringify(name)})
end
@doc """
Calls a function with the given `name` and `params` on
the WebAssembly instance and returns its results.
"""
def call_function(pid, name, params) do
GenServer.call(pid, {:call_function, stringify(name), params})
end
@doc """
Finds the exported memory of the given WASM instance and returns it as a `Wasmex.Memory`.
The memory is a collection of bytes which can be viewed and interpreted as a sequence of different
(data-)`types`:
* uint8 / int8 - (un-)signed 8-bit integer values
* uint16 / int16 - (un-)signed 16-bit integer values
* uint32 / int32 - (un-)signed 32-bit integer values
We can think of it as a list of values of the above type (where each value may be larger than a byte).
The `offset` value can be used to start reading the memory from a chosen position.
"""
def memory(pid, type, offset) when type in [:uint8, :int8, :uint16, :int16, :uint32, :int32] do
GenServer.call(pid, {:memory, type, offset})
end
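# Illustrative usage (reads the first byte of the instance's exported memory):
#
#   {:ok, memory} = Wasmex.memory(pid, :uint8, 0)
#   Wasmex.Memory.get(memory, 0)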
defp stringify_keys(atom_key_map) when is_map(atom_key_map) do
for {key, val} <- atom_key_map, into: %{}, do: {stringify(key), stringify_keys(val)}
end
defp stringify_keys(value), do: value
defp stringify(s) when is_binary(s), do: s
defp stringify(s) when is_atom(s), do: Atom.to_string(s)
# Server
@doc """
Params:
* bytes (binary): the WASM bytes defining the WASM module
* imports (map): a map defining imports. Structure is:
%{
namespace_name: %{
import_name: {:fn, [:i32, :i32], [:i32], function_reference}
}
}
"""
@impl true
def init(%{bytes: bytes, imports: imports}) when is_binary(bytes) do
{:ok, instance} = Wasmex.Instance.from_bytes(bytes, imports)
{:ok, %{instance: instance, imports: imports}}
end
@impl true
def handle_call({:memory, size, offset}, _from, %{instance: instance} = state)
when size in [:uint8, :int8, :uint16, :int16, :uint32, :int32] do
case Wasmex.Memory.from_instance(instance, size, offset) do
{:ok, memory} -> {:reply, {:ok, memory}, state}
{:error, error} -> {:reply, {:error, error}, state}
end
end
@impl true
def handle_call({:exported_function_exists, name}, _from, %{instance: instance} = state)
when is_binary(name) do
{:reply, Wasmex.Instance.function_export_exists(instance, name), state}
end
@impl true
def handle_call({:call_function, name, params}, from, %{instance: instance} = state) do
:ok = Wasmex.Instance.call_exported_function(instance, name, params, from)
{:noreply, state}
end
@impl true
def handle_info({:returned_function_call, result, from}, state) do
GenServer.reply(from, result)
{:noreply, state}
end
@impl true
def handle_info(
{:invoke_callback, namespace_name, import_name, context, params, token},
%{imports: imports} = state
) do
context =
Map.put(
context,
:memory,
Wasmex.Memory.wrap_resource(Map.get(context, :memory), :uint8, 0)
)
{success, return_value} =
try do
{:fn, _params, _returns, callback} =
imports
|> Map.get(namespace_name, %{})
|> Map.get(import_name)
{true, apply(callback, [context | params])}
rescue
e in RuntimeError -> {false, e.message}
end
:ok = Wasmex.Native.namespace_receive_callback_result(token, success, [return_value])
{:noreply, state}
end
end
|
lib/wasmex.ex
| 0.920299 | 0.876634 |
wasmex.ex
|
starcoder
|
defmodule BehaviorTree do
@moduledoc """
A library for building [behavior trees](https://en.wikipedia.org/wiki/Behavior_tree_(artificial_intelligence,_robotics_and_control)).
### About
A behavior tree is a method for encapsulating complex, nested logic in a declarative data structure. They are often used for video games and AI.
The key mechanic of a behavior tree is that _inner_ nodes describe how to traverse the tree, and _leaf_ nodes are the actual values or "behaviors." A behavior tree's value is always one of its leaf nodes, and it is advanced by signalling that the current behavior should "succeed" or "fail."
#### Nodes
The primary inner nodes that make up a behavior tree are "select" and "sequence" nodes:
_Select_ nodes will go through their children from left to right. If a child fails, it moves on to the next one. If the last one fails, the select node fails. As soon as any child succeeds, the select node succeeds (and stops traversing its children).
_Sequence_ nodes also go through their children from left to right. If a child fails, the whole sequence node fails (and stops traversing its children). If a child succeeds, it moves on to the next child. If the last one succeeds, the sequence node succeeds.
By composing these nodes as needed, you can build up complex behaviors in a simple data structure. There are also other types of inner nodes (like randomly choosing from their children), and "decorator" nodes, which modify a single child (like repeating it n times). Also, in this implementation, the whole tree will "start over" after exhausting all of its nodes.
#### Behavior trees vs decision trees and state machines
Behavior trees are similar to decision trees and state machines, but have important differences. Where a decision tree "drills down" from general to specific to reach a leaf, behavior trees are stateful, and move from leaf to leaf over time based on their current context. In that way, behavior trees are more like state machines, but they differ by leveraging the simplicity and power of composable trees to create more complex transition logic.
### Example
Let's build an AI to play [Battleship](https://en.wikipedia.org/wiki/Battleship_(game)).
The rules are simple: "ships" are secretly arranged on a 2D grid, and players guess coordinates, trying to "sink" all of the ships, by getting the clues "hit", "miss", and "sunk" after each guess.
The playing strategy is fairly simple, but we will make a few iterations of our AI.
> Note: This example splits up the code into two parts: 1) the tree itself, which only expresses what it wants to do at any given step, and 2) the "handler" code, which interprets the tree's intent, does the appropriate work, and updates the tree with the outcome. An alternative approach would be to load the tree's leafs with functions that could be called directly.
(You can jump directly to the [fully implemented AI code](https://github.com/jschomay/elixir-battleship-guesser/blob/master/lib/ai.ex)).
#### AI "A" - random guessing
This AI doesn't really have a strategy, and doesn't require a behavior tree, but it is a place to start.
ai_a = Node.sequence([:random_guess])
Every play, calling `BehaviorTree.value` will return `:random_guess`. Responding to that "behavior" with either `BehaviorTree.fail` or `BehaviorTree.succeed` will not change what we get next time around.
Note that the root of the tree will "start over" if it fails or succeeds, which is what keeps it running even after traversing all of the nodes.
Also note that the behavior tree does not actually know how to make a random guess, or what a valid random guess is, it just declares its _intent_, allowing the "handler" code to turn that intent into a guess, and then give appropriate feedback.
#### AI "B" - brute force
We can encode a brute force strategy as a tree:
row_by_row =
Node.repeat_until_fail(
Node.select([
:go_right,
:beginning_of_next_row
])
)
ai_b =
Node.sequence([
:top_left,
row_by_row
])
"B" is notably more complex, making use of three different inner nodes. `Node.repeat_until_fail` will repeat its one child node until it fails (in this case, it will only fail after `:beginning_of_next_row` fails, which would happen after all of the board has been guessed). Each time `:go_right` succeeds, the `select` node will succeed, and the `repeat_until_fail` node will restart it. If `go_right` goes off the board, the handler code will fail it, and the `select` node will move on to `:beginning_of_next_row`, which the handling code will succeed, which will "bubble up" to the `select` and `repeat_until_fail` nodes, restarting again at `:go_right` for the next call.
Note that any time the value of the tree fails, the handler code won't have a valid coordinate, requiring an additional "tick" through the tree in order to get a valid guess.
#### AI "C" - zero in
AI "C" is the smartest of the bunch, randomly guessing until getting a "hit", and then scanning left, right, up, or down appropriately until getting a "sunk."
search_horizontally =
Node.select([
:go_right,
:go_left
])
search_vertically =
Node.select([
:go_up,
:go_down
])
narrow_down =
Node.select([
search_horizontally,
search_vertically
])
ai_c =
Node.sequence([
:random_guess,
narrow_down
])
"C" is quite complex, and requires specific feedback from the handler code. When randomly guessing, a "miss" should get a `BehaviorTree.fail`, a "hit" should get a `BehaviorTree.succeed`, and a "sunk" should not update the tree at all, so that it will still be making random guesses next time (note that `BehaviorTree.fail` would work the same in this case, but is less clear).
When narrowing down, a "hit" should leave the tree as it is for next time, a "miss" should get a `BehaviorTree.fail`, and a "sunk" should get a `BehaviorTree.succeed`. In the case that a guess is invalid (goes off the board), it should respond with a `BehaviorTree.fail` and run it again.
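A handler for "C" might translate those rules into tree updates along these lines (a sketch only; the `handle_guess_result/3` name and its argument order are illustrative, not part of this library):

    defp handle_guess_result(tree, :random_guess, :hit), do: BehaviorTree.succeed(tree)
    defp handle_guess_result(tree, :random_guess, :miss), do: BehaviorTree.fail(tree)
    defp handle_guess_result(tree, :random_guess, :sunk), do: tree
    defp handle_guess_result(tree, _narrowing_behavior, :hit), do: tree
    defp handle_guess_result(tree, _narrowing_behavior, :miss), do: BehaviorTree.fail(tree)
    defp handle_guess_result(tree, _narrowing_behavior, :sunk), do: BehaviorTree.succeed(tree)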
"""
alias ExZipper.Zipper
alias BehaviorTree.Node
defstruct [:zipper]
@opaque t :: %__MODULE__{zipper: Zipper.t()}
@doc """
Start your behavior tree.
Note that the input is a static, declarative data structure, while the output is stateful, and will always have a value of one of the leafs.
The initial value will be the leaf reached from following a descent through each node (for a tree of selects and sequences this will be the deepest left-most leaf, but other types of nodes may have different initiation behaviors).
Note that the supplied argument should be a structure built from Nodes. You can use the included standard `BehaviorTree.Node`s, or one of your own that implements `BehaviorTree.Node.Protocol`. Any other value will be treated as a leaf, which would be a pointless behavior tree.
## Example
iex> tree = Node.sequence([
...> Node.sequence([:a, :b, :c]),
...> Node.select([:x, :y, :z]),
...> :done
...> ])
iex> tree |> BehaviorTree.start |> BehaviorTree.value
:a
"""
@spec start(any) :: __MODULE__.t()
def start(node) do
Zipper.zipper(
fn node -> Node.Protocol.get_children(node) != [] end,
fn node -> Node.Protocol.get_children(node) end,
fn node, children -> Node.Protocol.set_children(node, children) end,
node
)
|> descend_to_leaf
|> (fn zipper -> %__MODULE__{zipper: zipper} end).()
end
@doc """
Signals that the current behavior has "succeeded." The tree will advance to the next state.
The specifics of how the tree will advance depend on the type of node that the succeeded behavior is under. See the specific node documentation for the traversal logic.
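For example, following the sequence semantics from the `start/1` example:

    iex> tree = Node.sequence([:a, :b, :c])
    iex> tree |> BehaviorTree.start() |> BehaviorTree.succeed() |> BehaviorTree.value()
    :b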
"""
@spec succeed(__MODULE__.t()) :: __MODULE__.t()
def succeed(%__MODULE__{zipper: zipper} = bt) do
if Zipper.root(zipper) == zipper do
zipper
|> descend_to_leaf
|> (fn zipper -> %__MODULE__{zipper: zipper} end).()
else
parent = Zipper.up(zipper)
case Node.Protocol.on_succeed(Zipper.node(parent), zipper) do
:succeed ->
%__MODULE__{bt | zipper: parent} |> succeed
:fail ->
%__MODULE__{bt | zipper: parent} |> fail
%Zipper{} = new_zipper ->
new_zipper
|> descend_to_leaf
|> (fn zipper -> %__MODULE__{zipper: zipper} end).()
end
end
end
@doc """
Signals that the current behavior has "failed." The tree will advance to the next state.
The specifics of how the tree will advance depend on the type of node that the failed behavior is under. See the specific node documentation for the traversal logic.
"""
@spec fail(__MODULE__.t()) :: __MODULE__.t()
def fail(%__MODULE__{zipper: zipper} = bt) do
if Zipper.root(zipper) == zipper do
zipper
|> descend_to_leaf
|> (fn zipper -> %__MODULE__{zipper: zipper} end).()
else
parent = Zipper.up(zipper)
case Node.Protocol.on_fail(Zipper.node(parent), zipper) do
:fail ->
%__MODULE__{bt | zipper: parent} |> fail
:succeed ->
%__MODULE__{bt | zipper: parent} |> succeed
%Zipper{} = new_zipper ->
new_zipper
|> descend_to_leaf
|> (fn zipper -> %__MODULE__{zipper: zipper} end).()
end
end
end
@doc """
Get the current "behavior"
This will always be one of the leaf nodes, based on the current state of the tree.
"""
@spec value(__MODULE__.t()) :: any()
def value(%__MODULE__{} = bt) do
Zipper.node(bt.zipper)
end
@spec descend_to_leaf(Zipper.t()) :: Zipper.t()
defp descend_to_leaf(zipper) do
case Node.Protocol.first_child(Zipper.node(zipper), zipper) do
^zipper ->
zipper
%Zipper{} = zipper ->
descend_to_leaf(zipper)
end
end
end
lib/behavior_tree.ex
defmodule OMG.Watcher.ExitProcessor.DoubleSpend do
@moduledoc """
Wraps information about a single double spend occurring between a verified transaction and a known transaction
"""
defstruct [:index, :utxo_pos, :known_spent_index, :known_tx]
alias OMG.State.Transaction
alias OMG.Utxo
alias OMG.Watcher.ExitProcessor.KnownTx
alias OMG.Watcher.ExitProcessor.Tools
@type t() :: %__MODULE__{
index: non_neg_integer(),
utxo_pos: Utxo.Position.t(),
known_spent_index: non_neg_integer,
known_tx: KnownTx.t()
}
@doc """
Finds the single, oldest competitor from a set of known transactions grouped by input. `nil` if there's none
`known_txs_by_input` is assumed to hold _the oldest_ transaction spending a given input, for every input
"""
@spec find_competitor(KnownTx.known_txs_by_input_t(), Transaction.any_flavor_t()) :: nil | t()
def find_competitor(known_txs_by_input, tx) do
inputs = Transaction.get_inputs(tx)
known_txs_by_input
|> all_distinct_spends_of_inputs(inputs, tx)
# need to sort, to get the oldest transaction (double-) spending for _all the_ inputs of `tx`
|> Enum.sort(&KnownTx.is_older?/2)
|> Enum.at(0)
|> case do
nil -> nil
known_tx -> inputs |> Enum.with_index() |> Tools.double_spends_from_known_tx(known_tx) |> hd()
end
end
@doc """
Gets all the double spends found in an `known_txs_by_input`, following an indexed breakdown of particular
utxo_positions of `tx`.
This is useful if the interesting utxo positions aren't just inputs of `tx` (e.g. piggybacking, tx's outputs, etc.)
"""
@spec all_double_spends_by_index(
list({Utxo.Position.t(), non_neg_integer}),
map(),
Transaction.any_flavor_t()
) :: %{non_neg_integer => t()}
def all_double_spends_by_index(indexed_utxo_positions, known_txs_by_input, tx) do
{inputs, _indices} = Enum.unzip(indexed_utxo_positions)
# Will find all spenders of provided indexed inputs.
known_txs_by_input
|> all_distinct_spends_of_inputs(inputs, tx)
|> Stream.flat_map(&Tools.double_spends_from_known_tx(indexed_utxo_positions, &1))
|> Enum.group_by(& &1.index)
end
# filters all the transactions, spending any of the inputs, distinct from `tx` - to find all the double-spending txs
defp all_distinct_spends_of_inputs(known_txs_by_input, inputs, tx) do
known_txs_by_input
|> Map.take(inputs)
|> Stream.flat_map(fn {_input, spending_txs} -> spending_txs end)
|> Stream.filter(&Tools.txs_different(tx, &1.signed_tx))
end
end
apps/omg_watcher/lib/omg_watcher/exit_processor/double_spend.ex
defmodule Aoc.Year2018.Day01 do
@moduledoc """
Solution to Day 01 of 2018: Chronal Calibration
## --- Day 01: Chronal Calibration ---
"We've detected some temporal anomalies," one of Santa's Elves at the Temporal
Anomaly Research and Detection Instrument Station tells you. She sounded pretty
worried when she called you down here. "At 500-year intervals into the past,
someone has been changing Santa's history!"
"The good news is that the changes won't propagate to our time stream for
another 25 days, and we have a device" - she attaches something to your wrist -
"that will let you fix the changes with no such propagation delay. It's
configured to send you 500 years further into the past every few days; that was
the best we could do on such short notice."
"The bad news is that we are detecting roughly *fifty* anomalies throughout
time; the device will indicate fixed anomalies with *stars*. The other bad news
is that we only have one device and you're the best person for the job! Good
lu--" She taps a button on the device and you suddenly feel like you're falling.
To save Christmas, you need to get all *fifty stars* by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day
in the advent calendar; the second puzzle is unlocked when you complete the
first. Each puzzle grants *one star*. Good luck!
After feeling like you've been falling for a few minutes, you look at the
device's tiny screen. "Error: Device must be calibrated before first use.
Frequency drift detected. Cannot maintain destination lock." Below the message,
the device shows a sequence of changes in frequency (your puzzle input). A value
like `+6` means the current frequency increases by `6`; a value like `-3` means
the current frequency decreases by `3`.
For example, if the device displays frequency changes of `+1, -2, +3, +1`, then
starting from a frequency of zero, the following changes would occur:
- Current frequency ` 0`, change of `+1`; resulting frequency ` 1`.
- Current frequency ` 1`, change of `-2`; resulting frequency `-1`.
- Current frequency `-1`, change of `+3`; resulting frequency ` 2`.
- Current frequency ` 2`, change of `+1`; resulting frequency ` 3`.
In this example, the resulting frequency is `3`.
Here are other example situations:
- `+1, +1, +1` results in ` 3`
- `+1, +1, -2` results in ` 0`
- `-1, -2, -3` results in `-6`
Starting with a frequency of zero, *what is the resulting frequency* after all
of the changes in frequency have been applied?
## --- Part Two ---
You notice that the device repeats the same frequency change list over and over.
To calibrate the device, you need to find the first frequency it reaches
*twice*.
For example, using the same list of changes above, the device would loop as
follows:
- Current frequency ` 0`, change of `+1`; resulting frequency ` 1`.
- Current frequency ` 1`, change of `-2`; resulting frequency `-1`.
- Current frequency `-1`, change of `+3`; resulting frequency ` 2`.
- Current frequency ` 2`, change of `+1`; resulting frequency ` 3`.
- (At this point, the device continues from the start of the list.)
- Current frequency ` 3`, change of `+1`; resulting frequency ` 4`.
- Current frequency ` 4`, change of `-2`; resulting frequency ` 2`, which has already been seen.
In this example, the first frequency reached twice is `2`. Note that your device
might need to repeat its list of frequency changes many times before a duplicate
frequency is found, and that duplicates might be found while in the middle of
processing the list.
Here are other examples:
- `+1, -1` first reaches `0` twice.
- `+3, +3, +4, -2, -4` first reaches `10` twice.
- `-6, +3, +8, +5, -6` first reaches `5` twice.
- `+7, +7, -2, -7, -4` first reaches `14` twice.
*What is the first frequency your device reaches twice?*
"""
@doc """
Add all frequencies together, starting from `0`.
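For example, using the sample changes from the puzzle text:

    iex> Aoc.Year2018.Day01.part_1("+1\n-2\n+3\n+1")
    3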
"""
def part_1(input) do
input
|> split_into_list_of_ints()
|> Enum.sum()
end
@doc """
Cycle input in a Stream and keep reducing it until it finds one frequency that is repeated
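For example, `+3, +3, +4, -2, -4` first reaches `10` twice:

    iex> Aoc.Year2018.Day01.part_2("+3\n+3\n+4\n-2\n-4")
    10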
"""
def part_2(input) do
input
|> split_into_list_of_ints()
|> Stream.cycle()
|> Enum.reduce_while({0, MapSet.new([0])}, fn to_add, {old_freq, history} ->
new_freq = old_freq + to_add
if MapSet.member?(history, new_freq) do
{:halt, new_freq}
else
{:cont, {new_freq, MapSet.put(history, new_freq)}}
end
end)
end
defp split_into_list_of_ints(string) do
string
|> String.split()
|> Enum.map(&String.to_integer/1)
end
end
lib/aoc/year_2018/day_01.ex
defmodule Graph.Utils do
@moduledoc false
@compile {:inline, [vertex_id: 1, edge_weight: 3]}
@binary_heap_limit 64
@doc """
A large portion of the code for `sizeof/1` is based on `erlang_term` which can be found
at [here](https://github.com/okeuday/erlang_term), authored by <NAME>, and licensed
under the MIT license.
"""
def sizeof(term) do
sizeof(term, :erlang.system_info(:wordsize))
end
defp sizeof(term, wordsize) do
sizeof_term_local(term, wordsize) + sizeof_term(term)
end
defp sizeof_term(term) when is_list(term) do
sizeof_list(term)
end
defp sizeof_term(term) when is_tuple(term) do
sizeof_tuple(term)
end
defp sizeof_term(%{__struct__: _} = term) when is_map(term) do
Enum.reduce(Map.from_struct(term), 0, fn {k, v}, size ->
sizeof_term(k) + sizeof_term(v) + size
end)
end
defp sizeof_term(term) do
sizeof_term_global(term)
end
defp sizeof_term_local(term, wordsize) do
# stack/register size + heap size
(1 + :erts_debug.flat_size(term)) * wordsize
end
defp sizeof_term_global(term) when is_binary(term) do
case :erlang.byte_size(term) do
bsize when bsize > @binary_heap_limit ->
# refc binary
bsize
_ ->
# heap binary
0
end
end
defp sizeof_term_global(_term) do
0
end
defp sizeof_list(l, size \\ 0)
defp sizeof_list([], size), do: size
defp sizeof_list([term | rest], size) do
sizeof_list(rest, size + sizeof_term(term))
end
defp sizeof_list(term, size) do
# improper list
size + sizeof_term(term)
end
defp sizeof_tuple(term) do
sizeof_tuple(term, 1, :erlang.tuple_size(term), 0)
end
defp sizeof_tuple(term, n, n, size) do
sizeof_term(:erlang.element(n, term)) + size
end
defp sizeof_tuple(term, i, n, size) do
sizeof_tuple(term, i + 1, n, size + sizeof_term(:erlang.element(i, term)))
end
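# Returns the smallest weight among all (possibly labelled, parallel) edges stored
# under the key `{a, b}`. Raises if no such edge exists in the graph's edge metadata.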
def edge_weight(%Graph{edges: meta}, a, b) do
Map.fetch!(meta, {a, b})
|> Enum.map(fn {_label, weight} -> weight end)
|> Enum.min()
end
# 2^32
@max_phash 4_294_967_296
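# Maps an arbitrary vertex term to a stable integer id in `0..2^32 - 1`.
# Hash collisions are possible, but extremely unlikely in practice.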
def vertex_id(v), do: :erlang.phash2(v, @max_phash)
end
lib/graph/utils.ex
defmodule Taskmaster do
@moduledoc """
A set of convenience functions for concurrent, asynchronous tasks, loosely inspired by JavaScript's Promises.
## Why?
While Elixir's `Task` module provides an API for easy creation of concurrent processes, it does so by *blocking* the caller process on calls to `Task.await/2` or `Task.async_stream/3`. However, sometimes it is
beneficial to operate asynchronously, in a manner somewhat similar to JavaScript's Promises - let the work be done in the background and then act on the results when everything is resolved.
`Taskmaster` wraps around the built-in `Task` module to provide a set of useful functions for doing just that.
"""
@doc false
use GenServer
defguardp is_error(tuple) when tuple_size(tuple) === 2 and elem(tuple, 0) in [:exit, :error]
@type options :: [timeout: non_neg_integer(), link: boolean() | nil]
@doc """
Creates a process that runs `funs` concurrently and, when the first one resolves, sends a message to the caller.
A function resolves either by:
- returning a value, which results in a `%Taskmaster.Result{action: :race, result: {:winner, value}}` message
- crashing or returning an `{:error, reason}` tuple, which results in a `%Taskmaster.Result{action: :race, result: {:interrupted, {:error | :exit, reason}}}` message
- exceeding the `:timeout` option, which results in a `%Taskmaster.Result{action: :race, result: {:interrupted, :timeout}}` message
The process created by `race/2` **by default isn't linked** to the caller process. It can be started as a linked process by passing a `link: true` option.
Options
* `:timeout` - a timeout for each function (defaults to 5000)
* `:link` - should the started process be linked to the caller (defaults to `false`)
Example:
iex(1)> Taskmaster.race([
...(1)> fn ->
...(1)> :one
...(1)> end,
...(1)> fn ->
...(1)> :timer.sleep(200)
...(1)> :two
...(1)> end,
...(1)> fn ->
...(1)> :timer.sleep(300)
...(1)> :three
...(1)> end
...(1)> ])
{:ok, #PID<0.178.0>}
iex(2)> flush
%Taskmaster.Result{action: :race, result: {:winner, :one}}
:ok
"""
@spec race(funs :: [function(), ...], opts :: options()) ::
{:ok, pid}
def race(funs, opts \\ [])
def race([], _), do: raise(ArgumentError, message: "funs cannot be an empty list")
def race(funs, opts) when is_list(funs), do: do_start(opts, {:"$taskmaster_race", funs, opts})
@doc """
Creates a process that runs `funs` concurrently and sends a message to the caller when either all of them return a value or one of them crashes or returns an error.
Possible messages:
- `%Taskmaster.Result{action: :all, result: values}` when all the `funs` return a value
- `%Taskmaster.Result{action: :all, result: {:error, error}}` when any of the `funs`:
- returns an `{:error, reason}` tuple
- crashes
- exceeds the `:timeout` option
The process created by `all/2` **by default isn't linked** to the caller process. It can be started as a linked process by passing a `link: true` option.
Options
* `:timeout` - a timeout for each function (defaults to 5000)
* `:link` - should the started process be linked to the caller (defaults to `false`)
Example:
iex(1)> Taskmaster.all(
...(1)> [
...(1)> fn ->
...(1)> :one
...(1)> end,
...(1)> fn ->
...(1)> :timer.sleep(50)
...(1)> :two
...(1)> end,
...(1)> fn ->
...(1)> :timer.sleep(200)
...(1)> :three
...(1)> end
...(1)> ],
...(1)> timeout: 1000
...(1)> )
{:ok, #PID<0.216.0>}
iex(2)> flush()
%Taskmaster.Result{action: :all, result: [:one, :two, :three]}
:ok
"""
@spec all(funs :: [function(), ...], opts :: options()) :: {:ok, pid}
def all(funs, opts \\ [])
def all([], _), do: raise(ArgumentError, message: "funs cannot be an empty list")
def all(funs, opts) when is_list(funs), do: do_start(opts, {:"$taskmaster_all", funs, opts})
@doc false
@impl true
def init(%{op: op, caller: caller} = state) do
monitor = Process.monitor(caller)
GenServer.cast(self(), op)
{:ok, %{state | monitor: monitor}}
end
@impl true
def handle_cast({:"$taskmaster_all", funs, opts}, %{caller: caller} = state) do
funs_results =
funs
|> run_concurrently(ordered: true, timeout: opts[:timeout])
|> values()
|> results_if_all(&correct_result?/1)
result =
case funs_results do
[error] when is_error(error) -> {:error, error}
elements -> elements
end
send(caller, %Taskmaster.Result{action: :all, result: result})
{:stop, :normal, state}
end
def handle_cast({:"$taskmaster_race", funs, opts}, %{caller: caller} = state) do
funs_result =
funs
|> run_concurrently(ordered: false, timeout: opts[:timeout])
|> values()
|> Stream.take(1)
|> extract()
|> List.first()
result =
case funs_result do
{:exit, :timeout} -> {:interrupted, :timeout}
error when is_error(error) -> {:interrupted, error}
value -> {:winner, value}
end
send(caller, %Taskmaster.Result{action: :race, result: result})
{:stop, :normal, state}
end
def handle_cast(_, state), do: {:noreply, state}
@impl true
def handle_info({:DOWN, monitor, :process, _, reason}, %{monitor: monitor} = state) do
{:stop, reason, %{state | monitor: nil}}
end
def handle_info(_, state), do: {:noreply, state}
defp do_start(opts, op) when is_list(opts) do
method = if opts[:link], do: :link, else: :nolink
do_start(method, op)
end
defp do_start(:nolink, op) do
GenServer.start(__MODULE__, %{op: op, caller: self(), monitor: nil})
end
defp do_start(:link, op) do
GenServer.start_link(__MODULE__, %{op: op, caller: self(), monitor: nil})
end
defp run_concurrently(funs, opts) do
Task.async_stream(
funs,
fn fun ->
try do
fun.()
catch
problem, reason ->
{problem, reason}
end
end,
ordered: opts[:ordered],
max_concurrency: length(funs),
on_timeout: :kill_task,
timeout: opts[:timeout] || 5000
)
end
defp values(stream) do
Stream.map(stream, fn
{:ok, error} when is_error(error) -> error
{:ok, value} -> value
{:exit, :timeout} = error -> error
end)
end
defp extract(%Stream{} = stream), do: Enum.map(stream, & &1)
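# Lazily walks the results stream, halting at the first element for which `fun`
# returns false, so that the remaining task results are not awaited unnecessarily.
# Returns all elements when every one passes `fun`, otherwise a list containing
# the first failing element.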
defp results_if_all(stream, fun) do
{correct, wrong} =
stream
|> Stream.transform(
:continue,
fn
_, {:halt, res} ->
{:halt, res}
elem, :continue ->
if fun.(elem) do
{[elem], :continue}
else
{[elem], {:halt, elem}}
end
end
)
|> Enum.split_with(fun)
if Enum.empty?(wrong), do: correct, else: wrong
end
defp correct_result?({:error, _}), do: false
defp correct_result?({:exit, _}), do: false
defp correct_result?(_), do: true
end
lib/taskmaster.ex
defmodule Exgencode.Pdu do
@moduledoc """
The module contains functions for operating with PDUs defined with the `defpdu/2` macro.
"""
@doc "Returns the size of the field in bits."
@spec sizeof(Exgencode.pdu(), atom) :: non_neg_integer | {:subrecord, Exgencode.pdu()}
def sizeof(pdu, fieldName), do: Exgencode.Pdu.Protocol.sizeof(pdu, fieldName)
@doc "Returns the size of the given version pdu. In case the version argument is not passed it returns full size (all fields defined within defpdu macro).
Does not count subrecords size."
@spec sizeof_pdu(Exgencode.pdu(), Version.version() | nil, Exgencode.return_size_type()) ::
non_neg_integer
def sizeof_pdu(pdu, version \\ nil, type \\ :bits),
do: Exgencode.Pdu.Protocol.sizeof_pdu(pdu, version, type)
@doc """
Encode the Elixir structure into a binary give the protocol version.
### Examples:
iex> Exgencode.Pdu.encode(%TestPdu.PzTestMsg{otherTestField: 100})
<< 1 :: size(12), 100 :: size(24), 15 :: size(8), 10 :: size(28) >>
iex> Exgencode.Pdu.encode(%TestPdu.PzTestMsg{testField: 99, otherTestField: 100})
<< 99 :: size(12), 100 :: size(24), 15 :: size(8), 10 :: size(28) >>
Version number can be optionally added to control the encoding of the PDU and exclude certain fields if the version number is lower that specified.
pdu = %TestPdu.VersionedMsg{newerField: 111, evenNewerField: 7}
assert << 10 :: size(16), 111 :: size(8), 14 :: size(8) >> == Exgencode.Pdu.encode(pdu)
assert << 10 :: size(16) >> == Exgencode.Pdu.encode(pdu, "1.0.0")
assert << 10 :: size(16), 111 :: size(8) >> == Exgencode.Pdu.encode(pdu, "2.0.0")
### Examples:
iex> Exgencode.Pdu.encode(%TestPdu.VersionedMsg{newerField: 111, evenNewerField: 7}, "1.0.0")
<< 10 :: size(16) >>
iex> Exgencode.Pdu.encode(%TestPdu.VersionedMsg{newerField: 111, evenNewerField: 7}, "2.0.0")
<< 10 :: size(16), 111 :: size(8) >>
"""
@spec encode(Exgencode.pdu(), nil | Version.version()) :: binary
def encode(pdu, version \\ nil), do: Exgencode.Pdu.Protocol.encode(pdu, version)
@doc """
Decode a binary into the specified Elixir structure.
Returns the given structure with fields filled out and the remainder binary. The remainder should be an empty binary and leftovers usually indicate
a mangled binary.
### Examples:
iex> Exgencode.Pdu.decode(%TestPdu.PzTestMsg{}, << 1 :: size(12), 100 :: size(24), 15 :: size(8), 10 :: size(28)>>)
{%TestPdu.PzTestMsg{otherTestField: 100}, <<>>}
Version number can be optionally added to control how the decoding function reads the given binary. If the provided version does not match the requirement
specified in the field definition the given field will be ignored.
### Examples:
iex> Exgencode.Pdu.decode(%TestPdu.VersionedMsg{}, << 10 :: size(16) >>, "1.0.0")
{%TestPdu.VersionedMsg{oldField: 10}, <<>>}
iex> Exgencode.Pdu.decode(%TestPdu.VersionedMsg{}, << 10 :: size(16), 111 :: size(8) >>, "2.0.0")
{%TestPdu.VersionedMsg{oldField: 10, newerField: 111}, <<>>}
"""
@spec decode(Exgencode.pdu(), binary, nil | Version.version()) :: {Exgencode.pdu(), binary}
def decode(pdu, binary, version \\ nil), do: Exgencode.Pdu.Protocol.decode(pdu, binary, version)
@doc """
Set the values of all offset fields. If no fields have the `:offset_to` property it
becomes an identity function.
"""
@spec set_offsets(Exgencode.pdu(), nil | Version.version()) :: Exgencode.pdu()
def set_offsets(pdu, version \\ nil), do: Exgencode.Pdu.Protocol.set_offsets(pdu, version)
end
lib/exgencode/pdu.ex
defmodule CxLeaderboard.Indexer.Stats do
@moduledoc """
This module is full of functions that can be used in a custom indexer. Each
uses a different way of calculating stats. Do you want your ranks to go
sequentially, like `1, 1, 2`? Then choose one of the `sequential_rank_*`
functions. Want them offset instead, like `1, 1, 3`? Choose one of the
`offset_rank_*` functions. If there is something else you want to do that
isn't available here, you are welcome to implement your own function.
Most of the functions here are meant to be given as `on_rank` callback. See
description of each function to find out whether it's intended for `on_rank`
or `on_entry`.
The functions used by default are:
on_rank: &Stats.offset_rank_1_99_less_or_equal_percentile/1
on_entry: &Stats.global_index/1
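Each `on_rank` function receives a single 4-element tuple. Judging from the
implementations below, its elements appear to be: the total number of entries,
the zero-based index of the current group of tied scores, the zero-based
position of that group's first entry, and the size of the group. Each
`on_entry` function receives a tuple whose first element is the entry's global
index. (This description is inferred from the code below rather than stated by
the indexer, so treat it as a reading aid.)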
"""
@doc """
An `on_rank` function. Calculates ranks with an offset (e.g. 1,1,3) and
percentiles based on all lower scores, and half the equal scores.
"""
def offset_rank_midpoint_percentile({cnt, _, c_pos, c_size}) do
rank = c_pos + 1
lower_scores_count = cnt - c_pos - c_size
percentile = (lower_scores_count + 0.5 * c_size) / cnt * 100
{rank, percentile}
end
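# Worked example, assuming the tuple layout described in the moduledoc: with 10
# entries in total and a group of 3 tied scores starting at position 2,
# offset_rank_midpoint_percentile({10, 0, 2, 3}) returns {3, 65.0}.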
@doc """
An `on_rank` function. Calculates ranks with an offset (e.g. 1,1,3) and
percentiles based on all lower scores.
"""
def offset_rank_less_than_percentile({cnt, _, c_pos, c_size}) do
rank = c_pos + 1
lower_scores_count = cnt - c_pos - c_size
percentile = lower_scores_count / cnt * 100
{rank, percentile}
end
@doc """
An `on_rank` function. Calculates ranks with an offset (e.g. 1,1,3) and
percentiles based on all lower and equal scores.
"""
def offset_rank_less_than_or_equal_percentile({cnt, _, c_pos, _}) do
rank = c_pos + 1
same_or_lower_scores_count = cnt - c_pos
percentile = same_or_lower_scores_count / cnt * 100
{rank, percentile}
end
@doc """
An `on_rank` function. Calculates ranks with an offset (e.g. 1,1,3) and
percentiles based on all lower scores and equal scores, then squeezes the
percentile into 1-99 range.
This is the default choice.
"""
def offset_rank_1_99_less_or_equal_percentile({cnt, _, c_pos, _}) do
rank = c_pos + 1
same_or_lower_scores_count = cnt - c_pos
percentile = same_or_lower_scores_count / cnt * 98 + 1
{rank, percentile}
end
@doc """
An `on_rank` function. Calculates ranks sequentially (e.g. 1,1,2) and
percentiles based on all lower scores, and half the equal scores.
"""
def sequential_rank_midpoint_percentile({cnt, c_i, c_pos, c_size}) do
rank = c_i + 1
lower_scores_count = cnt - c_pos - c_size
percentile = (lower_scores_count + 0.5 * c_size) / cnt * 100
{rank, percentile}
end
@doc """
An `on_rank` function. Calculates ranks sequentially (e.g. 1,1,2) and
percentiles based on all lower scores.
"""
def sequential_rank_less_than_percentile({cnt, c_i, c_pos, c_size}) do
rank = c_i + 1
lower_scores_count = cnt - c_pos - c_size
percentile = lower_scores_count / cnt * 100
{rank, percentile}
end
@doc """
An `on_rank` function. Calculates ranks sequentially (e.g. 1,1,2) and
percentiles based on all lower and equal scores.
"""
def sequential_rank_less_than_or_equal_percentile({cnt, c_i, c_pos, _}) do
rank = c_i + 1
same_or_lower_scores_count = cnt - c_pos
percentile = same_or_lower_scores_count / cnt * 100
{rank, percentile}
end
@doc """
An `on_rank` function. Calculates ranks sequentially (e.g. 1,1,2) and
percentiles based on all lower scores and equal scores, then squeezes the
percentile into 1-99 range.
"""
def sequential_rank_1_99_less_or_equal_percentile({cnt, c_i, c_pos, _}) do
rank = c_i + 1
same_or_lower_scores_count = cnt - c_pos
percentile = same_or_lower_scores_count / cnt * 98 + 1
{rank, percentile}
end
@doc """
An `on_entry` function. Provides the global index in the leaderboard for each
record.
This is the default choice.
"""
def global_index({i, _, _, _}) do
i
end
end
lib/cx_leaderboard/indexer/stats.ex
if Code.ensure_loaded?(Ecto.Type) do
defmodule Cldr.UnitWithUsage.Ecto.Composite.Type do
@moduledoc """
Implements the Ecto.Type behaviour for a user-defined Postgres composite type
called `:cldr_unit_with_usage`.
This is the preferred option for Postgres databases since the serialized unit
value is stored as a decimal number.
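A sketch of how this type might be declared in an Ecto schema (the schema and
field names below are illustrative only):

    schema "products" do
      field :weight, Cldr.UnitWithUsage.Ecto.Composite.Type
    end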
"""
@behaviour Ecto.Type
def type do
:cldr_unit_with_usage
end
def blank?(_) do
false
end
def load({unit_name, unit_value, nil}) do
with {:ok, unit} <- Cldr.Unit.new(unit_name, unit_value) do
{:ok, unit}
else
_ -> :error
end
end
def load({unit_name, unit_value, unit_usage}) do
with {:ok, unit} <- Cldr.Unit.new(unit_name, unit_value, usage: unit_usage) do
{:ok, unit}
else
_ -> :error
end
end
# Dumping to the database. We make the assumption that
# since we are dumping from %Cldr.Unit{} structs that the
# data is ok
def dump(%Cldr.Unit{value: %Ratio{} = value} = unit) do
value = Decimal.div(Decimal.new(value.numerator), Decimal.new(value.denominator))
{:ok, {to_string(unit.unit), value, to_string(unit.usage)}}
end
def dump(%Cldr.Unit{} = unit) do
{:ok, {to_string(unit.unit), unit.value, to_string(unit.usage)}}
end
def dump(_) do
:error
end
# Casting in changesets
def cast(%Cldr.Unit{} = unit) do
{:ok, unit}
end
def cast(%{"unit" => _, "value" => ""}) do
{:ok, nil}
end
def cast(%{"unit" => unit_name, "value" => value, "usage" => usage})
when (is_binary(unit_name) or is_atom(unit_name)) and is_number(value) do
with decimal_value <- Decimal.new(value),
{:ok, unit} <- Cldr.Unit.new(unit_name, decimal_value, usage: usage) do
{:ok, unit}
else
{:error, {_, message}} -> {:error, message: message}
end
end
def cast(%{"unit" => unit_name, "value" => value, "usage" => usage})
when (is_binary(unit_name) or is_atom(unit_name)) and is_binary(value) do
with {value, ""} <- Cldr.Decimal.parse(value),
{:ok, unit} <- Cldr.Unit.new(unit_name, value, usage: usage) do
{:ok, unit}
else
{:error, {_, message}} -> {:error, message: message}
:error -> {:error, message: "Couldn't parse value #{inspect value}"}
end
end
def cast(%{"unit" => unit_name, "value" => %Decimal{} = value, "usage" => usage})
when is_binary(unit_name) or is_atom(unit_name) do
with {:ok, unit} <- Cldr.Unit.new(unit_name, value, usage: usage) do
{:ok, unit}
else
{:error, {_, message}} -> {:error, message: message}
end
end
def cast(%{unit: unit_name, value: value} = unit) do
cast(%{"unit" => unit_name, "value" => value, "usage" => unit.usage})
end
def cast(_money) do
:error
end
# New for ecto_sql 3.2
def embed_as(_), do: :self
def equal?(term1, term2), do: term1 == term2
end
end
lib/cldr/unit/ecto/unit_with_usage_ecto_composite_type.ex
defmodule AWS.GameLift do
@moduledoc """
GameLift Service
Amazon Web Services provides solutions for hosting session-based multiplayer
game servers in the cloud, including tools for deploying, operating, and scaling
game servers.
Built on Amazon Web Services global computing infrastructure, GameLift helps you
deliver high-performance, high-reliability, low-cost game servers while
dynamically scaling your resource usage to meet player demand.
## About GameLift solutions
Get more information on these GameLift solutions in the [GameLift Developer Guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/).
* GameLift managed hosting -- GameLift offers a fully managed
service to set up and maintain computing machines for hosting, manage game
session and player session life cycle, and handle security, storage, and
performance tracking. You can use automatic scaling tools to balance player
demand and hosting costs, configure your game session management to minimize
player latency, and add FlexMatch for matchmaking.
* Managed hosting with Realtime Servers -- With GameLift Realtime
Servers, you can quickly configure and set up ready-to-go game servers for your
game. Realtime Servers provides a game server framework with core GameLift
infrastructure already built in. Then use the full range of GameLift managed
hosting features, including FlexMatch, for your game.
* GameLift FleetIQ -- Use GameLift FleetIQ as a standalone service
while hosting your games using EC2 instances and Auto Scaling groups. GameLift
FleetIQ provides optimizations for game hosting, including boosting the
viability of low-cost Spot Instances gaming. For a complete solution, pair the
GameLift FleetIQ and FlexMatch standalone services.
* GameLift FlexMatch -- Add matchmaking to your game hosting
solution. FlexMatch is a customizable matchmaking service for multiplayer games.
Use FlexMatch as integrated with GameLift managed hosting or incorporate
FlexMatch as a standalone service into your own hosting solution.
## About this API Reference
This reference guide describes the low-level service API for Amazon GameLift.
With each topic in this guide, you can find links to language-specific
SDK guides and the Amazon Web Services CLI reference. Useful links:
* [GameLift API operations listed by tasks](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html)
* [ GameLift tools and resources](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-components.html)
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2015-10-01",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "gamelift",
global?: false,
protocol: "json",
service_id: "GameLift",
signature_version: "v4",
signing_name: "gamelift",
target_prefix: "GameLift"
}
end
@doc """
Registers a player's acceptance or rejection of a proposed FlexMatch match.
A matchmaking configuration may require player acceptance; if so, then matches
built with that configuration cannot be completed unless all players accept the
proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the
proposed match are placed into status `REQUIRES_ACCEPTANCE`. This is a trigger
for your game to get acceptance from all players in the ticket. Acceptances are
only valid for tickets when they are in this status; all other acceptances
result in an error.
To register acceptance, specify the ticket ID, a response, and one or more
players. Once all players have registered acceptance, the matchmaking tickets
advance to status `PLACING`, where a new game session is created for the match.
If any player rejects the match, or if acceptances are not received before a
specified timeout, the proposed match is dropped. The matchmaking tickets are
then handled in one of two ways: For tickets where one or more players rejected
the match or failed to respond, the ticket status is set to `CANCELLED`, and
processing is terminated. For tickets where players have accepted or not yet
responded, the ticket status is returned to `SEARCHING` to find a new match. A
new matchmaking request for these players can be submitted as needed.
## Learn more
[ Add FlexMatch to a game client](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html)
[ FlexMatch events](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html)
(reference)
## Related actions
`StartMatchmaking` | `DescribeMatchmaking` | `StopMatchmaking` | `AcceptMatch` |
`StartMatchBackfill` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def accept_match(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AcceptMatch", input, options)
end
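# Usage sketch (the field names follow the AcceptMatch request syntax from the
# GameLift API reference; `client` is assumed to be an already-configured
# `%AWS.Client{}` struct and the ids are illustrative):
#
#     input = %{
#       "TicketId" => "ticket-1234",
#       "PlayerIds" => ["player-1", "player-2"],
#       "AcceptanceType" => "ACCEPT"
#     }
#
#     AWS.GameLift.accept_match(client, input)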
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Locates an available game server and temporarily reserves it to host gameplay
and players.
This operation is called from a game client or client service (such as a
matchmaker) to request hosting resources for a new game session. In response,
GameLift FleetIQ locates an available game server, places it in `CLAIMED` status
for 60 seconds, and returns connection information that players can use to
connect to the game server.
To claim a game server, identify a game server group. You can also specify a
game server ID, although this approach bypasses GameLift FleetIQ placement
optimization. Optionally, include game data to pass to the game server at the
start of a game session, such as a game map or player information.
When a game server is successfully claimed, connection information is returned.
A claimed game server's utilization status remains `AVAILABLE` while the claim
status is set to `CLAIMED` for up to 60 seconds. This time period gives the game
server time to update its status to `UTILIZED` (using `UpdateGameServer`) once
players join. If the game server's status is not updated within 60 seconds, the
game server reverts to unclaimed status and is available to be claimed by
another request. The claim time period is a fixed value and is not configurable.
If you try to claim a specific game server, this request will fail in the
following cases:
* If the game server utilization status is `UTILIZED`.
* If the game server claim status is `CLAIMED`.
When claiming a specific game server, this request will succeed even if the game
server is running on an instance in `DRAINING` status. To avoid this, first
check the instance status by calling `DescribeGameServerInstances`.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`RegisterGameServer` | `ListGameServers` | `ClaimGameServer` |
`DescribeGameServer` | `UpdateGameServer` | `DeregisterGameServer` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def claim_game_server(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ClaimGameServer", input, options)
end
@doc """
Creates an alias for a fleet.
In most situations, you can use an alias ID in place of a fleet ID. An alias
provides a level of abstraction for a fleet that is useful when redirecting
player traffic from one fleet to another, such as when updating your game build.
Amazon GameLift supports two types of routing strategies for aliases: simple and
terminal. A simple alias points to an active fleet. A terminal alias is used to
display messaging or link to a URL instead of routing players to an active
fleet. For example, you might use a terminal alias when a game version is no
longer supported and you want to direct players to an upgrade site.
To create a fleet alias, specify an alias name, routing strategy, and optional
description. Each simple alias can point to only one fleet, but a fleet can have
multiple aliases. If successful, a new alias record is returned, including an
alias ID and an ARN. You can reassign an alias to another fleet by calling
`UpdateAlias`.
## Related actions
`CreateAlias` | `ListAliases` | `DescribeAlias` | `UpdateAlias` | `DeleteAlias`
| `ResolveAlias` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def create_alias(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAlias", input, options)
end
@doc """
Creates a new Amazon GameLift build resource for your game server binary files.
Game server binaries must be combined into a zip file for use with Amazon
GameLift.
When setting up a new game build for GameLift, we recommend using the Amazon Web
Services CLI command **
[upload-build](https://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html) **. This helper command combines two tasks: (1) it uploads your build files from
a file directory to a GameLift Amazon S3 location, and (2) it creates a new
build resource.
The `CreateBuild` operation can be used in the following scenarios:
* To create a new game build with build files that are in an Amazon
S3 location under an Amazon Web Services account that you control. To use this
option, you must first give Amazon GameLift access to the Amazon S3 bucket. With
permissions in place, call `CreateBuild` and specify a build name, operating
system, and the Amazon S3 storage location of your game build.
* To directly upload your build files to a GameLift Amazon S3
location. To use this option, first call `CreateBuild` and specify a build name
and operating system. This operation creates a new build resource and also
returns an Amazon S3 location with temporary access credentials. Use the
credentials to manually upload your build files to the specified Amazon S3
location. For more information, see [Uploading
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html)
in the *Amazon S3 Developer Guide*. Build files can be uploaded to the GameLift
Amazon S3 location once only; they can't be updated afterward.
If successful, this operation creates a new build resource with a unique build
ID and places it in `INITIALIZED` status. A build must be in `READY` status
before you can create fleets with it.
## Learn more
[Uploading Your Game](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
[ Create a Build with Files in Amazon S3](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)
## Related actions
`CreateBuild` | `ListBuilds` | `DescribeBuild` | `UpdateBuild` | `DeleteBuild` |
[All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def create_build(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateBuild", input, options)
end
@doc """
Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2)
instances to host your custom game server or Realtime Servers.
Use this operation to configure the computing resources for your fleet and
provide instructions for running game servers on each instance.
Most GameLift fleets can deploy instances to multiple locations, including the
home Region (where the fleet is created) and an optional set of remote
locations. Fleets that are created in the following Amazon Web Services Regions
support multiple locations: us-east-1 (N. Virginia), us-west-2 (Oregon),
eu-central-1 (Frankfurt), eu-west-1 (Ireland), ap-southeast-2 (Sydney),
ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul). Fleets that are created in
other GameLift Regions can deploy instances in the fleet's home Region only. All
fleet instances use the same configuration regardless of location; however, you
can adjust capacity settings and turn auto-scaling on/off for each location.
To create a fleet, choose the hardware for your instances, specify a game server
build or Realtime script to deploy, and provide a runtime configuration to
direct GameLift how to start and run game servers on each instance in the fleet.
Set permissions for inbound traffic to your game servers, and enable optional
features as needed. When creating a multi-location fleet, provide a list of
additional remote locations.
If you need to debug your fleet, fetch logs, view performance metrics or other
actions on the fleet, create the development fleet with port 22/3389 open. As a
best practice, we recommend opening ports for remote access only when you need
them and closing them when you're finished.
If successful, this operation creates a new Fleet resource and places it in
`NEW` status, which prompts GameLift to initiate the [fleet creation workflow](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creation-workflow.html).
You can track fleet creation by checking fleet status using
`DescribeFleetAttributes` and `DescribeFleetLocationAttributes`, or by
monitoring fleet creation events using `DescribeFleetEvents`. As soon as the
fleet status changes to `ACTIVE`, you can enable automatic scaling for the fleet
with `PutScalingPolicy` and set capacity for the home Region with
`UpdateFleetCapacity`. When the status of each remote location reaches `ACTIVE`,
you can set capacity by location using `UpdateFleetCapacity`.
## Learn more
[Setting up fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[Debug fleet creation issues](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation)
[Multi-location fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleet` | `UpdateFleetCapacity` | `PutScalingPolicy` |
`DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetLocationAttributes` | `UpdateFleetAttributes` | `StopFleetActions`
| `DeleteFleet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def create_fleet(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateFleet", input, options)
end
@doc """
Adds remote locations to a fleet and begins populating the new locations with
EC2 instances.
The new instances conform to the fleet's instance type, auto-scaling, and other
configuration settings.
This operation cannot be used with fleets that don't support remote locations.
Fleets can have multiple locations only if they reside in Amazon Web Services
Regions that support this feature (see `CreateFleet` for the complete list) and
were created after the feature was released in March 2021.
To add fleet locations, specify the fleet to be updated and provide a list of
one or more locations.
If successful, this operation returns the list of added locations with their
status set to `NEW`. GameLift initiates the process of starting an instance in
each added location. You can track the status of each new location by monitoring
location creation events using `DescribeFleetEvents`. Alternatively, you can
poll location status by calling `DescribeFleetLocationAttributes`. After a
location status becomes `ACTIVE`, you can adjust the location's capacity as
needed with `UpdateFleetCapacity`.
## Learn more
[Setting up fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[Multi-location fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `DescribeFleetLocationAttributes` |
`DescribeFleetLocationCapacity` | `DescribeFleetLocationUtilization` |
`DescribeFleetAttributes` | `DescribeFleetCapacity` | `DescribeFleetUtilization`
| `UpdateFleetCapacity` | `StopFleetActions` | `DeleteFleetLocations` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def create_fleet_locations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateFleetLocations", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Creates a GameLift FleetIQ game server group for managing game hosting on a
collection of Amazon Elastic Compute Cloud instances for game hosting.
This operation creates the game server group, creates an Auto Scaling group in
your Amazon Web Services account, and establishes a link between the two groups.
You can view the status of your game server groups in the GameLift console. Game
server group metrics and events are emitted to Amazon CloudWatch.
Before creating a new game server group, you must have the following:
* An Amazon Elastic Compute Cloud launch template that specifies how
to launch Amazon Elastic Compute Cloud instances with your game server build.
For more information, see [ Launching an Instance from a Launch Template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
in the *Amazon Elastic Compute Cloud User Guide*.
* An IAM role that extends limited access to your Amazon Web
Services account to allow GameLift FleetIQ to create and interact with the Auto
Scaling group. For more information, see [Create IAM roles for cross-service interaction](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-iam-permissions-roles.html)
in the *GameLift FleetIQ Developer Guide*.
To create a new game server group, specify a unique group name, IAM role and
Amazon Elastic Compute Cloud launch template, and provide a list of instance
types that can be used in the group. You must also set initial maximum and
minimum limits on the group's instance count. You can optionally set an Auto
Scaling policy with target tracking based on a GameLift FleetIQ metric.
Once the game server group and corresponding Auto Scaling group are created, you
have full access to change the Auto Scaling group's configuration as needed.
Several properties that are set when creating a game server group, including
maximum/minimum size and auto-scaling policy settings, must be updated directly
in the Auto Scaling group. Keep in mind that some Auto Scaling group properties
are periodically updated by GameLift FleetIQ as part of its balancing activities
to optimize for availability and cost.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def create_game_server_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGameServerGroup", input, options)
end
@doc """
Creates a multiplayer game session for players in a specific fleet location.
This operation prompts an available server process to start a game session and
retrieves connection information for the new game session. As an alternative,
consider using the GameLift game session placement feature with
with `StartGameSessionPlacement`, which uses FleetIQ algorithms and queues to
optimize the placement process.
When creating a game session, you specify exactly where you want to place it and
provide a set of game session configuration settings. The fleet must be in
`ACTIVE` status before a game session can be created in it.
This operation can be used in the following ways:
* To create a game session on an instance in a fleet's home Region,
provide a fleet or alias ID along with your game session configuration.
* To create a game session on an instance in a fleet's remote
location, provide a fleet or alias ID and a location name, along with your game
session configuration.
If successful, a workflow is initiated to start a new game session. A
`GameSession` object is returned containing the game session configuration and
status. When the status is `ACTIVE`, game session connection information is
provided and player sessions can be created for the game session. By default,
newly created game sessions are open to new players. You can restrict new player
access by using `UpdateGameSession` to change the game session's player session
creation policy.
Game session logs are retained for all active game sessions for 14 days. To
access the logs, call `GetGameSessionLogUrl` to download the log files.
*Available in Amazon GameLift Local.*
## Learn more
[Start a game session](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def create_game_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGameSession", input, options)
end
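# Usage sketch (field names follow the CreateGameSession request syntax; the
# fleet id is illustrative and `client` is assumed to be an already-configured
# `%AWS.Client{}` struct):
#
#     input = %{
#       "FleetId" => "fleet-1234",
#       "MaximumPlayerSessionCount" => 4,
#       "Name" => "my-game-session"
#     }
#
#     AWS.GameLift.create_game_session(client, input)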
@doc """
Creates a placement queue that processes requests for new game sessions.
A queue uses FleetIQ algorithms to determine the best placement locations and
find an available game server there, then prompts the game server process to
start a new game session.
A game session queue is configured with a set of destinations (GameLift fleets
or aliases), which determine the locations where the queue can place new game
sessions. These destinations can span multiple fleet types (Spot and On-Demand),
instance types, and Amazon Web Services Regions. If the queue includes
multi-location fleets, the queue is able to place game sessions in all of a
fleet's remote locations. You can opt to filter out individual locations if
needed.
The queue configuration also determines how FleetIQ selects the best available
placement for a new game session. Before searching for an available game server,
FleetIQ first prioritizes the queue's destinations and locations, with the best
placement locations on top. You can set up the queue to use the FleetIQ default
prioritization or provide an alternate set of priorities.
To create a new queue, provide a name, timeout value, and a list of
destinations. Optionally, specify a sort configuration and/or a filter, and
define a set of latency cap policies. You can also include the ARN for an Amazon
Simple Notification Service (SNS) topic to receive notifications of game session
placement activity. Notifications using SNS or CloudWatch events is the
preferred way to track placement activity.
If successful, a new `GameSessionQueue` object is returned with an assigned
queue ARN. New game session requests, which are submitted to queue with
[StartGameSessionPlacement](https://docs.aws.amazon.com/gamelift/latest/apireference/API_StartGameSessionPlacement.html) or
[StartMatchmaking](https://docs.aws.amazon.com/gamelift/latest/apireference/API_StartMatchmaking.html),
reference a queue's name or ARN.
## Learn more
[ Design a game session queue](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-design.html)
[ Create a game session queue](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-creating.html)
## Related actions
[CreateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateGameSessionQueue.html) |
[DescribeGameSessionQueues](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeGameSessionQueues.html)
|
[UpdateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateGameSessionQueue.html) |
[DeleteGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DeleteGameSessionQueue.html)
| [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
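## Example

A minimal sketch of creating a queue with a single fleet destination. The ARN
and other values are placeholders; `client` is a configured AWS client struct.

    {:ok, body, _http_response} =
      create_game_session_queue(client, %{
        "Name" => "my-queue",
        "TimeoutInSeconds" => 600,
        "Destinations" => [
          %{"DestinationArn" => "arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff"}
        ]
      })

    queue_arn = body["GameSessionQueue"]["GameSessionQueueArn"]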
"""
def create_game_session_queue(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGameSessionQueue", input, options)
end
@doc """
Defines a new matchmaking configuration for use with FlexMatch.
Whether you are using FlexMatch with GameLift hosting or as a standalone
matchmaking service, the matchmaking configuration sets out rules for matching
players and forming teams. If you're also using GameLift hosting, it defines how
to start game sessions for each match. Your matchmaking system can use multiple
configurations to handle different game scenarios. All matchmaking requests
(`StartMatchmaking` or `StartMatchBackfill`) identify the matchmaking
configuration to use and provide player attributes consistent with that
configuration.
To create a matchmaking configuration, you must provide the following:
configuration name and FlexMatch mode (with or without GameLift hosting); a rule
set that specifies how to evaluate players and find acceptable matches; whether
player acceptance is required; and the maximum time allowed for a matchmaking
attempt. When using FlexMatch with GameLift hosting, you also need to identify
the game session queue to use when starting a game session for the match.
In addition, you must set up an Amazon Simple Notification Service topic to
receive matchmaking notifications. Provide the topic ARN in the matchmaking
configuration. An alternative method, continuously polling ticket status with
`DescribeMatchmaking`, is only suitable for games in development with low
matchmaking usage.
## Learn more
[ Design a FlexMatch matchmaker](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
[ Set up FlexMatch event notification](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
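## Example

A minimal sketch of a standalone (no GameLift hosting) matchmaker. The names
and ARN are placeholders, the referenced rule set must already exist, and
`client` is a configured AWS client struct.

    {:ok, body, _http_response} =
      create_matchmaking_configuration(client, %{
        "Name" => "my-matchmaker",
        "RuleSetName" => "my-rule-set",
        "RequestTimeoutSeconds" => 120,
        "AcceptanceRequired" => false,
        "FlexMatchMode" => "STANDALONE",
        "NotificationTarget" => "arn:aws:sns:us-west-2:111122223333:flexmatch-events"
      })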
"""
def create_matchmaking_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateMatchmakingConfiguration", input, options)
end
@doc """
Creates a new rule set for FlexMatch matchmaking.
A rule set describes the type of match to create, such as the number and size of
teams. It also sets the parameters for acceptable player matches, such as
minimum skill level or character type. A rule set is used by a
`MatchmakingConfiguration`.
To create a matchmaking rule set, provide unique rule set name and the rule set
body in JSON format. Rule sets must be defined in the same Region as the
matchmaking configuration they are used with.
Since matchmaking rule sets cannot be edited, it is a good idea to check the
rule set syntax using `ValidateMatchmakingRuleSet` before creating a new rule
set.
## Learn more
* [Build a rule set](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html)
* [Design a matchmaker](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
* [Matchmaking with FlexMatch](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-intro.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
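## Example

A minimal sketch using a tiny inline rule set body (JSON, passed as a string).
The rule set shown is illustrative only, not a complete production rule set;
`client` is a configured AWS client struct.

    rule_set_body =
      ~s({"ruleLanguageVersion": "1.0", "teams": [{"name": "players", "minPlayers": 2, "maxPlayers": 4}]})

    {:ok, body, _http_response} =
      create_matchmaking_rule_set(client, %{
        "Name" => "simple-2v2",
        "RuleSetBody" => rule_set_body
      })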
"""
def create_matchmaking_rule_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateMatchmakingRuleSet", input, options)
end
@doc """
Reserves an open player slot in a game session for a player.
New player sessions can be created in any game session with an open slot that is
in `ACTIVE` status and has a player creation policy of `ACCEPT_ALL`. You can add
a group of players to a game session with `CreatePlayerSessions`.
To create a player session, specify a game session ID, player ID, and optionally
a set of player data.
If successful, a slot is reserved in the game session for the player and a new
`PlayerSession` object is returned with a player session ID. The player
references the player session ID when sending a connection request to the game
session, and the game server can use it to validate the player reservation with
the GameLift service. Player sessions cannot be updated.
The maximum number of players per game session is 200. It is not adjustable.
*Available in Amazon GameLift Local.*
## Related actions
`CreatePlayerSession` | `CreatePlayerSessions` | `DescribePlayerSessions` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
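## Example

A minimal sketch. The game session ID and player ID are placeholders; `client`
is a configured AWS client struct.

    {:ok, body, _http_response} =
      create_player_session(client, %{
        "GameSessionId" => "arn:aws:gamelift:us-west-2::gamesession/fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff/my-test-session",
        "PlayerId" => "player-123"
      })

    player_session_id = body["PlayerSession"]["PlayerSessionId"]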
"""
def create_player_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePlayerSession", input, options)
end
@doc """
Reserves open slots in a game session for a group of players.
New player sessions can be created in any game session with an open slot that is
in `ACTIVE` status and has a player creation policy of `ACCEPT_ALL`. To add a
single player to a game session, use `CreatePlayerSession`.
To create player sessions, specify a game session ID and a list of player IDs.
Optionally, provide a set of player data for each player ID.
If successful, a slot is reserved in the game session for each player, and new
`PlayerSession` objects are returned with player session IDs. Each player
references their player session ID when sending a connection request to the game
session, and the game server can use it to validate the player reservation with
the GameLift service. Player sessions cannot be updated.
The maximum number of players per game session is 200. It is not adjustable.
*Available in Amazon GameLift Local.*
## Related actions
`CreatePlayerSession` | `CreatePlayerSessions` | `DescribePlayerSessions` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
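## Example

A minimal sketch that reserves slots for three players at once. All IDs are
placeholders; `client` is a configured AWS client struct.

    {:ok, body, _http_response} =
      create_player_sessions(client, %{
        "GameSessionId" => "arn:aws:gamelift:us-west-2::gamesession/fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff/my-test-session",
        "PlayerIds" => ["player-1", "player-2", "player-3"]
      })

    # One PlayerSession object is returned per reserved slot.
    player_sessions = body["PlayerSessions"]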
"""
def create_player_sessions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePlayerSessions", input, options)
end
@doc """
Creates a new script record for your Realtime Servers script.
Realtime scripts are JavaScript that provide configuration settings and optional
custom game logic for your game. The script is deployed when you create a
Realtime Servers fleet to host your game sessions. Script logic is executed
during an active game session.
To create a new script record, specify a script name and provide the script
file(s). The script files and all dependencies must be zipped into a single
file. You can pull the zip file from either of these locations:
* A locally available directory. Use the *ZipFile* parameter for
this option.
* An Amazon Simple Storage Service (Amazon S3) bucket under your
Amazon Web Services account. Use the *StorageLocation* parameter for this
option. You'll need to have an Identity and Access Management (IAM) role that allows
the Amazon GameLift service to access your S3 bucket.
If the call is successful, a new script record is created with a unique script
ID. If the script file is provided as a local file, the file is uploaded to an
Amazon GameLift-owned S3 bucket and the script record's storage location
reflects this location. If the script file is provided as an S3 bucket, Amazon
GameLift accesses the file at this storage location as needed for deployment.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
[Set Up a Role for Amazon GameLift Access](https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html)
## Related actions
`CreateScript` | `ListScripts` | `DescribeScript` | `UpdateScript` |
`DeleteScript` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
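## Example

A minimal sketch using a script that has already been uploaded to S3. The
bucket, key, and role ARN are placeholders, and the role must grant GameLift
access to the bucket; `client` is a configured AWS client struct.

    {:ok, body, _http_response} =
      create_script(client, %{
        "Name" => "my-realtime-script",
        "Version" => "1.0",
        "StorageLocation" => %{
          "Bucket" => "my-script-bucket",
          "Key" => "realtime/script.zip",
          "RoleArn" => "arn:aws:iam::111122223333:role/gamelift-s3-access"
        }
      })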
"""
def create_script(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateScript", input, options)
end
@doc """
Requests authorization to create or delete a peer connection between the VPC for
your Amazon GameLift fleet and a virtual private cloud (VPC) in your Amazon Web
Services account.
VPC peering enables the game servers on your fleet to communicate directly with
other Amazon Web Services resources. Once you've received authorization, call
`CreateVpcPeeringConnection` to establish the peering connection. For more
information, see [VPC Peering with Amazon GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html).
You can peer with VPCs that are owned by any Amazon Web Services account you
have access to, including the account that you use to manage your Amazon
GameLift fleets. You cannot peer with VPCs that are in different Regions.
To request authorization to create a connection, call this operation from the
Amazon Web Services account with the VPC that you want to peer to your Amazon
GameLift fleet. For example, to enable your game servers to retrieve data from a
DynamoDB table, use the account that manages that DynamoDB resource. Identify
the following values: (1) The ID of the VPC that you want to peer with, and (2)
the ID of the Amazon Web Services account that you use to manage Amazon
GameLift. If successful, VPC peering is authorized for the specified VPC.
To request authorization to delete a connection, call this operation from the
Amazon Web Services account with the VPC that is peered with your Amazon
GameLift fleet. Identify the following values: (1) VPC ID that you want to
delete the peering connection for, and (2) ID of the Amazon Web Services account
that you use to manage Amazon GameLift.
The authorization remains valid for 24 hours unless it is canceled by a call to
`DeleteVpcPeeringAuthorization`. You must create or delete the peering
connection while the authorization is valid.
## Related actions
`CreateVpcPeeringAuthorization` | `DescribeVpcPeeringAuthorizations` |
`DeleteVpcPeeringAuthorization` | `CreateVpcPeeringConnection` |
`DescribeVpcPeeringConnections` | `DeleteVpcPeeringConnection` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
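## Example

A minimal sketch, called from the account that owns the VPC you want to peer
with your fleet. Both values are placeholders; `client` is a configured AWS
client struct.

    {:ok, body, _http_response} =
      create_vpc_peering_authorization(client, %{
        "GameLiftAwsAccountId" => "111122223333",
        "PeerVpcId" => "vpc-0123456789abcdef0"
      })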
"""
def create_vpc_peering_authorization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateVpcPeeringAuthorization", input, options)
end
@doc """
Establishes a VPC peering connection between a virtual private cloud (VPC) in an
Amazon Web Services account with the VPC for your Amazon GameLift fleet.
VPC peering enables the game servers on your fleet to communicate directly with
other Amazon Web Services resources. You can peer with VPCs in any Amazon Web
Services account that you have access to, including the account that you use to
manage your Amazon GameLift fleets. You cannot peer with VPCs that are in
different Regions. For more information, see [VPC Peering with Amazon GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html).
Before calling this operation to establish the peering connection, you first
need to call `CreateVpcPeeringAuthorization` and identify the VPC you want to
peer with. Once the authorization for the specified VPC is issued, you have 24
hours to establish the connection. These two operations handle all tasks
necessary to peer the two VPCs, including acceptance, updating routing tables,
etc.
To establish the connection, call this operation from the Amazon Web Services
account that is used to manage the Amazon GameLift fleets. Identify the
following values: (1) The ID of the fleet you want to enable a VPC peering
connection for; (2) The Amazon Web Services account with the VPC that you want
to peer with; and (3) The ID of the VPC you want to peer with. This operation is
asynchronous. If successful, a `VpcPeeringConnection` request is created. You
can use continuous polling to track the request's status using
`DescribeVpcPeeringConnections`, or by monitoring fleet events for success or
failure using `DescribeFleetEvents`.
## Related actions
`CreateVpcPeeringAuthorization` | `DescribeVpcPeeringAuthorizations` |
`DeleteVpcPeeringAuthorization` | `CreateVpcPeeringConnection` |
`DescribeVpcPeeringConnections` | `DeleteVpcPeeringConnection` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
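## Example

A minimal sketch, called from the account that manages your GameLift fleets
once a valid authorization exists. All IDs are placeholders; `client` is a
configured AWS client struct.

    {:ok, _body, _http_response} =
      create_vpc_peering_connection(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "PeerVpcAwsAccountId" => "444455556666",
        "PeerVpcId" => "vpc-0123456789abcdef0"
      })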
"""
def create_vpc_peering_connection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateVpcPeeringConnection", input, options)
end
@doc """
Deletes an alias.
This operation removes all record of the alias. Game clients attempting to
access a server process using the deleted alias receive an error. To delete an
alias, specify the alias ID to be deleted.
## Related actions
`CreateAlias` | `ListAliases` | `DescribeAlias` | `UpdateAlias` | `DeleteAlias`
| `ResolveAlias` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_alias(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteAlias", input, options)
end
@doc """
Deletes a build.
This operation permanently deletes the build resource and any uploaded build
files. Deleting a build does not affect the status of any active fleets using
the build, but you can no longer create new fleets with the deleted build.
To delete a build, specify the build ID.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related actions
`CreateBuild` | `ListBuilds` | `DescribeBuild` | `UpdateBuild` | `DeleteBuild` |
[All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_build(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteBuild", input, options)
end
@doc """
Deletes all resources and information related a fleet.
Any current fleet instances, including those in remote locations, are shut down.
You don't need to call `DeleteFleetLocations` separately.
If the fleet being deleted has a VPC peering connection, you first need to get a
valid authorization (good for 24 hours) by calling
`CreateVpcPeeringAuthorization`. You do not need to explicitly delete the VPC
peering connection--this is done as part of the delete fleet process.
To delete a fleet, specify the fleet ID to be terminated. During the deletion
process the fleet status is changed to `DELETING`. When completed, the status
switches to `TERMINATED` and the fleet event `FLEET_DELETED` is sent.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `UpdateFleetAttributes` | `UpdateFleetCapacity` |
`UpdateFleetPortSettings` | `UpdateRuntimeConfiguration` | `StopFleetActions` |
`StartFleetActions` | `PutScalingPolicy` | `DeleteFleet` |
`DeleteFleetLocations` | `DeleteScalingPolicy` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
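## Example

A minimal sketch. The fleet ID is a placeholder; `client` is a configured AWS
client struct.

    {:ok, _body, _http_response} =
      delete_fleet(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff"
      })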
"""
def delete_fleet(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteFleet", input, options)
end
@doc """
Removes locations from a multi-location fleet.
When deleting a location, all game server processes and all instances that are
still active in the location are shut down.
To delete fleet locations, identify the fleet ID and provide a list of the
locations to be deleted.
If successful, GameLift sets the location status to `DELETING`, and begins to
shut down existing server processes and terminate instances in each location
being deleted. When completed, the location status changes to `TERMINATED`.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `DescribeFleetLocationAttributes` |
`DescribeFleetLocationCapacity` | `DescribeFleetLocationUtilization` |
`DescribeFleetAttributes` | `DescribeFleetCapacity` | `DescribeFleetUtilization`
| `UpdateFleetCapacity` | `StopFleetActions` | `DeleteFleetLocations` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_fleet_locations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteFleetLocations", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Terminates a game server group and permanently deletes the game server group
record.
You have several options for how these resources are impacted when deleting the
game server group. Depending on the type of delete operation selected, this
operation might affect these resources:
* The game server group
* The corresponding Auto Scaling group
* All game servers that are currently running in the group
To delete a game server group, identify the game server group to delete and
specify the type of delete operation to initiate. Game server groups can only be
deleted if they are in `ACTIVE` or `ERROR` status.
If the delete request is successful, a series of operations are kicked off. The
game server group status is changed to `DELETE_SCHEDULED`, which prevents new
game servers from being registered and stops automatic scaling activity. Once
all game servers in the game server group are deregistered, GameLift FleetIQ can
begin deleting resources. If any of the delete operations fail, the game server
group is placed in `ERROR` status.
GameLift FleetIQ emits delete events to Amazon CloudWatch.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
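## Example

A minimal sketch using the safe-delete option, which only proceeds when no game
servers in the group are in use. The group name is a placeholder; `client` is a
configured AWS client struct.

    {:ok, body, _http_response} =
      delete_game_server_group(client, %{
        "GameServerGroupName" => "my-game-server-group",
        "DeleteOption" => "SAFE_DELETE"
      })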
"""
def delete_game_server_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteGameServerGroup", input, options)
end
@doc """
Deletes a game session queue.
Once a queue is successfully deleted, unfulfilled
[StartGameSessionPlacement](https://docs.aws.amazon.com/gamelift/latest/apireference/API_StartGameSessionPlacement.html) requests that reference the queue will fail. To delete a queue, specify the
queue name.
## Learn more
[ Using Multi-Region
Queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html)
## Related actions
[CreateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateGameSessionQueue.html) |
[DescribeGameSessionQueues](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeGameSessionQueues.html)
|
[UpdateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateGameSessionQueue.html) |
[DeleteGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DeleteGameSessionQueue.html)
| [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_game_session_queue(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteGameSessionQueue", input, options)
end
@doc """
Permanently removes a FlexMatch matchmaking configuration.
To delete, specify the configuration name. A matchmaking configuration cannot be
deleted if it is being used in any active matchmaking tickets.
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_matchmaking_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteMatchmakingConfiguration", input, options)
end
@doc """
Deletes an existing matchmaking rule set.
To delete the rule set, provide the rule set name. Rule sets cannot be deleted
if they are currently being used by a matchmaking configuration.
## Learn more
* [Build a rule set](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_matchmaking_rule_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteMatchmakingRuleSet", input, options)
end
@doc """
Deletes a fleet scaling policy.
Once deleted, the policy is no longer in force and GameLift removes all record
of it. To delete a scaling policy, specify both the scaling policy name and the
fleet ID it is associated with.
To temporarily suspend scaling policies, call `StopFleetActions`. This operation
suspends all policies for the fleet.
## Related actions
`DescribeFleetCapacity` | `UpdateFleetCapacity` | `DescribeEC2InstanceLimits` |
`PutScalingPolicy` | `DescribeScalingPolicies` | `DeleteScalingPolicy` |
`StopFleetActions` | `StartFleetActions` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteScalingPolicy", input, options)
end
@doc """
Deletes a Realtime script.
This operation permanently deletes the script record. If script files were
uploaded, they are also deleted (files stored in an S3 bucket are not deleted).
To delete a script, specify the script ID. Before deleting a script, be sure to
terminate all fleets that are deployed with the script being deleted. Fleet
instances periodically check for script updates, and if the script record no
longer exists, the instance will go into an error state and be unable to host
game sessions.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related actions
`CreateScript` | `ListScripts` | `DescribeScript` | `UpdateScript` |
`DeleteScript` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_script(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteScript", input, options)
end
@doc """
Cancels a pending VPC peering authorization for the specified VPC.
If you need to delete an existing VPC peering connection, call
`DeleteVpcPeeringConnection`.
## Related actions
`CreateVpcPeeringAuthorization` | `DescribeVpcPeeringAuthorizations` |
`DeleteVpcPeeringAuthorization` | `CreateVpcPeeringConnection` |
`DescribeVpcPeeringConnections` | `DeleteVpcPeeringConnection` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_vpc_peering_authorization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteVpcPeeringAuthorization", input, options)
end
@doc """
Removes a VPC peering connection.
To delete the connection, you must have a valid authorization for the VPC
peering connection that you want to delete. You can check for an authorization
by calling `DescribeVpcPeeringAuthorizations` or request a new one using
`CreateVpcPeeringAuthorization`.
Once a valid authorization exists, call this operation from the Amazon Web
Services account that is used to manage the Amazon GameLift fleets. Identify the
connection to delete by the connection ID and fleet ID. If successful, the
connection is removed.
## Related actions
`CreateVpcPeeringAuthorization` | `DescribeVpcPeeringAuthorizations` |
`DeleteVpcPeeringAuthorization` | `CreateVpcPeeringConnection` |
`DescribeVpcPeeringConnections` | `DeleteVpcPeeringConnection` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def delete_vpc_peering_connection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteVpcPeeringConnection", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Removes the game server from a game server group.
As a result of this operation, the deregistered game server can no longer be
claimed and will not be returned in a list of active game servers.
To deregister a game server, specify the game server group and game server ID.
If successful, this operation emits a CloudWatch event with termination
timestamp and reason.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`RegisterGameServer` | `ListGameServers` | `ClaimGameServer` |
`DescribeGameServer` | `UpdateGameServer` | `DeregisterGameServer` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def deregister_game_server(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterGameServer", input, options)
end
@doc """
Retrieves properties for an alias.
This operation returns all alias metadata and settings. To get an alias's target
fleet ID only, use `ResolveAlias`.
To get alias properties, specify the alias ID. If successful, the requested
alias record is returned.
## Related actions
`CreateAlias` | `ListAliases` | `DescribeAlias` | `UpdateAlias` | `DeleteAlias`
| `ResolveAlias` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_alias(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAlias", input, options)
end
@doc """
Retrieves properties for a custom game build.
To request a build resource, specify a build ID. If successful, an object
containing the build properties is returned.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related actions
`CreateBuild` | `ListBuilds` | `DescribeBuild` | `UpdateBuild` | `DeleteBuild` |
[All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_build(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBuild", input, options)
end
@doc """
Retrieves the instance limits and current utilization for an Amazon Web Services
Region or location.
Instance limits control the number of instances, per instance type, per
location, that your Amazon Web Services account can use. Learn more at [Amazon EC2 Instance Types](http://aws.amazon.com/ec2/instance-types/). The information
returned includes the maximum number of instances allowed and your account's
current usage across all fleets. This information can affect your ability to
scale your GameLift fleets. You can request a limit increase for your account by
using the **Service limits** page in the GameLift console.
Instance limits differ based on whether the instances are deployed in a fleet's
home Region or in a remote location. For remote locations, limits also differ
based on the combination of home Region and remote location. All requests must
specify an Amazon Web Services Region (either explicitly or as your default
settings). To get the limit for a remote location, you must also specify the
location. For example, the following requests all return different results:
* Request specifies the Region `ap-northeast-1` with no location.
The result is limits and usage data on all instance types that are deployed in
`ap-northeast-1`, by all of the fleets that reside in `ap-northeast-1`.
* Request specifies the Region `us-east-1` with location
`ca-central-1`. The result is limits and usage data on all instance types that
are deployed in `ca-central-1`, by all of the fleets that reside in `us-east-1`.
These limits do not affect fleets in any other Regions that deploy instances to
`ca-central-1`.
* Request specifies the Region `eu-west-1` with location
`ca-central-1`. The result is limits and usage data on all instance types that
are deployed in `ca-central-1`, by all of the fleets that reside in `eu-west-1`.
This operation can be used in the following ways:
* To get limit and usage data for all instance types that are
deployed in an Amazon Web Services Region by fleets that reside in the same
Region: Specify the Region only. Optionally, specify a single instance type to
retrieve information for.
* To get limit and usage data for all instance types that are
deployed to a remote location by fleets that reside in different Amazon Web
Services Region: Provide both the Amazon Web Services Region and the remote
location. Optionally, specify a single instance type to retrieve information
for.
If successful, an `EC2InstanceLimits` object is returned with limits and usage
data for each requested instance type.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleet` | `UpdateFleetCapacity` | `PutScalingPolicy` |
`DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetLocationAttributes` | `UpdateFleetAttributes` | `StopFleetActions`
| `DeleteFleet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
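## Example

A minimal sketch that checks limits for a single instance type in a remote
location. The values are placeholders; `client` is a configured AWS client
struct whose default Region is the fleets' home Region.

    {:ok, body, _http_response} =
      describe_ec2_instance_limits(client, %{
        "EC2InstanceType" => "c5.large",
        "Location" => "ca-central-1"
      })

    limits = body["EC2InstanceLimits"]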
"""
def describe_ec2_instance_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEC2InstanceLimits", input, options)
end
@doc """
Retrieves core fleet-wide properties, including the computing hardware and
deployment configuration for all instances in the fleet.
This operation can be used in the following ways:
* To get attributes for one or more specific fleets, provide a list
of fleet IDs or fleet ARNs.
* To get attributes for all fleets, do not provide a fleet
identifier.
When requesting attributes for multiple fleets, use the pagination parameters to
retrieve results as a set of sequential pages.
If successful, a `FleetAttributes` object is returned for each fleet requested,
unless the fleet identifier is not found.
Some API operations limit the number of fleet IDs that are allowed in one request.
If a request exceeds this limit, the request fails and the error message
contains the maximum allowed number.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`ListFleets` | `DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetCapacity` | `DescribeFleetEvents` |
`DescribeFleetLocationAttributes` | `DescribeFleetPortSettings` |
`DescribeFleetUtilization` | `DescribeRuntimeConfiguration` |
`DescribeScalingPolicies` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
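## Example

A minimal sketch requesting attributes for two specific fleets. The fleet IDs
are placeholders; `client` is a configured AWS client struct.

    {:ok, body, _http_response} =
      describe_fleet_attributes(client, %{
        "FleetIds" => [
          "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
          "fleet-2222bbbb-33cc-44dd-55ee-6666ffff77aa"
        ]
      })

    attributes = body["FleetAttributes"]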
"""
def describe_fleet_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetAttributes", input, options)
end
@doc """
Retrieves the resource capacity settings for one or more fleets.
The data returned includes the current fleet capacity (number of EC2 instances),
and settings that control capacity scaling. For fleets with remote
locations, this operation retrieves data for the fleet's home Region only. See
`DescribeFleetLocationCapacity` to get capacity settings for a fleet's remote
locations.
This operation can be used in the following ways:
* To get capacity data for one or more specific fleets, provide a
list of fleet IDs or fleet ARNs.
* To get capacity data for all fleets, do not provide a fleet
identifier.
When requesting multiple fleets, use the pagination parameters to retrieve
results as a set of sequential pages.
If successful, a `FleetCapacity` object is returned for each requested fleet ID.
Each FleetCapacity object includes a `Location` property, which is set to the
fleet's home Region. When a list of fleet IDs is provided, attribute objects are
returned only for fleets that currently exist.
Some API operations may limit the number of fleet IDs that are allowed in one
request. If a request exceeds this limit, the request fails and the error
message includes the maximum allowed.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[GameLift metrics for fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet)
## Related actions
`ListFleets` | `DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetCapacity` | `DescribeFleetEvents` |
`DescribeFleetLocationAttributes` | `DescribeFleetPortSettings` |
`DescribeFleetUtilization` | `DescribeRuntimeConfiguration` |
`DescribeScalingPolicies` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_capacity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetCapacity", input, options)
end
@doc """
Retrieves entries from a fleet's event log.
Fleet events are initiated by changes in status, such as during fleet creation
and termination, changes in capacity, etc. If a fleet has multiple locations,
events are also initiated by changes to status and capacity in remote locations.
You can specify a time range to limit the result set. Use the pagination
parameters to retrieve results as a set of sequential pages.
If successful, a collection of event log entries matching the request are
returned.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`ListFleets` | `DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetCapacity` | `DescribeFleetEvents` |
`DescribeFleetLocationAttributes` | `DescribeFleetPortSettings` |
`DescribeFleetUtilization` | `DescribeRuntimeConfiguration` |
`DescribeScalingPolicies` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetEvents", input, options)
end
@doc """
Retrieves information on a fleet's remote locations, including life-cycle status
and any suspended fleet activity.
This operation can be used in the following ways:
* To get data for specific locations, provide a fleet identifier and
a list of locations. Location data is returned in the order that it is
requested.
* To get data for all locations, provide a fleet identifier only.
Location data is returned in no particular order.
When requesting attributes for multiple locations, use the pagination parameters
to retrieve results as a set of sequential pages.
If successful, a `LocationAttributes` object is returned for each requested
location. If the fleet does not have a requested location, no information is
returned. This operation does not return the home Region. To get information on
a fleet's home Region, call `DescribeFleetAttributes`.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `DescribeFleetLocationAttributes` |
`DescribeFleetLocationCapacity` | `DescribeFleetLocationUtilization` |
`DescribeFleetAttributes` | `DescribeFleetCapacity` | `DescribeFleetUtilization`
| `UpdateFleetCapacity` | `StopFleetActions` | `DeleteFleetLocations` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_location_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetLocationAttributes", input, options)
end
@doc """
Retrieves the resource capacity settings for a fleet location.
The data returned includes the current capacity (number of EC2 instances) and
some scaling settings for the requested fleet location. Use this operation to
retrieve capacity information for a fleet's remote location or home Region (you
can also retrieve home Region capacity by calling `DescribeFleetCapacity`).
To retrieve capacity data, identify a fleet and location.
If successful, a `FleetCapacity` object is returned for the requested fleet
location.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[GameLift metrics for fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet)
## Related actions
`CreateFleetLocations` | `DescribeFleetLocationAttributes` |
`DescribeFleetLocationCapacity` | `DescribeFleetLocationUtilization` |
`DescribeFleetAttributes` | `DescribeFleetCapacity` | `DescribeFleetUtilization`
| `UpdateFleetCapacity` | `StopFleetActions` | `DeleteFleetLocations` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_location_capacity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetLocationCapacity", input, options)
end
@doc """
Retrieves current usage data for a fleet location.
Utilization data provides a snapshot of current game hosting activity at the
requested location. Use this operation to retrieve utilization information for a
fleet's remote location or home Region (you can also retrieve home Region
utilization by calling `DescribeFleetUtilization`).
To retrieve utilization data, identify a fleet and location.
If successful, a `FleetUtilization` object is returned for the requested fleet
location.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[GameLift metrics for fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet)
## Related actions
`CreateFleetLocations` | `DescribeFleetLocationAttributes` |
`DescribeFleetLocationCapacity` | `DescribeFleetLocationUtilization` |
`DescribeFleetAttributes` | `DescribeFleetCapacity` | `DescribeFleetUtilization`
| `UpdateFleetCapacity` | `StopFleetActions` | `DeleteFleetLocations` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_location_utilization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetLocationUtilization", input, options)
end
@doc """
Retrieves a fleet's inbound connection permissions.
Connection permissions specify the range of IP addresses and port settings that
incoming traffic can use to access server processes in the fleet. Game sessions
that are running on instances in the fleet must use connections that fall in
this range.
This operation can be used in the following ways:
* To retrieve the inbound connection permissions for a fleet,
identify the fleet's unique identifier.
* To check the status of recent updates to a fleet remote location,
specify the fleet ID and a location. Port setting updates can take time to
propagate across all locations.
If successful, a set of `IpPermission` objects is returned for the requested
fleet ID. When a location is specified, a pending status is included. If the
requested fleet has been deleted, the result set is empty.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`ListFleets` | `DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetCapacity` | `DescribeFleetEvents` |
`DescribeFleetLocationAttributes` | `DescribeFleetPortSettings` |
`DescribeFleetUtilization` | `DescribeRuntimeConfiguration` |
`DescribeScalingPolicies` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_port_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetPortSettings", input, options)
end
@doc """
Retrieves utilization statistics for one or more fleets.
Utilization data provides a snapshot of how the fleet's hosting resources are
currently being used. For fleets with remote locations, this operation retrieves
data for the fleet's home Region only. See `DescribeFleetLocationUtilization` to
get utilization statistics for a fleet's remote locations.
This operation can be used in the following ways:
* To get utilization data for one or more specific fleets, provide a
list of fleet IDs or fleet ARNs.
* To get utilization data for all fleets, do not provide a fleet
identifier.
When requesting multiple fleets, use the pagination parameters to retrieve
results as a set of sequential pages.
If successful, a `FleetUtilization` object is returned for each requested fleet
ID, unless the fleet identifier is not found. Each fleet utilization object
includes a `Location` property, which is set to the fleet's home Region.
Some API operations may limit the number of fleet IDs allowed in one request. If
a request exceeds this limit, the request fails and the error message includes
the maximum allowed.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[GameLift Metrics for Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet)
## Related actions
`ListFleets` | `DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetCapacity` | `DescribeFleetEvents` |
`DescribeFleetLocationAttributes` | `DescribeFleetPortSettings` |
`DescribeFleetUtilization` | `DescribeRuntimeConfiguration` |
`DescribeScalingPolicies` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_fleet_utilization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFleetUtilization", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Retrieves information for a registered game server.
Information includes game server status, health check info, and the instance
that the game server is running on.
To retrieve game server information, specify the game server ID. If successful,
the requested game server object is returned.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`RegisterGameServer` | `ListGameServers` | `ClaimGameServer` |
`DescribeGameServer` | `UpdateGameServer` | `DeregisterGameServer` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def describe_game_server(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameServer", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Retrieves information on a game server group.
This operation returns only properties related to GameLift FleetIQ. To view or
update properties for the corresponding Auto Scaling group, such as launch
template, auto scaling policies, and maximum/minimum group size, access the Auto
Scaling group directly.
To get attributes for a game server group, provide a group name or ARN value. If
successful, a `GameServerGroup` object is returned.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def describe_game_server_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameServerGroup", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Retrieves status information about the Amazon EC2 instances associated with a
GameLift FleetIQ game server group.
Use this operation to detect when instances are active or not available to host
new game servers. If you are looking for instance configuration information,
call `DescribeGameServerGroup` or access the corresponding Auto Scaling group
properties.
To request status for all instances in the game server group, provide a game
server group ID only. To request status for specific instances, provide the game
server group ID and one or more instance IDs. Use the pagination parameters to
retrieve results in sequential segments. If successful, a collection of
`GameServerInstance` objects is returned.
This operation is not designed to be called with every game server claim
request; this practice can cause you to exceed your API limit, which results in
errors. Instead, as a best practice, cache the results and refresh your cache no
more than once every 10 seconds.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def describe_game_server_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameServerInstances", input, options)
end
@doc """
Retrieves additional game session properties, including the game session
protection policy in force, for a set of one or more game sessions in a specific
fleet location.
You can optionally filter the results by current game session status.
Alternatively, use `SearchGameSessions` to request a set of active game sessions
that are filtered by certain criteria. To retrieve all game session properties,
use `DescribeGameSessions`.
This operation can be used in the following ways:
* To retrieve details for all game sessions that are currently
running on all locations in a fleet, provide a fleet or alias ID, with an
optional status filter. This approach returns details from the fleet's home
Region and all remote locations.
* To retrieve details for all game sessions that are currently
running on a specific fleet location, provide a fleet or alias ID and a location
name, with optional status filter. The location can be the fleet's home Region
or any remote location.
* To retrieve details for a specific game session, provide the game
session ID. This approach looks for the game session ID in all fleets that
reside in the Amazon Web Services Region defined in the request.
Use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a `GameSessionDetail` object is returned for each game session
that matches the request.
## Learn more
[Find a game session](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-find)
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_game_session_details(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameSessionDetails", input, options)
end
@doc """
Retrieves information, including current status, about a game session placement
request.
To get game session placement details, specify the placement ID.
This operation is not designed to be continually called to track game session
status. This practice can cause you to exceed your API limit, which results in
errors. Instead, you must configure an Amazon Simple Notification
Service (SNS) topic to receive notifications from FlexMatch or queues.
Continuously polling with `DescribeGameSessionPlacement` should only be used for
games in development with low game session usage.
If successful, a `GameSessionPlacement` object is returned.
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_game_session_placement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameSessionPlacement", input, options)
end
@doc """
Retrieves the properties for one or more game session queues.
When requesting multiple queues, use the pagination parameters to retrieve
results as a set of sequential pages. If successful, a `GameSessionQueue` object
is returned for each requested queue. When specifying a list of queues, objects
are returned only for queues that currently exist in the Region.
## Learn more
[ View Your Queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-console.html)
## Related actions
[CreateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateGameSessionQueue.html) |
[DescribeGameSessionQueues](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeGameSessionQueues.html)
|
[UpdateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateGameSessionQueue.html) |
[DeleteGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DeleteGameSessionQueue.html)
| [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_game_session_queues(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameSessionQueues", input, options)
end
@doc """
Retrieves a set of one or more game sessions in a specific fleet location.
You can optionally filter the results by current game session status.
Alternatively, use `SearchGameSessions` to request a set of active game sessions
that are filtered by certain criteria. To retrieve the protection policy for
game sessions, use `DescribeGameSessionDetails`.
This operation is not designed to be continually called to track game session
status. This practice can cause you to exceed your API limit, which results in
errors. Instead, you must configure an Amazon Simple Notification
Service (SNS) topic to receive notifications from FlexMatch or queues.
Continuously polling with `DescribeGameSessions` should only be used for games
in development with low game session usage.
This operation can be used in the following ways:
* To retrieve all game sessions that are currently running on all
locations in a fleet, provide a fleet or alias ID, with an optional status
filter. This approach returns all game sessions in the fleet's home Region and
all remote locations.
* To retrieve all game sessions that are currently running on a
specific fleet location, provide a fleet or alias ID and a location name, with
optional status filter. The location can be the fleet's home Region or any
remote location.
* To retrieve a specific game session, provide the game session ID.
This approach looks for the game session ID in all fleets that reside in the
Amazon Web Services Region defined in the request.
Use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a `GameSession` object is returned for each game session that
matches the request.
This operation is not designed to be continually called to track matchmaking
ticket status. This practice can cause you to exceed your API limit, which
results in errors. Instead, as a best practice, set up an Amazon Simple
Notification Service to receive notifications, and provide the topic ARN in the
matchmaking configuration. Continuously polling ticket status with
`DescribeGameSessions` should only be used for games in development with low
matchmaking usage.
*Available in Amazon GameLift Local.*
## Learn more
[Find a game session](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-find)
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
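## Example

A minimal sketch listing active game sessions on one fleet. The fleet ID is a
placeholder; `client` is a configured AWS client struct.

    {:ok, body, _http_response} =
      describe_game_sessions(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "StatusFilter" => "ACTIVE",
        "Limit" => 20
      })

    sessions = body["GameSessions"]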
"""
def describe_game_sessions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGameSessions", input, options)
end
@doc """
Retrieves information about a fleet's instances, including instance IDs,
connection data, and status.
This operation can be used in the following ways:
* To get information on all instances that are deployed to a fleet's
home Region, provide the fleet ID.
* To get information on all instances that are deployed to a fleet's
remote location, provide the fleet ID and location name.
* To get information on a specific instance in a fleet, provide the
fleet ID and instance ID.
Use the pagination parameters to retrieve results as a set of sequential pages.
If successful, an `Instance` object is returned for each requested instance.
Instances are not returned in any particular order.
## Learn more
[Remotely Access Fleet Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html)
[Debug Fleet Issues](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html)
## Related actions
`DescribeInstances` | `GetInstanceAccess` | `DescribeEC2InstanceLimits` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeInstances", input, options)
end
@doc """
Retrieves one or more matchmaking tickets.
Use this operation to retrieve ticket information, including--after a successful
match is made--connection information for the resulting new game session.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the
request is successful, a ticket object is returned for each requested ID that
currently exists.
This operation is not designed to be continually called to track matchmaking
ticket status. This practice can cause you to exceed your API limit, which
results in errors. Instead, as a best practice, set up an Amazon Simple
Notification Service to receive notifications, and provide the topic ARN in the
matchmaking configuration. Continuously polling ticket status with
`DescribeMatchmaking` should only be used for games in development with low
matchmaking usage.
## Learn more
[ Add FlexMatch to a game client](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html)
[ Set Up FlexMatch event notification](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
## Related actions
`StartMatchmaking` | `DescribeMatchmaking` | `StopMatchmaking` | `AcceptMatch` |
`StartMatchBackfill` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
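## Example
A hedged sketch of checking a small set of tickets during development; the ticket IDs are
hypothetical and `client` is built with `AWS.Client.create/3` as in the
`describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.describe_matchmaking(client, %{
        "TicketIds" => ["ticket-alpha", "ticket-beta"]
      })

    # Inspect ticket status; in production, prefer SNS notifications over polling.
    Enum.map(output["TicketList"], &{&1["TicketId"], &1["Status"]})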
"""
def describe_matchmaking(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMatchmaking", input, options)
end
@doc """
Retrieves the details of FlexMatch matchmaking configurations.
This operation offers the following options: (1) retrieve all matchmaking
configurations, (2) retrieve configurations for a specified list, or (3)
retrieve all configurations that use a specified rule set name. When requesting
multiple items, use the pagination parameters to retrieve results as a set of
sequential pages.
If successful, a configuration is returned for each requested name. When
specifying a list of names, only configurations that currently exist are
returned.
## Learn more
[ Setting up FlexMatch matchmakers](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/matchmaker-build.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_matchmaking_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMatchmakingConfigurations", input, options)
end
@doc """
Retrieves the details for FlexMatch matchmaking rule sets.
You can request all existing rule sets for the Region, or provide a list of one
or more rule set names. When requesting multiple items, use the pagination
parameters to retrieve results as a set of sequential pages. If successful, a
rule set is returned for each requested name.
## Learn more
* [Build a rule set](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_matchmaking_rule_sets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMatchmakingRuleSets", input, options)
end
@doc """
Retrieves properties for one or more player sessions.
This action can be used in the following ways:
* To retrieve a specific player session, provide the player session
ID only.
* To retrieve all player sessions in a game session, provide the
game session ID only.
* To retrieve all player sessions for a specific player, provide a
player ID only.
To request player sessions, specify a player session ID, a game session ID, or a
player ID. You can filter this request by player session status. Use the
pagination parameters to retrieve results as a set of sequential pages.
If successful, a `PlayerSession` object is returned for each session that
matches the request.
*Available in Amazon GameLift Local.*
## Related actions
`CreatePlayerSession` | `CreatePlayerSessions` | `DescribePlayerSessions` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
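## Example
A minimal sketch that lists the player sessions in one game session; the game session ID
is hypothetical and `client` is built with `AWS.Client.create/3` as in the
`describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.describe_player_sessions(client, %{
        "GameSessionId" => "arn:aws:gamelift:us-west-2::gamesession/fleet-1111aaaa/gsess-2222"
      })

    output["PlayerSessions"]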
"""
def describe_player_sessions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePlayerSessions", input, options)
end
@doc """
Retrieves a fleet's runtime configuration settings.
The runtime configuration tells GameLift which server processes to run (and how)
on each instance in the fleet.
To get the runtime configuration that is currently in force for a fleet,
provide the fleet ID.
If successful, a `RuntimeConfiguration` object is returned for the requested
fleet. If the requested fleet has been deleted, the result set is empty.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[Running multiple processes on a fleet](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html)
## Related actions
`ListFleets` | `DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetCapacity` | `DescribeFleetEvents` |
`DescribeFleetLocationAttributes` | `DescribeFleetPortSettings` |
`DescribeFleetUtilization` | `DescribeRuntimeConfiguration` |
`DescribeScalingPolicies` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_runtime_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeRuntimeConfiguration", input, options)
end
@doc """
Retrieves all scaling policies applied to a fleet.
To get a fleet's scaling policies, specify the fleet ID. You can filter this
request by policy status, such as to retrieve only active scaling policies. Use
the pagination parameters to retrieve results as a set of sequential pages. If
successful, a set of `ScalingPolicy` objects is returned for the fleet.
A fleet may have all of its scaling policies suspended (`StopFleetActions`).
This operation does not affect the status of the scaling policies, which remains
ACTIVE. To see whether a fleet's scaling policies are in force or suspended,
call `DescribeFleetAttributes` and check the stopped actions.
## Related actions
`DescribeFleetCapacity` | `UpdateFleetCapacity` | `DescribeEC2InstanceLimits` |
`PutScalingPolicy` | `DescribeScalingPolicies` | `DeleteScalingPolicy` |
`StopFleetActions` | `StartFleetActions` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_scaling_policies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeScalingPolicies", input, options)
end
@doc """
Retrieves properties for a Realtime script.
To request a script record, specify the script ID. If successful, an object
containing the script properties is returned.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related actions
`CreateScript` | `ListScripts` | `DescribeScript` | `UpdateScript` |
`DeleteScript` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_script(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeScript", input, options)
end
@doc """
Retrieves valid VPC peering authorizations that are pending for the Amazon Web
Services account.
This operation returns all VPC peering authorizations and requests for peering.
This includes those initiated and received by this account.
## Related actions
`CreateVpcPeeringAuthorization` | `DescribeVpcPeeringAuthorizations` |
`DeleteVpcPeeringAuthorization` | `CreateVpcPeeringConnection` |
`DescribeVpcPeeringConnections` | `DeleteVpcPeeringConnection` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_vpc_peering_authorizations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeVpcPeeringAuthorizations", input, options)
end
@doc """
Retrieves information on VPC peering connections.
Use this operation to get peering information for all fleets or for one specific
fleet ID.
To retrieve connection information, call this operation from the Amazon Web
Services account that is used to manage the Amazon GameLift fleets. Specify a
fleet ID or leave the parameter empty to retrieve all connection records. If
successful, the retrieved information includes both active and pending
connections. Active connections identify the IpV4 CIDR block that the VPC uses
to connect.
## Related actions
`CreateVpcPeeringAuthorization` | `DescribeVpcPeeringAuthorizations` |
`DeleteVpcPeeringAuthorization` | `CreateVpcPeeringConnection` |
`DescribeVpcPeeringConnections` | `DeleteVpcPeeringConnection` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def describe_vpc_peering_connections(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeVpcPeeringConnections", input, options)
end
@doc """
Retrieves the location of stored game session logs for a specified game session.
When a game session is terminated, GameLift automatically stores the logs in
Amazon S3 and retains them for 14 days. Use this URL to download the logs.
See the [Amazon Web Services Service Limits](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift)
page for maximum log file sizes. Log files that exceed this limit are not saved.
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def get_game_session_log_url(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetGameSessionLogUrl", input, options)
end
@doc """
Requests remote access to a fleet instance.
Remote access is useful for debugging, gathering benchmarking data, or observing
activity in real time.
To remotely access an instance, you need credentials that match the operating
system of the instance. For a Windows instance, GameLift returns a user name and
password as strings for use with a Windows Remote Desktop client. For a Linux
instance, GameLift returns a user name and RSA private key, also as strings, for
use with an SSH client. The private key must be saved in the proper format to a
`.pem` file before it can be used. If you're making this request using the CLI, saving
the secret can be handled as part of the `GetInstanceAccess` request, as shown
in one of the examples for this operation.
To request access to a specific instance, specify the IDs of both the instance
and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling
`DescribeInstances`. If successful, an `InstanceAccess` object is returned that
contains the instance's IP address and a set of credentials.
## Learn more
[Remotely Access Fleet Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html)
[Debug Fleet Issues](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html)
## Related actions
`DescribeInstances` | `GetInstanceAccess` | `DescribeEC2InstanceLimits` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
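## Example
A hedged sketch of requesting remote-access credentials for one instance; the fleet and
instance IDs are hypothetical, the response field names follow the `InstanceAccess` shape
described above, and `client` is built as in the `describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.get_instance_access(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "InstanceId" => "i-0abc123def4567890"
      })

    access = output["InstanceAccess"]
    # For a Linux instance, Credentials holds a user name and an RSA private key
    # that must be written to a .pem file before use.
    {access["IpAddress"], access["Credentials"]}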
"""
def get_instance_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetInstanceAccess", input, options)
end
@doc """
Retrieves all aliases for this Amazon Web Services account.
You can filter the result set by alias name and/or routing strategy type. Use
the pagination parameters to retrieve results in sequential pages.
Returned aliases are not listed in any particular order.
## Related actions
`CreateAlias` | `ListAliases` | `DescribeAlias` | `UpdateAlias` | `DeleteAlias`
| `ResolveAlias` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def list_aliases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAliases", input, options)
end
@doc """
Retrieves build resources for all builds associated with the Amazon Web Services
account in use.
You can limit results to builds that are in a specific status by using the
`Status` parameter. Use the pagination parameters to retrieve results in a set
of sequential pages.
Build resources are not listed in any particular order.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related actions
`CreateBuild` | `ListBuilds` | `DescribeBuild` | `UpdateBuild` | `DeleteBuild` |
[All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def list_builds(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListBuilds", input, options)
end
@doc """
Retrieves a collection of fleet resources in an Amazon Web Services Region.
You can call this operation to get fleets in a previously selected default
Region (see
[https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-region.html](https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-region.html)) or specify a Region in your request. You can filter the result set to find only
those fleets that are deployed with a specific build or script. For fleets that
have multiple locations, this operation retrieves fleets based on their home
Region only.
This operation can be used in the following ways:
* To get a list of all fleets in a Region, don't provide a build or
script identifier.
* To get a list of all fleets where a specific custom game build is
deployed, provide the build ID.
* To get a list of all Realtime Servers fleets with a specific
configuration script, provide the script ID.
Use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a list of fleet IDs that match the request parameters is
returned. A NextToken value is also returned if there are more result pages to
retrieve.
Fleet resources are not listed in a particular order.
## Learn more
[Setting up GameLift
fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleet` | `UpdateFleetCapacity` | `PutScalingPolicy` |
`DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetLocationAttributes` | `UpdateFleetAttributes` | `StopFleetActions`
| `DeleteFleet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
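## Example
A minimal pagination sketch; the build ID is hypothetical and `client` is built as in the
`describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.list_fleets(client, %{
        "BuildId" => "build-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "Limit" => 20
      })

    fleet_ids = output["FleetIds"]
    # Pass output["NextToken"] back in a follow-up request to fetch the next page.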
"""
def list_fleets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListFleets", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Retrieves information on all game servers groups that exist in the current
Amazon Web Services account for the selected Region.
Use the pagination parameters to retrieve results in a set of sequential
segments.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def list_game_server_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListGameServerGroups", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Retrieves information on all game servers that are currently active in a
specified game server group.
You can opt to sort the list by game server age. Use the pagination parameters
to retrieve results in a set of sequential segments.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`RegisterGameServer` | `ListGameServers` | `ClaimGameServer` |
`DescribeGameServer` | `UpdateGameServer` | `DeregisterGameServer` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def list_game_servers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListGameServers", input, options)
end
@doc """
Retrieves script records for all Realtime scripts that are associated with the
Amazon Web Services account in use.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related actions
`CreateScript` | `ListScripts` | `DescribeScript` | `UpdateScript` |
`DeleteScript` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def list_scripts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListScripts", input, options)
end
@doc """
Retrieves all tags that are assigned to a GameLift resource.
Resource tags are used to organize Amazon Web Services resources for a range of
purposes. This operation handles the permissions necessary to manage tags for
the following GameLift resource types:
* Build
* Script
* Fleet
* Alias
* GameSessionQueue
* MatchmakingConfiguration
* MatchmakingRuleSet
To list tags for a resource, specify the unique ARN value for the resource.
## Learn more
[Tagging Amazon Web Services Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in
the *Amazon Web Services General Reference*
[ Amazon Web Services Tagging Strategies](http://aws.amazon.com/answers/account-management/aws-tagging-strategies/)
## Related actions
`TagResource` | `UntagResource` | `ListTagsForResource` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Creates or updates a scaling policy for a fleet.
Scaling policies are used to automatically scale a fleet's hosting capacity to
meet player demand. An active scaling policy instructs Amazon GameLift to track
a fleet metric and automatically change the fleet's capacity when a certain
threshold is reached. There are two types of scaling policies: target-based and
rule-based. Use a target-based policy to quickly and efficiently manage fleet
scaling; this option is the most commonly used. Use rule-based policies when you
need to exert fine-grained control over auto-scaling.
Fleets can have multiple scaling policies of each type in force at the same
time; you can have one target-based policy, one or multiple rule-based scaling
policies, or both. We recommend caution, however, because multiple auto-scaling
policies can have unintended consequences.
You can temporarily suspend all scaling policies for a fleet by calling
`StopFleetActions` with the fleet action AUTO_SCALING. To resume scaling
policies, call `StartFleetActions` with the same fleet action. To stop just one
scaling policy, or to permanently remove it, delete the policy with
`DeleteScalingPolicy`.
Learn more about how to work with auto-scaling in [Set Up Fleet Automatic Scaling](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-autoscaling.html).
## Target-based policy
A target-based policy tracks a single metric: PercentAvailableGameSessions. This
metric tells us how much of a fleet's hosting capacity is ready to host game
sessions but is not currently in use. This is the fleet's buffer; it measures
the additional player demand that the fleet could handle at current capacity.
With a target-based policy, you set your ideal buffer size and leave it to
Amazon GameLift to take whatever action is needed to maintain that target.
For example, you might choose to maintain a 10% buffer for a fleet that has the
capacity to host 100 simultaneous game sessions. This policy tells Amazon
GameLift to take action whenever the fleet's available capacity falls below or
rises above 10 game sessions. Amazon GameLift will start new instances or stop
unused instances in order to return to the 10% buffer.
To create or update a target-based policy, specify a fleet ID and name, and set
the policy type to "TargetBased". Specify the metric to track
(PercentAvailableGameSessions) and reference a `TargetConfiguration` object with
your desired buffer value. Exclude all other parameters. On a successful
request, the policy name is returned. The scaling policy is automatically in
force as soon as it's successfully created. If the fleet's auto-scaling actions
are temporarily suspended, the new policy will be in force once the fleet
actions are restarted.
## Rule-based policy
A rule-based policy tracks a specified fleet metric, sets a threshold value, and
specifies the type of action to initiate when triggered. With a rule-based
policy, you can select from several available fleet metrics. Each policy
specifies whether to scale up or scale down (and by how much), so you need one
policy for each type of action.
For example, a policy may make the following statement: "If the percentage of
idle instances is greater than 20% for more than 15 minutes, then reduce the
fleet capacity by 10%."
A policy's rule statement has the following structure:
If `[MetricName]` is `[ComparisonOperator]` `[Threshold]` for `[EvaluationPeriods]` minutes, then `[ScalingAdjustmentType]` to/by `[ScalingAdjustment]`.
To implement the example, the rule statement would look like this:
If `[PercentIdleInstances]` is `[GreaterThanThreshold]` `[20]` for `[15]` minutes, then `[PercentChangeInCapacity]` to/by `[10]`.
To create or update a scaling policy, specify a unique combination of name and
fleet ID, and set the policy type to "RuleBased". Specify the parameter values
for a policy rule statement. On a successful request, the policy name is
returned. Scaling policies are automatically in force as soon as they're
successfully created. If the fleet's auto-scaling actions are temporarily
suspended, the new policy will be in force once the fleet actions are restarted.
## Related actions
`DescribeFleetCapacity` | `UpdateFleetCapacity` | `DescribeEC2InstanceLimits` |
`PutScalingPolicy` | `DescribeScalingPolicies` | `DeleteScalingPolicy` |
`StopFleetActions` | `StartFleetActions` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
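## Example
A hedged sketch of the target-based case described above, maintaining a 10 percent buffer
of available game sessions; the fleet ID and policy name are hypothetical and `client` is
built as in the `describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.put_scaling_policy(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "Name" => "maintain-10-percent-buffer",
        "PolicyType" => "TargetBased",
        "MetricName" => "PercentAvailableGameSessions",
        "TargetConfiguration" => %{"TargetValue" => 10}
      })

    # On success, the policy name is returned and the policy is in force.
    output["Name"]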
"""
def put_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutScalingPolicy", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Creates a new game server resource and notifies GameLift FleetIQ that the game
server is ready to host gameplay and players.
This operation is called by a game server process that is running on an instance
in a game server group. Registering game servers enables GameLift FleetIQ to
track available game servers and enables game clients and services to claim a
game server for a new game session.
To register a game server, identify the game server group and instance where the
game server is running, and provide a unique identifier for the game server. You
can also include connection and game server data. When a game client or service
requests a game server by calling `ClaimGameServer`, this information is
returned in the response.
Once a game server is successfully registered, it is put in status `AVAILABLE`.
A request to register a game server may fail if the instance it is running on is
in the process of shutting down as part of instance balancing or scale-down
activity.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`RegisterGameServer` | `ListGameServers` | `ClaimGameServer` |
`DescribeGameServer` | `UpdateGameServer` | `DeregisterGameServer` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
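## Example
A minimal sketch of a game server process registering itself; all identifiers and the
connection string are hypothetical, and `client` is built as in the
`describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.register_game_server(client, %{
        "GameServerGroupName" => "my-game-server-group",
        "GameServerId" => "gs-0001",
        "InstanceId" => "i-0abc123def4567890",
        "ConnectionInfo" => "192.0.2.10:7777"
      })

    # The registered game server starts out in AVAILABLE status.
    output["GameServer"]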
"""
def register_game_server(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterGameServer", input, options)
end
@doc """
Retrieves a fresh set of credentials for use when uploading a new set of game
build files to Amazon GameLift's Amazon S3 storage location.
This is done as part of the build creation process; see `CreateBuild`.
To request new credentials, specify the build ID as returned with an initial
`CreateBuild` request. If successful, a new set of credentials is returned,
along with the S3 storage location associated with the build ID.
## Learn more
[ Create a Build with Files in S3](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)
## Related actions
`CreateBuild` | `ListBuilds` | `DescribeBuild` | `UpdateBuild` | `DeleteBuild` |
[All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def request_upload_credentials(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RequestUploadCredentials", input, options)
end
@doc """
Retrieves the fleet ID that an alias is currently pointing to.
## Related actions
`CreateAlias` | `ListAliases` | `DescribeAlias` | `UpdateAlias` | `DeleteAlias`
| `ResolveAlias` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def resolve_alias(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResolveAlias", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Reinstates activity on a game server group after it has been suspended.
A game server group might be suspended by the `SuspendGameServerGroup` operation,
or it might be suspended involuntarily due to a configuration problem. In the
second case, you can manually resume activity on the group once the
configuration problem has been resolved. Refer to the game server group status
and status reason for more information on why group activity is suspended.
To resume activity, specify a game server group ARN and the type of activity to
be resumed. If successful, a `GameServerGroup` object is returned showing that
the resumed activity is no longer listed in `SuspendedActions`.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def resume_game_server_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResumeGameServerGroup", input, options)
end
@doc """
Retrieves all active game sessions that match a set of search criteria and sorts
them into a specified order.
This operation is not designed to be continually called to track game session
status. This practice can cause you to exceed your API limit, which results in
errors. Instead, you must configure an Amazon Simple Notification
Service (SNS) topic to receive notifications from FlexMatch or queues.
Continuously polling game session status with `DescribeGameSessions` should only
be used for games in development with low game session usage.
When searching for game sessions, you specify exactly where you want to search
and provide a search filter expression, a sort expression, or both. A search
request can search only one fleet, but it can search all of a fleet's locations.
This operation can be used in the following ways:
* To search all game sessions that are currently running on all
locations in a fleet, provide a fleet or alias ID. This approach returns game
sessions in the fleet's home Region and all remote locations that fit the search
criteria.
* To search all game sessions that are currently running on a
specific fleet location, provide a fleet or alias ID and a location name. For
location, you can specify a fleet's home Region or any remote location.
Use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a `GameSession` object is returned for each game session that
matches the request. Search finds game sessions that are in `ACTIVE` status
only. To retrieve information on game sessions in other statuses, use
`DescribeGameSessions`.
You can search or sort by the following game session attributes:
* **gameSessionId** -- A unique identifier for the game session. You
can use either a `GameSessionId` or `GameSessionArn` value.
* **gameSessionName** -- Name assigned to a game session. This value
is set when requesting a new game session with `CreateGameSession` or updating
with `UpdateGameSession`. Game session names do not need to be unique to a game
session.
* **gameSessionProperties** -- Custom data defined in a game
session's `GameProperty` parameter. `GameProperty` values are stored as
key:value pairs; the filter expression must indicate the key and a string to
search the data values for. For example, to search for game sessions with custom
data containing the key:value pair "gameMode:brawl", specify the following:
`gameSessionProperties.gameMode = "brawl"`. All custom data values are searched
as strings.
* **maximumSessions** -- Maximum number of player sessions allowed
for a game session. This value is set when requesting a new game session with
`CreateGameSession` or updating with `UpdateGameSession`.
* **creationTimeMillis** -- Value indicating when a game session was
created. It is expressed in Unix time as milliseconds.
* **playerSessionCount** -- Number of players currently connected to
a game session. This value changes rapidly as players join the session or drop
out.
* **hasAvailablePlayerSessions** -- Boolean value indicating whether
a game session has reached its maximum number of players. It is highly
recommended that all search requests include this filter attribute to optimize
search performance and return only sessions that players can join.
Returned values for `playerSessionCount` and `hasAvailablePlayerSessions` change
quickly as players join sessions and others drop out. Results should be
considered a snapshot in time. Be sure to refresh search results often, and
handle sessions that fill up before a player can join.
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
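## Example
A hedged sketch of the filter and sort expressions described above, searching one fleet
for joinable "brawl" sessions; the fleet ID is hypothetical and `client` is built as in
the `describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.search_game_sessions(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "FilterExpression" =>
          "hasAvailablePlayerSessions=true AND gameSessionProperties.gameMode = \"brawl\"",
        "SortExpression" => "creationTimeMillis ASC",
        "Limit" => 10
      })

    output["GameSessions"]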
"""
def search_game_sessions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SearchGameSessions", input, options)
end
@doc """
Resumes certain types of activity on fleet instances that were suspended with
`StopFleetActions`.
For multi-location fleets, fleet actions are managed separately for each
location. Currently, this operation is used to restart a fleet's auto-scaling
activity.
This operation can be used in the following ways:
* To restart actions on instances in the fleet's home Region,
provide a fleet ID and the type of actions to resume.
* To restart actions on instances in one of the fleet's remote
locations, provide a fleet ID, a location name, and the type of actions to
resume.
If successful, GameLift once again initiates scaling events as triggered by the
fleet's scaling policies. If actions on the fleet location were never stopped,
this operation will have no effect. You can view a fleet's stopped actions using
`DescribeFleetAttributes` or `DescribeFleetLocationAttributes`.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleet` | `UpdateFleetCapacity` | `PutScalingPolicy` |
`DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetLocationAttributes` | `UpdateFleetAttributes` | `StopFleetActions`
| `DeleteFleet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def start_fleet_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartFleetActions", input, options)
end
@doc """
Places a request for a new game session in a queue (see
`CreateGameSessionQueue`).
When processing a placement request, Amazon GameLift searches for available
resources on the queue's destinations, scanning each until it finds resources or
the placement request times out.
A game session placement request can also request player sessions. When a new
game session is successfully created, Amazon GameLift creates a player session
for each player included in the request.
When placing a game session, by default Amazon GameLift tries each fleet in the
order they are listed in the queue configuration. Ideally, a queue's
destinations are listed in preference order.
Alternatively, when requesting a game session with players, you can also provide
latency data for each player in relevant Regions. Latency data indicates the
performance lag a player experiences when connected to a fleet in the Region.
Amazon GameLift uses latency data to reorder the list of destinations to place
the game session in a Region with minimal lag. If latency data is provided for
multiple players, Amazon GameLift calculates each Region's average lag for all
players and reorders to get the best game play across all players.
To place a new game session request, specify the following:
* The queue name and a set of game session properties and settings
* A unique ID (such as a UUID) for the placement. You use this ID to
track the status of the placement request
* (Optional) A set of player data and a unique player ID for each
player that you are joining to the new game session (player data is optional,
but if you include it, you must also provide a unique ID for each player)
* Latency data for all players (if you want to optimize game play
for the players)
If successful, a new game session placement is created.
To track the status of a placement request, call `DescribeGameSessionPlacement`
and check the request's status. If the status is `FULFILLED`, a new game session
has been created and a game session ARN and Region are referenced. If the
placement request times out, you can resubmit the request or retry it with a
different queue.
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
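## Example
A minimal placement sketch covering the required fields listed above; the queue name,
placement ID, and player ID are hypothetical, and `client` is built as in the
`describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.start_game_session_placement(client, %{
        # A UUID is a typical choice for the placement ID.
        "PlacementId" => "placement-0001",
        "GameSessionQueueName" => "my-queue",
        "MaximumPlayerSessionCount" => 10,
        "DesiredPlayerSessions" => [%{"PlayerId" => "player-1"}]
      })

    # Poll DescribeGameSessionPlacement with the placement ID until status is FULFILLED.
    output["GameSessionPlacement"]["Status"]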
"""
def start_game_session_placement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartGameSessionPlacement", input, options)
end
@doc """
Finds new players to fill open slots in currently running game sessions.
The backfill match process is essentially identical to the process of forming
new matches. Backfill requests use the same matchmaker that was used to make the
original match, and they provide matchmaking data for all players currently in
the game session. FlexMatch uses this information to select new players so that
the backfilled match continues to meet the original match requirements.
When using FlexMatch with GameLift managed hosting, you can request a backfill
match from a client service by calling this operation with a `GameSession`
identifier. You also have the option of making backfill requests directly from
your game server. In response to a request, FlexMatch creates player sessions
for the new players, updates the `GameSession` resource, and sends updated
matchmaking data to the game server. You can request a backfill match at any
point after a game session is started. Each game session can have only one
active backfill request at a time; a subsequent request automatically replaces
the earlier request.
When using FlexMatch as a standalone component, request a backfill match by
calling this operation without a game session identifier. As with newly formed
matches, matchmaking results are returned in a matchmaking event so that your
game can update the game session that is being backfilled.
To request a backfill match, specify a unique ticket ID, the original
matchmaking configuration, and matchmaking data for all current players in the
game session being backfilled. Optionally, specify the `GameSession` ARN. If
successful, a match backfill ticket is created and returned with status set to
QUEUED. Track the status of backfill tickets using the same method for tracking
tickets for new matches.
Only game sessions created by FlexMatch are supported for match backfill.
## Learn more
[ Backfill existing games with FlexMatch](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
[ Matchmaking events](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html)
(reference)
[ How GameLift FlexMatch works](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html)
## Related actions
`StartMatchmaking` | `DescribeMatchmaking` | `StopMatchmaking` | `AcceptMatch` |
`StartMatchBackfill` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def start_match_backfill(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartMatchBackfill", input, options)
end
@doc """
Uses FlexMatch to create a game match for a group of players based on custom
matchmaking rules.
With games that use GameLift managed hosting, this operation also triggers
GameLift to find hosting resources and start a new game session for the new
match. Each matchmaking request includes information on one or more players and
specifies the FlexMatch matchmaker to use. When a request is for multiple
players, FlexMatch attempts to build a match that includes all players in the
request, placing them in the same team and finding additional players as needed
to fill the match.
To start matchmaking, provide a unique ticket ID, specify a matchmaking
configuration, and include the players to be matched. You must also include any
player attributes that are required by the matchmaking configuration's rule set.
If successful, a matchmaking ticket is returned with status set to `QUEUED`.
Track matchmaking events to respond as needed and acquire game session
connection information for successfully completed matches. Ticket status updates
are tracked using event notification through Amazon Simple Notification Service,
which is defined in the matchmaking configuration.
## Learn more
[ Add FlexMatch to a game client](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html)
[ Set Up FlexMatch event notification](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
[ How GameLift FlexMatch works](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html)
## Related actions
`StartMatchmaking` | `DescribeMatchmaking` | `StopMatchmaking` | `AcceptMatch` |
`StartMatchBackfill` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
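## Example
A hedged sketch of submitting one player to a matchmaker; the configuration name, ticket
ID, and player attributes are hypothetical and must match your rule set, and `client` is
built as in the `describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.start_matchmaking(client, %{
        "ConfigurationName" => "my-matchmaker",
        "TicketId" => "ticket-0001",
        "Players" => [
          %{
            "PlayerId" => "player-1",
            "PlayerAttributes" => %{"skill" => %{"N" => 42}}
          }
        ]
      })

    # Track progress through matchmaking event notifications rather than polling.
    output["MatchmakingTicket"]["Status"]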
"""
def start_matchmaking(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartMatchmaking", input, options)
end
@doc """
Suspends certain types of activity in a fleet location.
Currently, this operation is used to stop auto-scaling activity. For
multi-location fleets, fleet actions are managed separately for each location.
Stopping fleet actions has several potential purposes. It allows you to
temporarily stop auto-scaling activity but retain your scaling policies for use
in the future. For multi-location fleets, you can set up fleet-wide
auto-scaling, and then opt out of it for certain locations.
This operation can be used in the following ways:
* To stop actions on instances in the fleet's home Region, provide a
fleet ID and the type of actions to suspend.
* To stop actions on instances in one of the fleet's remote
locations, provide a fleet ID, a location name, and the type of actions to
suspend.
If successful, GameLift no longer initiates scaling events except in response to
manual changes using `UpdateFleetCapacity`. You can view a fleet's stopped
actions using `DescribeFleetAttributes` or `DescribeFleetLocationAttributes`.
Suspended activity can be restarted using `StartFleetActions`.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleet` | `UpdateFleetCapacity` | `PutScalingPolicy` |
`DescribeEC2InstanceLimits` | `DescribeFleetAttributes` |
`DescribeFleetLocationAttributes` | `UpdateFleetAttributes` | `StopFleetActions`
| `DeleteFleet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def stop_fleet_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopFleetActions", input, options)
end
@doc """
Cancels a game session placement that is in `PENDING` status.
To stop a placement, provide the placement ID values. If successful, the
placement is moved to `CANCELLED` status.
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def stop_game_session_placement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopGameSessionPlacement", input, options)
end
@doc """
Cancels a matchmaking ticket or match backfill ticket that is currently being
processed.
To stop the matchmaking operation, specify the ticket ID. If successful, work on
the ticket is stopped, and the ticket status is changed to `CANCELLED`.
This call is also used to turn off automatic backfill for an individual game
session. This is for game sessions that are created with a matchmaking
configuration that has automatic backfill enabled. The ticket ID is included in
the `MatchmakerData` of an updated game session object, which is provided to the
game server.
If the operation is successful, the service sends back an empty JSON struct with
the HTTP 200 response (not an empty HTTP body).
## Learn more
[ Add FlexMatch to a game client](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html)
## Related actions
`StartMatchmaking` | `DescribeMatchmaking` | `StopMatchmaking` | `AcceptMatch` |
`StartMatchBackfill` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def stop_matchmaking(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopMatchmaking", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Temporarily stops activity on a game server group without terminating instances
or the game server group.
You can restart activity by calling `ResumeGameServerGroup`. You can suspend the
following activity:
* **Instance type replacement** - This activity evaluates the
current game hosting viability of all Spot instance types that are defined for
the game server group. It updates the Auto Scaling group to remove nonviable
Spot Instance types, which have a higher chance of game server interruptions. It
then balances capacity across the remaining viable Spot Instance types. When
this activity is suspended, the Auto Scaling group continues with its current
balance, regardless of viability. Instance protection, utilization metrics, and
capacity scaling activities continue to be active.
To suspend activity, specify a game server group ARN and the type of activity to
be suspended. If successful, a `GameServerGroup` object is returned showing that
the activity is listed in `SuspendedActions`.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def suspend_game_server_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SuspendGameServerGroup", input, options)
end
@doc """
Assigns a tag to a GameLift resource.
Amazon Web Services resource tags provide an additional management tool set. You
can use tags to organize resources, create IAM permissions policies to manage
access to groups of resources, customize Amazon Web Services cost breakdowns,
etc. This operation handles the permissions necessary to manage tags for the
following GameLift resource types:
* Build
* Script
* Fleet
* Alias
* GameSessionQueue
* MatchmakingConfiguration
* MatchmakingRuleSet
To add a tag to a resource, specify the unique ARN value for the resource and
provide a tag list containing one or more tags. The operation succeeds even if
the list includes tags that are already assigned to the specified resource.
## Learn more
[Tagging Amazon Web Services Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in
the *Amazon Web Services General Reference*
[ Amazon Web Services Tagging Strategies](http://aws.amazon.com/answers/account-management/aws-tagging-strategies/)
## Related actions
`TagResource` | `UntagResource` | `ListTagsForResource` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
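## Example
A minimal tagging sketch; the fleet ARN and tag values are hypothetical, and `client` is
built as in the `describe_game_sessions/3` example:

    {:ok, _output, _http_response} =
      AWS.GameLift.tag_resource(client, %{
        "ResourceARN" => "arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1111aaaa",
        "Tags" => [
          %{"Key" => "team", "Value" => "platform"},
          %{"Key" => "stage", "Value" => "dev"}
        ]
      })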
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes a tag that is assigned to a GameLift resource.
Resource tags are used to organize Amazon Web Services resources for a range of
purposes. This operation handles the permissions necessary to manage tags for
the following GameLift resource types:
* Build
* Script
* Fleet
* Alias
* GameSessionQueue
* MatchmakingConfiguration
* MatchmakingRuleSet
To remove a tag from a resource, specify the unique ARN value for the resource
and provide a string list containing one or more tags to be removed. This
operation succeeds even if the list includes tags that are not currently
assigned to the specified resource.
## Learn more
[Tagging Amazon Web Services Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in
the *Amazon Web Services General Reference*
[ Amazon Web Services Tagging Strategies](http://aws.amazon.com/answers/account-management/aws-tagging-strategies/)
## Related actions
`TagResource` | `UntagResource` | `ListTagsForResource` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates properties for an alias.
To update properties, specify the alias ID to be updated and provide the
information to be changed. To reassign an alias to another fleet, provide an
updated routing strategy. If successful, the updated alias record is returned.
## Related actions
`CreateAlias` | `ListAliases` | `DescribeAlias` | `UpdateAlias` | `DeleteAlias`
| `ResolveAlias` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def update_alias(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAlias", input, options)
end
@doc """
Updates metadata in a build resource, including the build name and version.
To update the metadata, specify the build ID to update and provide the new
values. If successful, a build object containing the updated metadata is
returned.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related actions
`CreateBuild` | `ListBuilds` | `DescribeBuild` | `UpdateBuild` | `DeleteBuild` |
[All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def update_build(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateBuild", input, options)
end
@doc """
Updates a fleet's mutable attributes, including game session protection and
resource creation limits.
To update fleet attributes, specify the fleet ID and the property values that
you want to change.
If successful, an updated `FleetAttributes` object is returned.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `UpdateFleetAttributes` | `UpdateFleetCapacity` |
`UpdateFleetPortSettings` | `UpdateRuntimeConfiguration` | `StopFleetActions` |
`StartFleetActions` | `PutScalingPolicy` | `DeleteFleet` |
`DeleteFleetLocations` | `DeleteScalingPolicy` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def update_fleet_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateFleetAttributes", input, options)
end
@doc """
Updates capacity settings for a fleet.
For fleets with multiple locations, use this operation to manage capacity
settings in each location individually. Fleet capacity determines the number of
game sessions and players that can be hosted based on the fleet configuration.
Use this operation to set the following fleet capacity properties:
* Minimum/maximum size: Set hard limits on fleet capacity. GameLift
cannot set the fleet's capacity to a value outside of this range, whether the
capacity is changed manually or through automatic scaling.
* Desired capacity: Manually set the number of Amazon EC2 instances
to be maintained in a fleet location. Before changing a fleet's desired
capacity, you may want to call `DescribeEC2InstanceLimits` to get the maximum
capacity of the fleet's Amazon EC2 instance type. Alternatively, consider using
automatic scaling to adjust capacity based on player demand.
This operation can be used in the following ways:
* To update capacity for a fleet's home Region, or if the fleet has
no remote locations, omit the `Location` parameter. The fleet must be in
`ACTIVE` status.
* To update capacity for a fleet's remote location, include the
`Location` parameter set to the location to be updated. The location must be in
`ACTIVE` status.
If successful, capacity settings are updated immediately. In response to a change
in desired capacity, GameLift initiates steps to start new instances or
terminate existing instances in the requested fleet location. This continues
until the location's active instance count matches the new desired instance
count. You can track a fleet's current capacity by calling
`DescribeFleetCapacity` or `DescribeFleetLocationCapacity`. If the requested
desired instance count is higher than the instance type's limit, the
`LimitExceeded` exception occurs.
## Learn more
[Scaling fleet capacity](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-manage-capacity.html)
## Related actions
`CreateFleetLocations` | `UpdateFleetAttributes` | `UpdateFleetCapacity` |
`UpdateFleetPortSettings` | `UpdateRuntimeConfiguration` | `StopFleetActions` |
`StartFleetActions` | `PutScalingPolicy` | `DeleteFleet` |
`DeleteFleetLocations` | `DeleteScalingPolicy` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
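## Example
A hedged sketch that raises desired capacity for a fleet's home Region; the fleet ID and
counts are hypothetical, `"Location"` is omitted for the home Region as described above,
and `client` is built as in the `describe_game_sessions/3` example:

    {:ok, output, _http_response} =
      AWS.GameLift.update_fleet_capacity(client, %{
        "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        "DesiredInstances" => 5,
        "MinSize" => 1,
        "MaxSize" => 10
      })

    output["FleetId"]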
"""
def update_fleet_capacity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateFleetCapacity", input, options)
end
@doc """
Updates permissions that allow inbound traffic to connect to game sessions that
are being hosted on instances in the fleet.
To update settings, specify the fleet ID to be updated and specify the changes
to be made. List the permissions you want to add in
`InboundPermissionAuthorizations`, and permissions you want to remove in
`InboundPermissionRevocations`. Permissions to be removed must match existing
fleet permissions.
If successful, the fleet ID for the updated fleet is returned. For fleets with
remote locations, port setting updates can take time to propagate across all
locations. You can check the status of updates in each location by calling
`DescribeFleetPortSettings` with a location name.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `UpdateFleetAttributes` | `UpdateFleetCapacity` |
`UpdateFleetPortSettings` | `UpdateRuntimeConfiguration` | `StopFleetActions` |
`StartFleetActions` | `PutScalingPolicy` | `DeleteFleet` |
`DeleteFleetLocations` | `DeleteScalingPolicy` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def update_fleet_port_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateFleetPortSettings", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Updates information about a registered game server to help GameLift FleetIQ to
track game server availability.
This operation is called by a game server process that is running on an instance
in a game server group.
Use this operation to update the following types of game server information. You
can make all three types of updates in the same request:
* To update the game server's utilization status, identify the game
server and game server group and specify the current utilization status. Use
this status to identify when game servers are currently hosting games and when
they are available to be claimed.
* To report health status, identify the game server and game server
group and set health check to `HEALTHY`. If a game server does not report health
status for a certain length of time, the game server is no longer considered
healthy. As a result, it will be eventually deregistered from the game server
group to avoid affecting utilization metrics. The best practice is to report
health every 60 seconds.
* To change game server metadata, provide updated game server data.
Once a game server is successfully updated, the relevant statuses and timestamps
are updated.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`RegisterGameServer` | `ListGameServers` | `ClaimGameServer` |
`DescribeGameServer` | `UpdateGameServer` | `DeregisterGameServer` | [All APIs by
task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
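## Example

A minimal, illustrative sketch of a combined health and utilization report. The
group and game server names are made up, and `client` is assumed to be an
already-configured `%AWS.Client{}`:

    input = %{
      "GameServerGroupName" => "my-game-server-group",
      "GameServerId" => "game-server-0001",
      "UtilizationStatus" => "UTILIZED",
      "HealthCheck" => "HEALTHY"
    }

    # Returns an `{:ok, ...}` tuple on success and `{:error, ...}` otherwise.
    AWS.GameLift.update_game_server(client, input)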
"""
def update_game_server(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGameServer", input, options)
end
@doc """
## This operation is used with the GameLift FleetIQ solution and game server
groups.
Updates GameLift FleetIQ-specific properties for a game server group.
Many Auto Scaling group properties are updated on the Auto Scaling group
directly, including the launch template, Auto Scaling policies, and
maximum/minimum/desired instance counts.
To update the game server group, specify the game server group ID and provide
the updated values. Before applying the updates, the new values are validated to
ensure that GameLift FleetIQ can continue to perform instance balancing
activity. If successful, a `GameServerGroup` object is returned.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related actions
`CreateGameServerGroup` | `ListGameServerGroups` | `DescribeGameServerGroup` |
`UpdateGameServerGroup` | `DeleteGameServerGroup` | `ResumeGameServerGroup` |
`SuspendGameServerGroup` | `DescribeGameServerInstances` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html)
"""
def update_game_server_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGameServerGroup", input, options)
end
@doc """
Updates the mutable properties of a game session.
To update a game session, specify the game session ID and the values you want to
change.
If successful, the updated `GameSession` object is returned.
## Related actions
`CreateGameSession` | `DescribeGameSessions` | `DescribeGameSessionDetails` |
`SearchGameSessions` | `UpdateGameSession` | `GetGameSessionLogUrl` |
`StartGameSessionPlacement` | `DescribeGameSessionPlacement` |
`StopGameSessionPlacement` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
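## Example

A minimal, illustrative sketch of updating two mutable properties. The game
session ARN and values are made up, and `client` is assumed to be an
already-configured `%AWS.Client{}`:

    input = %{
      "GameSessionId" => "arn:aws:gamelift:us-west-2::gamesession/fleet-1111aaaa/my-session-id",
      "MaximumPlayerSessionCount" => 16,
      "Name" => "rematch-lobby"
    }

    # Returns an `{:ok, ...}` tuple on success and `{:error, ...}` otherwise.
    AWS.GameLift.update_game_session(client, input)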
"""
def update_game_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGameSession", input, options)
end
@doc """
Updates the configuration of a game session queue, which determines how the
queue processes new game session requests.
To update settings, specify the queue name to be updated and provide the new
settings. When updating destinations, provide a complete list of destinations.
## Learn more
[ Using Multi-Region Queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html)
## Related actions
[CreateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateGameSessionQueue.html) |
[DescribeGameSessionQueues](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeGameSessionQueues.html)
|
[UpdateGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateGameSessionQueue.html) |
[DeleteGameSessionQueue](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DeleteGameSessionQueue.html)
| [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def update_game_session_queue(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGameSessionQueue", input, options)
end
@doc """
Updates settings for a FlexMatch matchmaking configuration.
These changes affect all matches and game sessions that are created after the
update. To update settings, specify the configuration name to be updated and
provide the new settings.
## Learn more
[ Design a FlexMatch matchmaker](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
"""
def update_matchmaking_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateMatchmakingConfiguration", input, options)
end
@doc """
Updates the current runtime configuration for the specified fleet, which tells
GameLift how to launch server processes on all instances in the fleet.
You can update a fleet's runtime configuration at any time after the fleet is
created; it does not need to be in `ACTIVE` status.
To update runtime configuration, specify the fleet ID and provide a
`RuntimeConfiguration` with an updated set of server process configurations.
If successful, the fleet's runtime configuration settings are updated. Each
instance in the fleet regularly checks for and retrieves updated runtime
configurations. Instances comply with the new configuration as soon as they
retrieve it, by launching new server processes and by not replacing existing
processes when they shut down. Updating a fleet's runtime configuration never affects existing
server processes.
## Learn more
[Setting up GameLift fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related actions
`CreateFleetLocations` | `UpdateFleetAttributes` | `UpdateFleetCapacity` |
`UpdateFleetPortSettings` | `UpdateRuntimeConfiguration` | `StopFleetActions` |
`StartFleetActions` | `PutScalingPolicy` | `DeleteFleet` |
`DeleteFleetLocations` | `DeleteScalingPolicy` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
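## Example

A minimal, illustrative sketch of an updated runtime configuration. The fleet
id, launch path, and process count are made up, and `client` is assumed to be an
already-configured `%AWS.Client{}`:

    input = %{
      "FleetId" => "fleet-2222bbbb-33cc-44dd-55ee-6666ffff77aa",
      "RuntimeConfiguration" => %{
        "ServerProcesses" => [
          %{"LaunchPath" => "/local/game/my_server", "ConcurrentExecutions" => 2}
        ]
      }
    }

    # Returns an `{:ok, ...}` tuple on success and `{:error, ...}` otherwise.
    AWS.GameLift.update_runtime_configuration(client, input)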
"""
def update_runtime_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRuntimeConfiguration", input, options)
end
@doc """
Updates Realtime script metadata and content.
To update script metadata, specify the script ID and provide updated name and/or
version values.
To update script content, provide an updated zip file by pointing to either a
local file or an Amazon S3 bucket location. You can use either method regardless
of how the original script was uploaded. Use the *Version* parameter to track
updates to the script.
If the call is successful, the updated metadata is stored in the script record
and a revised script is uploaded to the Amazon GameLift service. Once the script
is updated and acquired by a fleet instance, the new version is used for all new
game sessions.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related actions
`CreateScript` | `ListScripts` | `DescribeScript` | `UpdateScript` |
`DeleteScript` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
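## Example

A minimal, illustrative sketch of pointing a script at a new Amazon S3 object.
The script id, bucket, key, and role ARN are made up, and `client` is assumed to
be an already-configured `%AWS.Client{}`:

    input = %{
      "ScriptId" => "script-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "Version" => "2.0",
      "StorageLocation" => %{
        "Bucket" => "my-realtime-scripts",
        "Key" => "my_script.zip",
        "RoleArn" => "arn:aws:iam::111122223333:role/gamelift-s3-access"
      }
    }

    # Returns an `{:ok, ...}` tuple on success and `{:error, ...}` otherwise.
    AWS.GameLift.update_script(client, input)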
"""
def update_script(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateScript", input, options)
end
@doc """
Validates the syntax of a matchmaking rule or rule set.
This operation checks that the rule set is using syntactically correct JSON and
that it conforms to allowed property expressions. To validate syntax, provide a
rule set JSON string.
## Learn more
* [Build a rule set](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html)
## Related actions
`CreateMatchmakingConfiguration` | `DescribeMatchmakingConfigurations` |
`UpdateMatchmakingConfiguration` | `DeleteMatchmakingConfiguration` |
`CreateMatchmakingRuleSet` | `DescribeMatchmakingRuleSets` |
`ValidateMatchmakingRuleSet` | `DeleteMatchmakingRuleSet` | [All APIs by task](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets)
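## Example

A minimal, illustrative sketch. The rule set body below is a deliberately tiny,
made-up JSON document, and `client` is assumed to be an already-configured
`%AWS.Client{}`:

    rule_set_body = ~s({"ruleLanguageVersion": "1.0",
      "teams": [{"name": "players", "minPlayers": 2, "maxPlayers": 8}]})

    input = %{"RuleSetBody" => rule_set_body}

    # Returns an `{:ok, ...}` tuple on success and `{:error, ...}` otherwise.
    AWS.GameLift.validate_matchmaking_rule_set(client, input)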
"""
def validate_matchmaking_rule_set(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ValidateMatchmakingRuleSet", input, options)
end
end
|
lib/aws/generated/game_lift.ex
| 0.874158 | 0.732305 |
game_lift.ex
|
starcoder
|
defmodule AWS.LakeFormation do
@moduledoc """
AWS Lake Formation
Defines the public endpoint for the AWS Lake Formation service.
"""
@doc """
Batch operation to grant permissions to the principal.
"""
def batch_grant_permissions(client, input, options \\ []) do
request(client, "BatchGrantPermissions", input, options)
end
@doc """
Batch operation to revoke permissions from the principal.
"""
def batch_revoke_permissions(client, input, options \\ []) do
request(client, "BatchRevokePermissions", input, options)
end
@doc """
Deregisters the resource as managed by the Data Catalog.
When you deregister a path, Lake Formation removes the path from the inline
policy attached to your service-linked role.
"""
def deregister_resource(client, input, options \\ []) do
request(client, "DeregisterResource", input, options)
end
@doc """
Retrieves the current data access role for the given resource registered in
AWS Lake Formation.
"""
def describe_resource(client, input, options \\ []) do
request(client, "DescribeResource", input, options)
end
@doc """
Retrieves the list of the data lake administrators of a Lake
Formation-managed data lake.
"""
def get_data_lake_settings(client, input, options \\ []) do
request(client, "GetDataLakeSettings", input, options)
end
@doc """
Returns the Lake Formation permissions for a specified table or database
resource located at a path in Amazon S3. `GetEffectivePermissionsForPath`
will not return databases and tables if the catalog is encrypted.
"""
def get_effective_permissions_for_path(client, input, options \\ []) do
request(client, "GetEffectivePermissionsForPath", input, options)
end
@doc """
Grants permissions to the principal to access metadata in the Data Catalog
and data organized in underlying data storage such as Amazon S3.
For information about permissions, see [Security and Access Control to
Metadata and
Data](https://docs-aws.amazon.com/lake-formation/latest/dg/security-data-access.html).
"""
def grant_permissions(client, input, options \\ []) do
request(client, "GrantPermissions", input, options)
end
@doc """
Returns a list of the principal permissions on the resource, filtered by
the permissions of the caller. For example, if you are granted an ALTER
permission, you are able to see only the principal permissions for ALTER.
This operation returns only those permissions that have been explicitly
granted.
For information about permissions, see [Security and Access Control to
Metadata and
Data](https://docs-aws.amazon.com/lake-formation/latest/dg/security-data-access.html).
"""
def list_permissions(client, input, options \\ []) do
request(client, "ListPermissions", input, options)
end
@doc """
Lists the resources registered to be managed by the Data Catalog.
"""
def list_resources(client, input, options \\ []) do
request(client, "ListResources", input, options)
end
@doc """
Sets the list of data lake administrators who have admin privileges on all
resources managed by Lake Formation. For more information on admin
privileges, see [Granting Lake Formation
Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/lake-formation-permissions.html).
This API replaces the current list of data lake admins with the new list
being passed. To add an admin, fetch the current list, append the new admin
to it, and pass the combined list to this API.
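## Example

A minimal, illustrative sketch of the fetch-then-replace flow described above.
The principal ARN is made up, `client` is assumed to be an AWS client map as
used by this module, and the response field names follow the Lake Formation API:

    {:ok, %{"DataLakeSettings" => settings}, _response} =
      AWS.LakeFormation.get_data_lake_settings(client, %{})

    new_admin = %{"DataLakePrincipalIdentifier" => "arn:aws:iam::111122223333:user/new-admin"}
    admins = Map.get(settings, "DataLakeAdmins", []) ++ [new_admin]

    input = %{"DataLakeSettings" => Map.put(settings, "DataLakeAdmins", admins)}
    {:ok, _result, _response} = AWS.LakeFormation.put_data_lake_settings(client, input)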
"""
def put_data_lake_settings(client, input, options \\ []) do
request(client, "PutDataLakeSettings", input, options)
end
@doc """
Registers the resource as managed by the Data Catalog.
To add or update data, Lake Formation needs read/write access to the chosen
Amazon S3 path. Choose a role that you know has permission to do this, or
choose the AWSServiceRoleForLakeFormationDataAccess service-linked role.
When you register the first Amazon S3 path, the service-linked role and a
new inline policy are created on your behalf. Lake Formation adds the first
path to the inline policy and attaches it to the service-linked role. When
you register subsequent paths, Lake Formation adds the path to the existing
policy.
The following request registers a new location and gives AWS Lake Formation
permission to use the service-linked role to access that location.
    ResourceArn = arn:aws:s3:::my-bucket
    UseServiceLinkedRole = true
If `UseServiceLinkedRole` is not set to true, you must provide or set the
`RoleArn`:
`arn:aws:iam::12345:role/my-data-access-role`
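## Example

A minimal, illustrative sketch of the request described above, expressed as an
Elixir call. The bucket ARN is made up, and `client` is assumed to be an AWS
client map as used by this module:

    input = %{
      "ResourceArn" => "arn:aws:s3:::my-bucket",
      "UseServiceLinkedRole" => true
    }

    {:ok, _result, _response} = AWS.LakeFormation.register_resource(client, input)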
"""
def register_resource(client, input, options \\ []) do
request(client, "RegisterResource", input, options)
end
@doc """
Revokes permissions to the principal to access metadata in the Data Catalog
and data organized in underlying data storage such as Amazon S3.
"""
def revoke_permissions(client, input, options \\ []) do
request(client, "RevokePermissions", input, options)
end
@doc """
Updates the data access role used for vending access to the given
(registered) resource in AWS Lake Formation.
"""
def update_resource(client, input, options \\ []) do
request(client, "UpdateResource", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "lakeformation"}
host = build_host("lakeformation", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSLakeFormation.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/lake_formation.ex
| 0.819352 | 0.470068 |
lake_formation.ex
|
starcoder
|
defmodule MangoPay do
@moduledoc """
The elixir client for MangoPay API.
This module is the root of all the application.
## Configuring
Set your API key by configuring the :mangopay application.
```
config :mangopay, client_id: YOUR_MANGOPAY_CLIENT_ID
config :mangopay, passphrase: <PASSWORD>
```
"""
@base_header %{"User-Agent": "Elixir", "Content-Type": "application/json"}
@payline_header %{"Accept-Encoding": "gzip;q=1.0,deflate;q=0.6,identity;q=0.3", "Accept": "*/*", "Host": "homologation-webpayment.payline.com"}
def base_header do
@base_header
end
@doc """
Returns MANGOPAY_BASE_URL
"""
def base_url do
"https://api.sandbox.mangopay.com"
end
@doc """
Returns MANGOPAY_CLIENT
"""
def client do
Application.get_env(:mangopay, :client)
end
def mangopay_version do
"v2.01"
end
def mangopay_version_and_client_id do
"/#{mangopay_version()}/#{MangoPay.client()[:id]}"
end
@doc """
Request to mangopay web API.
## Examples
response = MangoPay.request!("get", "users")
"""
def request! {method, url, body, headers} do
request!(method, url, body, headers)
end
def request! {method, url, query} do
request!(:get, url, "", "", query)
end
def request!(method, url, body \\ "", headers \\ "", query \\ %{}) do
{method, url, body, headers, _} = full_header_request(method, url, body, headers, query)
filter_and_send(method, url, body, headers, query, true)
end
@doc """
Request to mangopay web API.
## Examples
{:ok, response} = MangoPay.request({"get", "users", nil, nil})
"""
def request {method, url, body, headers} do
request(method, url, body, headers)
end
def request {method, url, query} do
request(:get, url, "", "", query)
end
@doc """
Request to mangopay web API.
## Examples
{:ok, response} = MangoPay.request("get", "users")
"""
def request(method, url, body \\ "", headers \\ "", query \\ %{}) do
{method, url, body, headers, query} = full_header_request(method, url, body, headers, query)
filter_and_send(method, url, body, headers, query, false)
end
defp full_header_request(method, url, body, headers, query) do
{method, url, decode_map(body), headers, query}
|> authorization_params()
|> payline_params()
end
defp authorization_params {method, url, body, headers, query} do
headers = case headers do
%{"Authorization": _} -> headers
_ -> Map.merge(base_header(), %{"Authorization": "#{MangoPay.Authorization.pull_token()}"})
end
{method, url, body, headers, query}
end
defp payline_params {method, url, body, headers, query} do
if String.contains?(url, "payline") do
{method, url, body, cond_payline(headers), query}
else
{method, cond_mangopay(url), body, headers, query}
end
end
defp cond_payline headers do
headers
|> Map.update!(:"Content-Type", fn _ -> "application/x-www-form-urlencoded" end)
|> Map.merge(@payline_header)
end
defp cond_mangopay url do
base_url() <> mangopay_version_and_client_id() <> url
end
defp decode_map body do
cond do
is_map body -> Poison.encode! body
is_list body -> Poison.encode! body
is_binary body -> body
end
end
# Default request dispatch to MangoPay, using environment-specific HTTP timeouts
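# NOTE: only the :dev and :test Mix environments are matched below; any other
# environment (such as :prod) would raise a CaseClauseError.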
defp filter_and_send(method, url, body, headers, query, bang) do
cond do
bang ->
case {Mix.env, method} do
{:dev, _} -> HTTPoison.request!(method, url, body, headers, [params: query, timeout: 4600, recv_timeout: 5000])
{:test, _} -> HTTPoison.request!(method, url, body, headers, [params: query, timeout: 500000, recv_timeout: 500000])
end
true ->
case {Mix.env, method, query} do
{:dev, _, _} -> HTTPoison.request(method, url, body, headers, [params: query, timeout: 4600, recv_timeout: 5000])
{:test, _, _} -> HTTPoison.request(method, url, body, headers, [params: query, timeout: 500000, recv_timeout: 500000])
end
end
end
end
|
lib/mango_pay.ex
| 0.748076 | 0.611005 |
mango_pay.ex
|
starcoder
|
defmodule Bitcoin.Protocol.Messages.GetBlocks do
@moduledoc """
Return an inv packet containing the list of blocks starting right after the last known hash in the block locator
object, up to hash_stop or 500 blocks, whichever comes first.
The locator hashes are processed by a node in the order in which they appear in the message. If a block hash is found in
the node's main chain, the list of its children is returned via the inv message and the remaining locators are
ignored, whether or not the requested limit was reached.
To receive the next blocks hashes, one needs to issue getblocks again with a new block locator object. Keep in mind
that some clients may provide blocks which are invalid if the block locator object contains a hash on the invalid
branch.
To create the block locator hashes, keep pushing hashes until you go back to the genesis block.
After pushing 10 hashes back, the step backwards doubles every loop.
Note that it is allowed to send in fewer known hashes down to a minimum of just one hash. However, the purpose of
the block locator object is to detect a wrong branch in the caller's main chain. If the peer detects that you are
off the main chain, it will send in block hashes which are earlier than your last known block. So if you just send
in your last known hash and it is off the main chain, the peer starts over at block #1.
https://en.bitcoin.it/wiki/Protocol_specification#getblocks
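A rough sketch of building the block locator hashes described above. This is
illustrative only and is not part of this module; `block_hashes` is assumed to be
a non-empty list of block hashes ordered from the chain tip back to the genesis
block:

    # Dense for the first 10 hashes, then the step backwards doubles every loop;
    # the genesis hash is always included last.
    def block_locator(block_hashes) do
      do_block_locator(block_hashes, 0, 1, [])
    end

    defp do_block_locator([genesis_hash], _count, _step, acc),
      do: Enum.reverse([genesis_hash | acc])

    defp do_block_locator([hash | _] = hashes, count, step, acc) do
      step = if count >= 10, do: step * 2, else: 1

      rest =
        case Enum.drop(hashes, step) do
          [] -> [List.last(hashes)]
          remaining -> remaining
        end

      do_block_locator(rest, count + 1, step, [hash | acc])
    end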
"""
import Bitcoin.Protocol
defstruct version: 0, # the protocol version
block_locator_hashes: [], # block locator object; newest back to genesis block (dense to start, but then sparse)
hash_stop: <<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>> # hash of the last desired block; set to zero to get as many blocks as possible (up to 500)
@type t :: %__MODULE__{
version: non_neg_integer,
block_locator_hashes: list(Bitcoin.Block.t_hash),
hash_stop: Bitcoin.Block.t_hash
}
@spec parse(binary) :: t
def parse(data) do
<< version :: unsigned-little-integer-size(32), payload :: binary>> = data
{block_locator_hashes, payload} = payload |> collect_items(:hash)
<< hash_stop :: bytes-size(32) >> = payload
%__MODULE__{
version: version,
block_locator_hashes: block_locator_hashes,
hash_stop: hash_stop
}
end
@spec serialize(t) :: binary
def serialize(%__MODULE__{} = s) do
<< s.version :: unsigned-little-integer-size(32) >>
<>
( s.block_locator_hashes |> serialize_items )
<>
<< s.hash_stop :: bytes-size(32) >>
end
end
|
lib/bitcoin/protocol/messages/get_blocks.ex
| 0.812719 | 0.519338 |
get_blocks.ex
|
starcoder
|
defmodule Config do
@moduledoc ~S"""
A simple keyword-based configuration API.
## Example
This module is most commonly used to define application configuration,
typically in `config/config.exs`:
import Config
config :some_app,
key1: "value1",
key2: "value2"
import_config "#{config_env()}.exs"
`import Config` will import the functions `config/2`, `config/3`,
`config_env/0`, `config_target/0`, and `import_config/1`
to help you manage your configuration.
`config/2` and `config/3` are used to define key-value configuration
for a given application. Once Mix starts, it will automatically
evaluate the configuration file and persist the configuration above
into `:some_app`'s application environment, which can be accessed
as follows:
"value1" = Application.fetch_env!(:some_app, :key1)
Finally, the line `import_config "#{config_env()}.exs"` will import
other config files based on the current configuration environment,
such as `config/dev.exs` and `config/test.exs`.
`Config` also provides a low-level API for evaluating and reading
configuration, under the `Config.Reader` module.
> **Important:** if you are writing a library to be used by other developers,
> it is generally recommended to avoid the application environment, as the
> application environment is effectively a global storage. Also note that
> the `config/config.exs` of a library is not evaluated when the library is
> used as a dependency, as configuration is always meant to configure the
> current project. For more information, read our [library guidelines](library-guidelines.md).
## Migrating from `use Mix.Config`
The `Config` module in Elixir was introduced in v1.9 as a replacement to
`Mix.Config`, which was specific to Mix and has been deprecated.
You can leverage `Config` instead of `Mix.Config` in three steps. The first
step is to replace `use Mix.Config` at the top of your config files by
`import Config`.
The second is to make sure your `import_config/1` calls do not have a
wildcard character. If so, you need to perform the wildcard lookup
manually. For example, if you did:
import_config "../apps/*/config/config.exs"
It has to be replaced by:
for config <- "../apps/*/config/config.exs" |> Path.expand(__DIR__) |> Path.wildcard() do
import_config config
end
The last step is to replace all `Mix.env()` calls in the config files with `config_env()`.
Keep in mind you must also avoid using `Mix.env()` inside your project files.
To check the environment at _runtime_, you may add a configuration key:
# config.exs
...
config :my_app, env: config_env()
Then, in other scripts and modules, you may get the environment with
`Application.fetch_env!/2`:
# router.exs
...
if Application.fetch_env!(:my_app, :env) == :prod do
...
end
The only files where you may access functions from the `Mix` module are
the `mix.exs` file and inside custom Mix tasks, which are always within the
`Mix.Tasks` namespace.
## config/runtime.exs
For runtime configuration, you can use the `config/runtime.exs` file.
It is executed right before applications start in both Mix and releases
(assembled with `mix release`).
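For example, a `config/runtime.exs` can read values from the system environment
at boot time (the application and variable names below are only illustrative):

    import Config

    config :my_app, :secret_key, System.fetch_env!("MY_APP_SECRET_KEY")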
"""
@opts_key {__MODULE__, :opts}
@config_key {__MODULE__, :config}
@imports_key {__MODULE__, :imports}
defp get_opts!(), do: Process.get(@opts_key) || raise_improper_use!()
defp put_opts(value), do: Process.put(@opts_key, value)
defp delete_opts(), do: Process.delete(@opts_key)
defp get_config!(), do: Process.get(@config_key) || raise_improper_use!()
defp put_config(value), do: Process.put(@config_key, value)
defp delete_config(), do: Process.delete(@config_key)
defp get_imports!(), do: Process.get(@imports_key) || raise_improper_use!()
defp put_imports(value), do: Process.put(@imports_key, value)
defp delete_imports(), do: Process.delete(@imports_key)
defp raise_improper_use!() do
raise "could not set configuration via Config. " <>
"This usually means you are trying to execute a configuration file " <>
"directly, instead of reading it with Config.Reader"
end
@doc """
Configures the given `root_key`.
Keyword lists are always deep-merged.
## Examples
The given `opts` are merged into the existing configuration
for the given `root_key`. Conflicting keys are overridden by the
ones specified in `opts`, unless they are keywords, which are
deep merged recursively. For example, the application configuration
below
config :logger,
level: :warn,
backends: [:console]
config :logger,
level: :info,
truncate: 1024
will have a final configuration for `:logger` of:
[level: :info, backends: [:console], truncate: 1024]
"""
@doc since: "1.9.0"
def config(root_key, opts) when is_atom(root_key) and is_list(opts) do
unless Keyword.keyword?(opts) do
raise ArgumentError, "config/2 expected a keyword list, got: #{inspect(opts)}"
end
get_config!()
|> __merge__([{root_key, opts}])
|> put_config()
end
@doc """
Configures the given `key` for the given `root_key`.
Keyword lists are always deep merged.
## Examples
The given `opts` are merged into the existing values for `key`
in the given `root_key`. Conflicting keys are overridden by the
ones specified in `opts`, unless they are keywords, which are
deep merged recursively. For example, the application configuration
below
config :ecto, Repo,
log_level: :warn,
adapter: Ecto.Adapters.Postgres,
metadata: [read_only: true]
config :ecto, Repo,
log_level: :info,
pool_size: 10,
metadata: [replica: true]
will have a final value of the configuration for the `Repo`
key in the `:ecto` application of:
Application.get_env(:ecto, Repo)
#=> [
#=> log_level: :info,
#=> pool_size: 10,
#=> adapter: Ecto.Adapters.Postgres,
#=> metadata: [read_only: true, replica: true]
#=> ]
"""
@doc since: "1.9.0"
def config(root_key, key, opts) when is_atom(root_key) and is_atom(key) do
get_config!()
|> __merge__([{root_key, [{key, opts}]}])
|> put_config()
end
@doc """
Returns the environment this configuration file is executed on.
In Mix projects this function returns the environment this configuration
file is executed on. In releases, the environment when `mix release` ran.
This is most often used to execute conditional code:
if config_env() == :prod do
config :my_app, :debug, false
end
"""
@doc since: "1.11.0"
defmacro config_env() do
quote do
Config.__env__!()
end
end
@doc false
@spec __env__!() :: atom()
def __env__!() do
elem(get_opts!(), 0) || raise "no :env key was given to this configuration file"
end
@doc """
Returns the target this configuration file is executed on.
This is most often used to execute conditional code:
if config_target() == :host do
config :my_app, :debug, false
end
"""
@doc since: "1.11.0"
defmacro config_target() do
quote do
Config.__target__!()
end
end
@doc false
@spec __target__!() :: atom()
def __target__!() do
elem(get_opts!(), 1) || raise "no :target key was given to this configuration file"
end
@doc ~S"""
Imports configuration from the given file.
In case the file doesn't exist, an error is raised.
If file is a relative, it will be expanded relatively to the
directory the current configuration file is in.
## Examples
This is often used to emulate configuration across environments:
import_config "#{config_env()}.exs"
Note, however, that some configuration files, such as `config/runtime.exs`,
do not support imports, as they are meant to be copied across
systems.
"""
@doc since: "1.9.0"
defmacro import_config(file) do
quote do
Config.__import__!(Path.expand(unquote(file), __DIR__))
:ok
end
end
@doc false
@spec __import__!(Path.t()) :: {term, Code.binding()}
def __import__!(file) when is_binary(file) do
import_config!(file, File.read!(file), true)
end
@doc false
@spec __eval__!(Path.t(), binary(), keyword) :: {keyword, [Path.t()] | :disabled}
def __eval__!(file, content, opts \\ []) when is_binary(file) and is_list(opts) do
env = Keyword.get(opts, :env)
target = Keyword.get(opts, :target)
imports = Keyword.get(opts, :imports, [])
previous_opts = put_opts({env, target})
previous_config = put_config([])
previous_imports = put_imports(imports)
try do
{eval_config, _} = import_config!(file, content, false)
case get_config!() do
[] when is_list(eval_config) ->
{validate!(eval_config, file), get_imports!()}
pdict_config ->
{pdict_config, get_imports!()}
end
after
if previous_opts, do: put_opts(previous_opts), else: delete_opts()
if previous_config, do: put_config(previous_config), else: delete_config()
if previous_imports, do: put_imports(previous_imports), else: delete_imports()
end
end
defp import_config!(file, contents, raise_when_disabled?) do
current_imports = get_imports!()
cond do
current_imports == :disabled ->
if raise_when_disabled? do
raise "import_config/1 is not enabled for this configuration file. " <>
"Some configuration files do not allow importing other files " <>
"as they are often copied to external systems"
end
file in current_imports ->
raise ArgumentError,
"attempting to load configuration #{Path.relative_to_cwd(file)} recursively"
true ->
put_imports([file | current_imports])
:ok
end
# TODO: Emit a warning if Mix.env() is found in said files in Elixir v1.15.
# Note this won't be a deprecation warning as it will always be emitted.
Code.eval_string(contents, [], file: file)
end
@doc false
def __merge__(config1, config2) when is_list(config1) and is_list(config2) do
Keyword.merge(config1, config2, fn _, app1, app2 ->
Keyword.merge(app1, app2, &deep_merge/3)
end)
end
defp deep_merge(_key, value1, value2) do
if Keyword.keyword?(value1) and Keyword.keyword?(value2) do
Keyword.merge(value1, value2, &deep_merge/3)
else
value2
end
end
defp validate!(config, file) do
Enum.all?(config, fn
{app, value} when is_atom(app) ->
if Keyword.keyword?(value) do
true
else
raise ArgumentError,
"expected config for app #{inspect(app)} in #{Path.relative_to_cwd(file)} " <>
"to return keyword list, got: #{inspect(value)}"
end
_ ->
false
end)
config
end
end
|
lib/elixir/lib/config.ex
| 0.894271 | 0.493103 |
config.ex
|
starcoder
|
defmodule Unit do
@moduledoc """
Types and helper functions to safely work with and convert between units
"""
import Kernel, except: [+: 1, +: 2, -: 1, -: 2, *: 2, /: 2]
defmacro __using__(_) do
quote do
import Unit
import Kernel, except: [+: 1, +: 2, -: 1, -: 2, *: 2, /: 2]
end
end
@type area_unit :: :m2 | :ft2 | :in2
@type energy_unit :: :w | :kw | :hp
@type force_unit :: :n | :lbf
@type length_unit :: :m | :cm | :mm | :ft | :in
@type mass_unit :: :kg | :g | :lbs | :slug
@type power_unit :: :w | :kw | :hp
@type pressure_unit :: :pa | :kpa | :psi | :psf
@type time_unit :: :s | :min | :hrs
@type velocity_unit :: :ms | :mph | :knots | :kph | :fpm
@type unit :: area_unit | energy_unit | force_unit | length_unit | mass_unit | power_unit | pressure_unit | time_unit | velocity_unit
@units %{
kg: {:mass, 1.0, "kilograms"},
g: {:mass, 0.001, "grams"},
lbs: {:mass, 0.45359237, "pounds"},
slug: {:mass, Kernel.*(32.174049, 0.45359237), "slugs"},
oz: {:mass, 0.02835, "ounces"},
m: {:length, 1.0, "metres"},
cm: {:length, 0.01, "centimetres"},
mm: {:length, 0.001, "millimetres"},
ft: {:length, 0.3048, "feet"},
in: {:length, 0.0254, "inches"},
s: {:time, 1.0, "seconds"},
min: {:time, 60.0, "minutes"},
hrs: {:time, 3600.0, "hours"},
m2: {:area, 1.0, "square metres"},
ft2: {:area, Kernel./(1, 10.76), "square feet"},
in2: {:area, Kernel./(1, Kernel.*(10.76, 144)), "square inches"},
m3: {:volume, 1.0, "cubic metres"},
l: {:volume, 1_000, "litres"},
ft3: {:volume, 0.028, "cubic feet"},
in3: {:volume, Kernel./(1, 61_023.744), "cubic inches"},
gal: {:volume, 0.003785, "US gallons"},
ms: {:velocity, 1.0, "metres per second"},
mph: {:velocity, 0.45, "miles per hour"},
knots: {:velocity, 0.51, "knots"},
kph: {:velocity, Kernel./(1, 3.6), "kilometres per hour"},
fpm: {:velocity, 0.00508, "feet per minute"},
ms2: {:acceleration, 1.0, "metres per second squared"},
fts2: {:acceleration, 0.3048, "feet per second squared"},
n: {:force, 1.0, "newtons"},
lbf: {:force, 4.448222, "pound force"},
pa: {:pressure, 1.0, "pascals"}, # N/m2
kpa: {:pressure, 1_000, "kilopascals"},
psi: {:pressure, 6_895, "pounds per square inch"},
psf: {:pressure, 47.8803, "pounds per square foot"},
inhg: {:pressure, 3_390, "inches of mercury"},
w: {:power, 1.0, "watts"},
kw: {:power, 1_000, "kilowatts"},
hp: {:power, 746, "horsepower"},
c: {:temperature, 1.0, "degrees centigrade"},
f: {:temperature, {&Util.f_to_c/1, &Util.c_to_f/1}, "degrees fahrenheit"}
}
@type dimension ::
:acceleration | :area | :density | :energy | :force | :length | :mass |
:moment | :power | :pressure | :velocity | :volume | :temperature | :time
@dimensions [
{:area, :length, :length},
{:energy, :force, :length},
{:energy, :power, :time},
{:force, :mass, :acceleration},
{:force, :pressure, :area},
{:length, :velocity, :time},
{:mass, :volume, :density},
{:moment, :length, :force},
{:power, :force, :velocity},
{:velocity, :acceleration, :time},
{:volume, :area, :length}
]
@doc """
Operator for constructing a unit qualified value
TODO may work better as macro to get higher precedence
## Example
```
iex>use Unit
iex>10 <~ :kph
{10, :kph}
```
"""
@spec number <~ unit :: {number, unit}
def a <~ b when is_number(a) and is_atom(b), do: {a, b}
@doc """
Query the dimension of a unit
## Examples
```
iex>use Unit
iex>dimension_of {10, :kph}
:velocity
```
"""
@spec dimension_of({number, unit}) :: dimension
def dimension_of({value, unit}) when is_number(value) and is_atom(unit) do
with {dimension, _, _} <- @units[unit] do
dimension
end
end
@doc """
Human readable description of a unit
## Examples
```
iex>use Unit
iex>describe {10, :kph}
"kilometres per hour"
```
"""
@spec describe({number, unit}) :: String.t
def describe({value, unit}) when is_number(value) and is_atom(unit) do
with {_, _, description} <- @units[unit] do
description
end
end
@doc """
Convert a value in specified units to a compatible second unit, or return a descriptive error.
## Examples
```
iex> Unit.to {1.0, :ft}, :in
{12.0, :in}
iex> Unit.to {1.0, :ms}, :ms
{1.0, :ms}
iex> Unit.to {10, :ms}, :knots
{19.6078431372549, :knots}
iex> Unit.to {10, :ms}, :kph
{36.0, :kph}
iex> Unit.to {50, :f}, :c
{10.0, :c}
iex> Unit.to {10, :c}, :f
{50.0, :f}
iex> Unit.to {10, :ms}, :feet
{:error, "Unknown destination unit 'feet'."}
iex> Unit.to {10, :mps}, :ft
{:error, "Unknown source unit 'mps'."}
iex> Unit.to {10, :ms}, :ft
{:error, "metres per second (velocity) cannot be converted to feet (length)"}
```
"""
@spec to({number, unit}, unit) :: {float, unit}
def to({input, from_unit}, to_unit) do
with {:from, {from_dim, c_from, _}} <- {:from, @units[from_unit]},
{:to, {to_dim, c_to, _}} <- {:to, @units[to_unit]},
{:dims_match, true} <- {:dims_match, from_dim == to_dim} do
interim = if is_number(c_from) do
input * c_from
else
{convert, _} = c_from
convert.(input)
end
output = if is_number(c_to) do
interim / c_to
else
{_, convert} = c_to
convert.(interim)
end |> Float.round(14)
{output, to_unit}
else
{:from, nil} -> {:error, "Unknown source unit '#{Atom.to_string(from_unit)}'."}
{:to, nil} -> {:error, "Unknown destination unit '#{Atom.to_string(to_unit)}'."}
{:dims_match, false} ->
%{^from_unit => {from_dim, _, from_desc}, ^to_unit => {to_dim, _, to_desc}} = @units
{
:error,
"#{from_desc} (#{Atom.to_string(from_dim)}) cannot be converted to #{to_desc} (#{Atom.to_string(to_dim)})"
}
end
end
@doc """
Operator version of to()
## Examples
```
iex> use Unit
iex> 10 <~ :ms ~> :kph
{36.0, :kph}
```
"""
@spec {float, unit} ~> unit :: {float, unit}
def a ~> b when is_tuple(a) and is_atom(b), do: to(a, b)
@doc """
Override addition operator to handle unit qualified values.
If multiple units of the same dimension (e.g length) are used the result is expressed in the
units of the left operand
## Examples
```
iex> use Unit
iex> {1, :ft} + {2, :ft}
{3, :ft}
iex> {1, :in} + {1, :ft}
{13.0, :in}
```
"""
@spec number + number :: number
@spec {number, unit} + {number, unit} :: {number, unit}
def a + b when is_number(a) and is_number(b), do: Kernel.+(a, b) # original
def {a, unit} + {b, unit} when is_number(a) and is_number(b) and is_atom(unit), do: {Kernel.+(a, b), unit} # same unit
def {value_a, unit_of_a} + {value_b, unit_of_b} when is_number(value_a) and is_number(value_b) and is_atom(unit_of_a) and is_atom(unit_of_b) do
{b_in_as, ^unit_of_a} = to({value_b, unit_of_b}, unit_of_a)
{Kernel.+(value_a, b_in_as), unit_of_a}
end
@doc """
Override subtraction operator to handle unit qualified values.
If multiple units of the same dimension (e.g length) are used the result is expressed in the
units of the left operand
## Examples
```
iex> use Unit
iex> {3, :ft} - {1, :ft}
{2, :ft}
iex> {61, :s} - {1, :min}
{1.0, :s}
```
"""
@spec number - number :: number
@spec {number, unit} - {number, unit} :: {number, unit}
def a - b when is_number(a) and is_number(b), do: Kernel.-(a, b) # original
def {a, unit} - {b, unit} when is_number(a) and is_number(b) and is_atom(unit), do: {Kernel.-(a, b), unit} # same unit
def {value_a, unit_of_a} - {value_b, unit_of_b} when is_number(value_a) and is_number(value_b) and is_atom(unit_of_a) and is_atom(unit_of_b) do
{b_in_as, ^unit_of_a} = to({value_b, unit_of_b}, unit_of_a)
{Kernel.-(value_a, b_in_as), unit_of_a}
end
@doc """
Override unary plus operator to handle unit qualified values.
## Examples
```
iex> use Unit
iex>+3
3
iex>+{3, :mm}
{3, :mm}
```
"""
@spec +number :: number
@spec +tuple :: tuple
def +a when is_number(a), do: a # original
def +a when is_tuple(a) do # pattern matching here caused syntax error
with {value, unit} <- a,
true <- is_number(value) and is_atom(unit) do
a
end
end
@doc """
Override unary minus operator to handle unit qualified values.
## Examples
```
iex> use Unit
iex>-3
-3
iex>-{3, :mm}
{-3, :mm}
```
"""
@spec -number :: number
@spec -tuple :: tuple
def -a when is_number(a), do: Kernel.-(a) # original
def -a when is_tuple(a) do # pattern matching here caused syntax error
with {value, unit} <- a,
true <- is_number(value) and is_atom(unit) do
{-value, unit}
end
end
@doc """
Override multiplication operator to handle unit qualified values.
When the result is a different dimension it is expressed in the base SI unit.
## Examples
```
iex> use Unit
iex> 5 * 6
30
iex> 4 * {3, :knots}
{12, :knots}
iex> {3, :knots} * 4
{12, :knots}
iex> {1, :m} * {200, :cm}
{2.0, :m2}
```
"""
@spec number * number :: number
@spec number * {number, unit} :: {number, unit}
@spec {number, unit} * number :: {number, unit}
@spec {number, unit} * {number, unit} :: {number, unit}
def a * b when is_number(a) and is_number(b), do: Kernel.*(a, b) # original
def a * {b, unit} when is_number(a) and is_number(b) and is_atom(unit), do: {Kernel.*(a, b), unit} # dimensionless coefficient
def {a, unit} * b when is_number(a) and is_number(b) and is_atom(unit), do: {Kernel.*(a, b), unit}
def {a_val, a_unit} * {b_val, b_unit} when is_number(a_val) and is_atom(a_unit) and is_number(b_val) and is_atom(b_unit) do
with {:a_conv_details, {a_dim, a_cf, _}} <- {:a_conv_details, @units[a_unit]},
{:b_conv_details, {b_dim, b_cf, _}} <- {:b_conv_details, @units[b_unit]},
{:result_dim, [{result_dim, _, _}]} <- { # What dimension is result of multiplying a_dim and b_dim?
:result_dim,
@dimensions
|> Enum.filter(&(match?({_, ^a_dim, ^b_dim}, &1) or match?({_, ^b_dim, ^a_dim}, &1)))},
{:result_unit, [{result_unit, _}]} <- {
:result_unit,
@units
|> Map.to_list()
|> Enum.filter(&match?({_, {^result_dim, 1.0, _}}, &1)) # Coefficient of base unit for a dimension is 1.0
} do
{a_val * a_cf * b_val * b_cf, result_unit}
else
{:a_conv_details, _} -> {:error, "Unknown unit '#{Atom.to_string(a_unit)}' for left operand of *."}
{:b_conv_details, _} -> {:error, "Unknown unit '#{Atom.to_string(b_unit)}' for right operand of *."}
{:result_dim, _} -> {:error, "Could not resolve result dimension when multiplying '#{Atom.to_string(a_unit)}' and '#{Atom.to_string(b_unit)}'."}
{:result_unit, _} -> {:error, "Could not resolve result unit when multiplying '#{Atom.to_string(a_unit)}' and '#{Atom.to_string(b_unit)}'."}
end
end
@doc """
Override division operator to handle unit qualified values.
When the result is a different dimension it is expressed in the base SI unit.
## Examples
```
iex> use Unit
iex> 30 / 6
5.0
iex> 4 / {3, :knots}
{1.3333333333333333, :knots}
iex> {3, :knots} / 4
{0.75, :knots}
iex> {2.0, :m2} / {200, :cm}
{1.0, :m}
```
"""
@spec number / number :: float
@spec number / {number, unit} :: {float, unit}
@spec {number, unit} / number :: {float, unit}
@spec {number, unit} / {number, unit} :: {float, unit}
def a / b when is_number(a) and is_number(b), do: Kernel./(a, b) # original
def a / {b, unit} when is_number(a) and is_number(b) and is_atom(unit), do: {Kernel./(a, b), unit} # dimensionless coefficient
def {a, unit} / b when is_number(a) and is_number(b) and is_atom(unit), do: {Kernel./(a, b), unit}
def {a_val, a_unit} / {b_val, b_unit} when is_number(a_val) and is_atom(a_unit) and is_number(b_val) and is_atom(b_unit) do
with {:a_conv_details, {a_dim, a_cf, _}} <- {:a_conv_details, @units[a_unit]},
{:b_conv_details, {b_dim, b_cf, _}} <- {:b_conv_details, @units[b_unit]},
{:result_dim_opts, [{_, result_dim_1, result_dim_2}]} <- { # What dimension is result of dividing a_dim by b_dim?
:result_dim_opts,
@dimensions
|> Enum.filter(&(match?({^a_dim, _, ^b_dim}, &1) or match?({^a_dim, ^b_dim, _}, &1)))},
{:result_dim, result_dim} <- {:result_dim, (if b_dim == result_dim_1, do: result_dim_2, else: result_dim_1)},
{:result_unit, [{result_unit, _}]} <- {
:result_unit,
@units
|> Map.to_list()
|> Enum.filter(&match?({_, {^result_dim, 1.0, _}}, &1)) # Coefficient of base unit for a dimension is 1.0
} do
{(a_val * a_cf) / (b_val * b_cf), result_unit}
else
{:a_conv_details, _} -> {:error, "Unknown unit '#{Atom.to_string(a_unit)}' for left operand of /."}
{:b_conv_details, _} -> {:error, "Unknown unit '#{Atom.to_string(b_unit)}' for right operand of /."}
{:result_dim_opts, _} -> {:error, "Could not resolve result dimension when dividing '#{Atom.to_string(a_unit)}' by '#{Atom.to_string(b_unit)}'."}
{:result_unit, _} -> {:error, "Could not resolve result unit when dividing '#{Atom.to_string(a_unit)}' by '#{Atom.to_string(b_unit)}'."}
end
end
end
|
lib/unit.ex
| 0.798776 | 0.839405 |
unit.ex
|
starcoder
|
defmodule ExqLimit.Global do
@moduledoc """
Exq comes with concurrency control support per queue, but it's
limited to a single worker node. This module on the other hand
limits the concurrency globally across all the worker nodes. For
example, with a limit of 10, if there are two active worker nodes,
each will be allowed to work on 5 concurrent jobs. The limits per
worker node will get auto adjusted when new worker nodes get added
or existing ones get removed.
### Options
- `limit` (integer) - Global max concurrency across all the worker nodes. Required field
- `node_id` (string) - Unique id of the worker node. Defaults to Exq node identifier.
- `interval` (integer - milliseconds) - The availability of each node is determined by the last heartbeat. The interval controls how often the node registers the heartbeat. Defaults to 20_000.
- `missed_heartbeats_allowed` (integer) - Number of heartbeats a node is allowed to miss, after which the node will be considered dead and its capacity will be redistributed to the remaining worker nodes. Defaults to 10.
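### Usage

A minimal sketch of plugging this module into Exq's per-queue concurrency
configuration. The queue name and limit are illustrative; check the Exq and
ExqLimit documentation for the exact `:queues` option shape supported by your
versions:

    config :exq,
      queues: [
        {"default", {ExqLimit.Global, limit: 10}}
      ]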
### NOTES
This implementation tries never to run more than the configured
limit. However, the limit could be exceeded if a node cannot
communicate with the Redis server but is still able to continue its
currently running jobs. In that case, after a certain time, the node
will be considered dead and its capacity will be shared across the
remaining nodes.
"""
require Logger
alias ExqLimit.Redis.Script
require ExqLimit.Redis.Script
@behaviour Exq.Dequeue.Behaviour
Script.compile(:rebalance)
Script.compile(:drain)
Script.compile(:fill)
Script.compile(:heartbeat)
Script.compile(:clear)
defmodule State do
@moduledoc false
defstruct limit: nil,
running: 0,
allowed: 0,
current: 0,
redis: nil,
node_id: nil,
version: nil,
queue: nil,
interval: nil,
cutoff_threshold: nil,
last_synced: nil,
mode: :rebalance,
version_key: nil,
limit_key: nil,
allocation_key: nil,
heartbeat_key: nil
end
@version "limit_global_v1"
@impl true
def init(%{queue: queue}, options) do
interval = Keyword.get(options, :interval, 20_000)
missed_heartbeats_allowed = Keyword.get(options, :missed_heartbeats_allowed, 10)
namespace =
Keyword.get_lazy(options, :namespace, fn ->
Exq.Support.Config.get(:namespace)
end)
prefix = "#{namespace}:#{@version}:#{queue}:"
state = %State{
interval: interval,
cutoff_threshold: interval / 1000 * (missed_heartbeats_allowed + 1),
limit: Keyword.fetch!(options, :limit),
redis:
Keyword.get_lazy(options, :redis, fn ->
Exq.Support.Config.get(:name)
|> Exq.Support.Opts.redis_client_name()
end),
node_id:
Keyword.get_lazy(options, :node_id, fn ->
Exq.Support.Config.node_identifier().node_id()
end),
queue: queue,
version_key: prefix <> "version",
limit_key: prefix <> "limit",
allocation_key: prefix <> "allocation",
heartbeat_key: prefix <> "heartbeat"
}
state = sync(state)
{:ok, state}
end
@impl true
def stop(state) do
sync(%{state | mode: :clear})
:ok
end
@impl true
def available?(state) do
state = sync(state)
{:ok, state.running < Enum.min([state.allowed, state.current]), state}
end
@impl true
def dispatched(state), do: {:ok, %{state | running: state.running + 1}}
@impl true
def processed(state), do: {:ok, %{state | running: state.running - 1}}
@impl true
def failed(state), do: {:ok, %{state | running: state.running - 1}}
defp sync(state) do
now = System.system_time(:millisecond)
state =
if state.mode != :clear && state.last_synced && now - state.last_synced < state.interval do
state
else
time = now / 1000
state = %{state | last_synced: now}
case state.mode do
:clear ->
clear(state, time)
:rebalance ->
rebalance(state, time)
:drain ->
drain(state, time)
:fill ->
fill(state, time)
:heartbeat ->
heartbeat(state, time)
end
end
:telemetry.execute(
[:exq_limit, :global],
%{running: state.running, quota: state.current},
%{queue: state.queue}
)
state
end
defp heartbeat(state, time) do
case Script.eval!(
state.redis,
@heartbeat,
[
state.version_key,
state.heartbeat_key
],
[state.node_id, state.version, time, time - state.cutoff_threshold]
) do
{:ok, 0} ->
rebalance(state, time)
{:ok, 1} ->
state
error ->
Logger.error(
"Failed to run hearbeat script. Unexpected error from redis: #{inspect(error)}"
)
state
end
end
defp fill(state, time) do
case Script.eval!(
state.redis,
@fill,
[
state.version_key,
state.allocation_key,
state.heartbeat_key
],
[state.node_id, state.version, time, time - state.cutoff_threshold]
) do
{:ok, 0} ->
rebalance(state, time)
{:ok, [allowed, current]} ->
%{state | allowed: allowed, current: current, mode: next_mode(allowed, current)}
error ->
Logger.error("Failed to run fill script. Unexpected error from redis: #{inspect(error)}")
state
end
end
defp drain(state, time) do
amount = Enum.min([state.current - state.allowed, state.current - state.running])
if amount == 0 do
heartbeat(state, time)
else
case Script.eval!(
state.redis,
@drain,
[
state.version_key,
state.allocation_key,
state.heartbeat_key
],
[state.node_id, state.version, time, time - state.cutoff_threshold, amount]
) do
{:ok, 0} ->
rebalance(state, time)
{:ok, 1} ->
current = state.current - amount
%{state | current: current, mode: next_mode(state.allowed, current)}
error ->
Logger.error(
"Failed to run drain script. Unexpected error from redis: #{inspect(error)}"
)
state
end
end
end
defp rebalance(state, time) do
case Script.eval!(
state.redis,
@rebalance,
[
state.version_key,
state.limit_key,
state.allocation_key,
state.heartbeat_key
],
[state.node_id, state.limit, time, time - state.cutoff_threshold]
) do
{:ok, [version, allowed, current]} ->
%{
state
| version: version,
allowed: allowed,
current: current,
mode: next_mode(allowed, current)
}
error ->
Logger.error(
"Failed to run rebalance script. Unexpected error from redis: #{inspect(error)}"
)
state
end
end
defp clear(state, _time) do
case Script.eval!(
state.redis,
@clear,
[
state.version_key,
state.limit_key,
state.allocation_key,
state.heartbeat_key
],
[state.node_id]
) do
{:ok, 1} ->
state
error ->
Logger.error("Failed to run clear script. Unexpected error from redis: #{inspect(error)}")
state
end
end
defp next_mode(allowed, current) do
cond do
current > allowed -> :drain
current < allowed -> :fill
current == allowed -> :heartbeat
end
end
end
|
lib/exq_limit/global.ex
| 0.869701 | 0.63576 |
global.ex
|
starcoder
|
defmodule Harmonex.Pitch do
@moduledoc """
Provides functions for working with pitches on the Western dodecaphonic scale.
"""
alias Harmonex.Interval
defstruct natural_name: nil, accidental: :natural, octave: nil
@typedoc """
A `Harmonex.Pitch` struct.
"""
@type pitch :: %Harmonex.Pitch{natural_name: natural_name,
accidental: accidental,
octave: octave}
@typedoc """
An expression describing a pitch.
"""
@type t :: t_map | t_atom
@typedoc """
A map expression describing a pitch.
"""
@type t_map :: %{natural_name: natural_name, accidental: accidental, octave: octave} |
%{natural_name: natural_name, accidental: accidental} |
%{natural_name: natural_name, octave: octave} |
%{natural_name: natural_name} |
pitch
@typedoc """
An atom expression describing a pitch. Can be a `t:natural_name/0`, or a
`t:natural_name/0` joined by underscore with an `t:accidental/0` (e.g.,
`:a_flat`).
"""
@type t_atom :: natural_name | atom
@typedoc """
The name of a pitch whose accidental is ♮ (natural).
"""
@type natural_name :: :c | :d | :e | :f | :g | :a | :b
@position_by_natural_name [c: 0, d: 2, e: 4, f: 5, g: 7, a: 9, b: 11]
@natural_names @position_by_natural_name |> Keyword.keys
@natural_names_count @natural_names |> length
@typedoc """
The alteration of a pitch from its natural value.
"""
@type accidental :: :natural | :flat | :sharp | :double_flat | :double_sharp
@accidental_by_offset [double_flat: -2,
flat: -1,
natural: 0,
sharp: 1,
double_sharp: 2]
@accidentals @accidental_by_offset |> Keyword.keys
@typedoc """
The numeric element of scientific pitch notation.
"""
@type octave :: integer | nil
@invalid_name "Invalid pitch name -- must be in #{inspect Enum.sort(@natural_names)}"
@invalid_accidental_or_octave "Invalid accidental or octave -- must be in #{inspect @accidentals} or be an integer"
@invalid_accidental "Invalid accidental -- must be in #{inspect @accidentals}"
@invalid_octave "Invalid octave -- must be an integer"
@doc """
Computes the accidental of the specified `pitch`.
## Examples
iex> Harmonex.Pitch.accidental %{natural_name: :a, accidental: :flat, octave: 6}
:flat
iex> Harmonex.Pitch.accidental %{natural_name: :a}
:natural
iex> Harmonex.Pitch.accidental :a_flat
:flat
iex> Harmonex.Pitch.accidental :a
:natural
"""
@spec accidental(t) :: accidental | Harmonex.error
def accidental(pitch) do
with %{accidental: pitch_accidental} <- pitch |> new do
pitch_accidental
end
end
@doc """
**(DEPRECATED)** Computes a pitch that is the sum of the specified `pitch` and
the specified `adjustment` in semitones.
## Examples
iex> Harmonex.Pitch.adjust_by_semitones %{natural_name: :a, accidental: :sharp}, 14
%Harmonex.Pitch{natural_name: :c, accidental: :natural}
iex> Harmonex.Pitch.adjust_by_semitones :b_flat, -2
:g_sharp
iex> Harmonex.Pitch.adjust_by_semitones :c, 0
:c_natural
"""
@spec adjust_by_semitones(t_map, integer) :: pitch | Harmonex.error
def adjust_by_semitones(pitch, adjustment) when is_map(pitch) do
with pitch_name when is_atom(pitch_name) <- name(pitch) do
pitch_name |> adjust_by_semitones(adjustment) |> new
end
end
@spec adjust_by_semitones(t_atom, integer) :: t_atom | Harmonex.error
def adjust_by_semitones(pitch, adjustment) do
with pitch_name when is_atom(pitch_name) <- name(pitch) do
(position(pitch_name) + adjustment) |> Integer.mod(12)
|> names_at
|> Enum.sort_by(&complexity_score/1)
|> List.first
end
end
@doc """
Computes the pitch class corresponding to the specified `pitch`.
## Examples
iex> Harmonex.Pitch.class %{natural_name: :a, accidental: :flat, octave: 6}
%Harmonex.Pitch{natural_name: :a, accidental: :flat}
iex> Harmonex.Pitch.class %{natural_name: :a}
%Harmonex.Pitch{natural_name: :a, accidental: :natural}
iex> Harmonex.Pitch.class :a_flat
:a_flat
iex> Harmonex.Pitch.class :a
:a_natural
"""
@spec class(t_map) :: pitch | Harmonex.error
def class(pitch) when is_map(pitch) do
with pitch_name when is_atom(pitch_name) <- pitch |> name do
pitch_name |> new
end
end
@spec class(t_atom) :: t_atom | Harmonex.error
def class(pitch), do: pitch |> name
@doc """
Determines if the specified `pitch` represents a pitch class.
## Examples
iex> Harmonex.Pitch.class? %{natural_name: :a, accidental: :flat, octave: 6}
false
iex> Harmonex.Pitch.class? %{natural_name: :a}
true
iex> Harmonex.Pitch.class? :a_flat
true
iex> Harmonex.Pitch.class? :a
true
"""
@spec class?(t) :: boolean | Harmonex.error
def class?(pitch) do
case pitch |> octave do
pitch_octave when is_integer(pitch_octave) -> false
pitch_octave when is_nil(pitch_octave) -> true
other -> other
end
end
@doc """
Enharmonically compares the specified `pitch1` and `pitch2`.
It returns:
* `:eq` if they are identical or enharmonically equivalent
* `:lt` if `pitch1` is enharmonically lower
* `:gt` if `pitch1` is enharmonically higher
If either specified pitch is missing an octave (see `octave/1`) then octaves
are ignored and the smaller of the two intervals between them is measured.
## Examples
iex> Harmonex.Pitch.compare %{natural_name: :a, octave: 4}, %{natural_name: :a, octave: 4}
:eq
iex> Harmonex.Pitch.compare %{natural_name: :b, accidental: :sharp, octave: 5}, %{natural_name: :c, octave: 6}
:eq
iex> Harmonex.Pitch.compare %{natural_name: :b, accidental: :sharp, octave: 5}, %{natural_name: :c, octave: 5}
:gt
iex> Harmonex.Pitch.compare %{natural_name: :g, accidental: :sharp}, :a_flat
:eq
iex> Harmonex.Pitch.compare :c_flat, %{natural_name: :a, accidental: :double_sharp, octave: 2}
:eq
iex> Harmonex.Pitch.compare :a, :a_sharp
:lt
#{ # TODO: Make this pass using `Interval.invert/1`
# iex> Harmonex.Pitch.compare :a, :d_sharp
# :lt
}
iex> Harmonex.Pitch.compare :a, :d_double_sharp
:gt
iex> Harmonex.Pitch.compare :a, :a
:eq
"""
@spec compare(t, t) :: Harmonex.comparison | Harmonex.error
def compare(pitch1, pitch2) do
with pitch1_struct when is_map(pitch1_struct) <- new(pitch1),
pitch2_struct when is_map(pitch2_struct) <- new(pitch2) do
{comparison, _} = compare_with_semitones(pitch1, pitch2)
comparison
end
end
@doc """
Determines whether the specified `pitch1` and `pitch2` are enharmonically
equivalent.
If either specified pitch is missing an octave (see `octave/1`) then octaves
are ignored.
## Examples
iex> Harmonex.Pitch.enharmonic? %{natural_name: :a, octave: 4}, %{natural_name: :a, octave: 4}
true
iex> Harmonex.Pitch.enharmonic? %{natural_name: :b, accidental: :sharp, octave: 5}, %{natural_name: :c, octave: 6}
true
iex> Harmonex.Pitch.enharmonic? %{natural_name: :b, accidental: :sharp, octave: 5}, %{natural_name: :c, octave: 5}
false
iex> Harmonex.Pitch.enharmonic? %{natural_name: :g, accidental: :sharp}, :a_flat
true
iex> Harmonex.Pitch.enharmonic? :c_flat, %{natural_name: :a, accidental: :double_sharp, octave: 2}
true
iex> Harmonex.Pitch.enharmonic? :a, :a_sharp
false
iex> Harmonex.Pitch.enharmonic? :a, :a
true
"""
@spec enharmonic?(t, t) :: boolean | Harmonex.error
def enharmonic?(pitch1, pitch2) do
with semitones when is_integer(semitones) <- semitones(pitch1, pitch2) do
semitones == 0
end
end
@doc """
Computes the enharmonic equivalents of the specified `pitch`.
## Examples
iex> Harmonex.Pitch.enharmonics %{natural_name: :g, accidental: :sharp, octave: 6}
[%Harmonex.Pitch{natural_name: :a, accidental: :flat, octave: 6}]
iex> Harmonex.Pitch.enharmonics %{natural_name: :a, accidental: :double_sharp, octave: 5}
[%Harmonex.Pitch{natural_name: :b, accidental: :natural, octave: 5}, %Harmonex.Pitch{natural_name: :c, accidental: :flat, octave: 6}]
iex> Harmonex.Pitch.enharmonics :f_double_sharp
[:g_natural, :a_double_flat]
iex> Harmonex.Pitch.enharmonics :c
[:b_sharp, :d_double_flat]
"""
@spec enharmonics(t_map) :: [pitch] | Harmonex.error
def enharmonics(pitch) when is_map(pitch) do
with pitch_name when is_atom(pitch_name) <- name(pitch) do
pitch_octave = octave(pitch)
pitch_natural_name = natural_name(pitch)
pitch_name |> position
|> Integer.mod(12)
|> names_at
|> Enum.reject(&(&1 == pitch_name))
|> Enum.map(fn enharmonic_name ->
if pitch_octave |> is_nil do
enharmonic_name |> new
else
          # Octave numbers increment at C, so a spelling that crosses the B/C boundary lands in the neighboring octave.
          octave = cond do
            (pitch_natural_name in [:a, :b]) &&
                (natural_name(enharmonic_name) in [:c, :d]) ->
              pitch_octave + 1
            (pitch_natural_name in [:c, :d]) &&
                (natural_name(enharmonic_name) in [:a, :b]) ->
              pitch_octave - 1
            :else ->
              pitch_octave
          end
enharmonic_name |> new(octave)
end
end)
end
end
@spec enharmonics(t_atom) :: [t_atom] | Harmonex.error
def enharmonics(pitch) do
with pitch_struct when is_map(pitch_struct) <- new(pitch) do
pitch_struct |> enharmonics |> Enum.map(&name/1)
end
end
@doc """
Computes the interval between the specified `pitch1` and `pitch2`. Equivalent
to `Harmonex.Interval.from_pitches/2`.
If either specified pitch is missing an octave (see `octave/1`) then octaves
are ignored and the smaller of the two intervals between them is computed.
## Examples
iex> Harmonex.Pitch.interval %{natural_name: :a, accidental: :sharp, octave: 4}, %{natural_name: :c, octave: 6}
%Harmonex.Interval{quality: :diminished, size: 10}
iex> Harmonex.Pitch.interval :a_sharp, :c
%Harmonex.Interval{quality: :diminished, size: 3}
iex> Harmonex.Pitch.interval :d_double_sharp, :a_double_sharp
%Harmonex.Interval{quality: :perfect, size: 4}
iex> Harmonex.Pitch.interval :c_flat, :c_natural
%Harmonex.Interval{quality: :augmented, size: 1}
iex> Harmonex.Pitch.interval :a_flat, :e_sharp
%Harmonex.Interval{quality: :doubly_diminished, size: 4}
iex> Harmonex.Pitch.interval :a_flat, :e_double_sharp
{:error, "Invalid interval"}
"""
@spec interval(t, t) :: Interval.interval | Harmonex.error
defdelegate interval(pitch1, pitch2), to: Interval, as: :from_pitches
@doc """
Computes the full name of the specified `pitch`, combining its natural name and
its accidental.
## Examples
iex> Harmonex.Pitch.name %{natural_name: :a, accidental: :flat}
:a_flat
iex> Harmonex.Pitch.name %{natural_name: :a}
:a_natural
iex> Harmonex.Pitch.name :a_flat
:a_flat
iex> Harmonex.Pitch.name :a
:a_natural
"""
@spec name(t) :: t_atom | Harmonex.error
def name(pitch) do
with %{natural_name: pitch_natural_name,
accidental: pitch_accidental} <- new(pitch) do
:"#{pitch_natural_name}_#{pitch_accidental}"
end
end
@doc """
Computes the natural name of the specified `pitch`.
## Examples
iex> Harmonex.Pitch.natural_name %{natural_name: :a, accidental: :flat}
:a
iex> Harmonex.Pitch.natural_name %{natural_name: :a}
:a
iex> Harmonex.Pitch.natural_name :a_flat
:a
iex> Harmonex.Pitch.natural_name :a
:a
"""
@spec natural_name(t) :: natural_name | Harmonex.error
def natural_name(pitch) do
with %{natural_name: pitch_natural_name} <- new(pitch) do
pitch_natural_name
end
end
@doc """
Constructs a new pitch with the specified `name_or_definition`.
## Examples
iex> Harmonex.Pitch.new %{natural_name: :a, accidental: :flat, octave: 6}
%Harmonex.Pitch{natural_name: :a, accidental: :flat, octave: 6}
iex> Harmonex.Pitch.new %{natural_name: :a, accidental: :flat}
%Harmonex.Pitch{natural_name: :a, accidental: :flat}
iex> Harmonex.Pitch.new %{natural_name: :a, octave: 6}
%Harmonex.Pitch{natural_name: :a, accidental: :natural, octave: 6}
iex> Harmonex.Pitch.new %{natural_name: :a}
%Harmonex.Pitch{natural_name: :a, accidental: :natural}
iex> Harmonex.Pitch.new :a_flat
%Harmonex.Pitch{natural_name: :a, accidental: :flat}
iex> Harmonex.Pitch.new :a
%Harmonex.Pitch{natural_name: :a, accidental: :natural}
iex> Harmonex.Pitch.new %{natural_name: :h}
{:error, #{inspect @invalid_name}}
iex> Harmonex.Pitch.new :h
{:error, #{inspect @invalid_name}}
iex> Harmonex.Pitch.new %{natural_name: :a, accidental: :out_of_tune}
{:error, #{inspect @invalid_accidental}}
iex> Harmonex.Pitch.new %{natural_name: :a, accidental: :flat, octave: :not_an_octave}
{:error, #{inspect @invalid_octave}}
"""
@spec new(t) :: pitch | Harmonex.error
@spec new(natural_name, accidental, octave) :: pitch | Harmonex.error
@spec new(natural_name, accidental | octave) :: pitch | Harmonex.error
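  # Heads of `new` are generated at compile time for every valid
  # natural-name/accidental combination; input that matches none of them
  # falls through to the natural-name-only clauses and the catch-all error
  # clauses below.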
for natural_name <- @natural_names, accidental <- @accidentals do
def new(%{natural_name: unquote(natural_name)=natural_name,
accidental: unquote(accidental)=accidental,
octave: octave}=_name_or_definition) when is_integer(octave) or
is_nil(octave) do
__MODULE__ |> struct(natural_name: natural_name,
accidental: accidental,
octave: octave)
end
def new(%{natural_name: unquote(natural_name),
accidental: unquote(accidental),
octave: _}=_name_or_definition) do
{:error, @invalid_octave}
end
def new(%{natural_name: unquote(natural_name)=natural_name,
accidental: unquote(accidental)=accidental}=_name_or_definition) do
__MODULE__ |> struct(natural_name: natural_name, accidental: accidental)
end
@doc """
Constructs a new pitch with the specified `natural_name`, `accidental`, and
`octave`.
## Examples
iex> Harmonex.Pitch.new :a, :flat, 6
%Harmonex.Pitch{natural_name: :a, accidental: :flat, octave: 6}
iex> Harmonex.Pitch.new :h, :flat, 6
{:error, #{inspect @invalid_name}}
iex> Harmonex.Pitch.new :a, :out_of_tune, 6
{:error, #{inspect @invalid_accidental}}
iex> Harmonex.Pitch.new :a, :flat, :not_an_octave
{:error, #{inspect @invalid_octave}}
"""
def new(unquote(natural_name)=natural_name,
unquote(accidental)=accidental,
octave) when is_integer(octave) or is_nil(octave) do
new %{natural_name: natural_name, accidental: accidental, octave: octave}
end
def new(unquote(natural_name)=_natural_name,
unquote(accidental)=_accidental,
_octave) do
{:error, @invalid_octave}
end
@doc """
Constructs a new pitch with the specified `name` and `accidental_or_octave`.
## Examples
iex> Harmonex.Pitch.new :a, :flat
%Harmonex.Pitch{natural_name: :a, accidental: :flat}
iex> Harmonex.Pitch.new :a_flat, 6
%Harmonex.Pitch{natural_name: :a, accidental: :flat, octave: 6}
iex> Harmonex.Pitch.new :h, :flat
{:error, #{inspect @invalid_name}}
iex> Harmonex.Pitch.new :a, :out_of_tune
{:error, #{inspect @invalid_accidental_or_octave}}
"""
def new(unquote(natural_name)=name,
unquote(accidental)=accidental_or_octave) do
new %{natural_name: name, accidental: accidental_or_octave}
end
def new(unquote(natural_name)=name,
accidental_or_octave) when is_integer(accidental_or_octave) or
is_nil(accidental_or_octave) do
new %{natural_name: name, octave: accidental_or_octave}
end
name = :"#{natural_name}_#{accidental}"
def new(unquote(name)=_name, octave) do
new %{natural_name: unquote(natural_name),
accidental: unquote(accidental),
octave: octave}
end
def new(unquote(name)=_name_or_definition) do
new %{natural_name: unquote(natural_name), accidental: unquote(accidental)}
end
end
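  # Clauses for bare natural names (e.g. `:a`, treated as `:a_natural`) and
  # for valid natural names paired with an invalid accidental or octave.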
for natural_name <- @natural_names do
def new(unquote(natural_name)=name_or_definition) do
new %{natural_name: name_or_definition, accidental: :natural}
end
def new(%{natural_name: unquote(natural_name),
accidental: _}=_name_or_definition) do
{:error, @invalid_accidental}
end
def new(%{natural_name: unquote(natural_name)=name, octave: octave}) do
new %{natural_name: name, accidental: :natural, octave: octave}
end
def new(%{natural_name: unquote(natural_name)=name}) do
new %{natural_name: name, accidental: :natural}
end
def new(unquote(natural_name)=_natural_name,
_accidental,
octave) when is_integer(octave) or is_nil(octave) do
{:error, @invalid_accidental}
end
def new(unquote(natural_name)=_name, _accidental_or_octave) do
{:error, @invalid_accidental_or_octave}
end
end
def new(_name_or_definition), do: {:error, @invalid_name}
def new(_name, _accidental_or_octave), do: {:error, @invalid_name}
def new(_natural_name, _accidental, _octave), do: {:error, @invalid_name}
@doc """
Computes the octave of the specified `pitch`. Pitch values having an octave of
`nil` represent pitch classes.
## Examples
iex> Harmonex.Pitch.octave %{natural_name: :a, accidental: :flat, octave: 6}
6
iex> Harmonex.Pitch.octave %{natural_name: :a}
nil
iex> Harmonex.Pitch.octave :a_flat
nil
"""
@spec octave(t) :: octave | Harmonex.error
def octave(pitch) do
with %{octave: pitch_octave} <- new(pitch) do
pitch_octave
end
end
@doc """
Computes the distance in half steps between the specified `pitch1` and
`pitch2`.
If either specified pitch is missing an octave (see `octave/1`) then octaves
are ignored and the smaller of the two intervals between them is measured.
## Examples
iex> Harmonex.Pitch.semitones %{natural_name: :a, accidental: :flat, octave: 4}, %{natural_name: :c, accidental: :sharp, octave: 6}
17
iex> Harmonex.Pitch.semitones %{natural_name: :a, octave: 5}, :d_sharp
6
iex> Harmonex.Pitch.semitones :a, :d_double_sharp
5
iex> Harmonex.Pitch.semitones :c, :c
0
"""
@spec semitones(t, t) :: Interval.semitones | Harmonex.error
def semitones(pitch1, pitch2) do
with pitch1_struct when is_map(pitch1_struct) <- new(pitch1),
pitch2_struct when is_map(pitch2_struct) <- new(pitch2) do
{_, semitones} = compare_with_semitones(pitch1, pitch2)
semitones
end
end
@doc false
@spec staff_positions(t, t) :: 0..3
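  # Letter-name (staff-position) distance between two pitches, ignoring
  # octaves and direction; for example, :c and :e are 2 apart, and :b and
  # :c are 1 apart.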
def staff_positions(pitch1, pitch2) do
pitch1_position = pitch1 |> natural_name |> staff_position
pitch2_position = pitch2 |> natural_name |> staff_position
diff_simple = abs(pitch1_position - pitch2_position) |> Integer.mod(@natural_names_count)
diff_simple_inverse = @natural_names_count - diff_simple
min diff_simple, diff_simple_inverse
end
@spec compare_with_semitones(t, t) :: {Harmonex.comparison, Interval.semitones} | Harmonex.error
defp compare_with_semitones(pitch1, pitch2) do
pitch1_position = position(pitch1)
pitch2_position = position(pitch2)
if is_nil(octave(pitch1)) || is_nil(octave(pitch2)) do
pitch1_position_simple = pitch1_position |> Integer.mod(12)
pitch2_position_simple = pitch2_position |> Integer.mod(12)
positions_diff = abs(pitch1_position_simple - pitch2_position_simple)
semitones = min(positions_diff, 12 - positions_diff)
comparison = cond do
pitch1_position_simple < pitch2_position_simple -> :lt
pitch2_position_simple < pitch1_position_simple -> :gt
:else -> :eq
end
{comparison, semitones}
else
comparison = cond do
pitch1_position < pitch2_position -> :lt
pitch2_position < pitch1_position -> :gt
:else -> :eq
end
semitones = abs(pitch1_position - pitch2_position)
{comparison, semitones}
end
end
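  # Spelling complexity: 0 for naturals, 1 for single accidentals, 2 for
  # double accidentals. Used to choose the simplest name among enharmonic
  # equivalents.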
@spec complexity_score(t_atom) :: 0..2
for natural_name <- @natural_names do
defp complexity_score(unquote(natural_name)), do: 0
defp complexity_score(unquote(:"#{natural_name}_natural")), do: 0
defp complexity_score(unquote(:"#{natural_name}_flat")), do: 1
defp complexity_score(unquote(:"#{natural_name}_sharp")), do: 1
defp complexity_score(unquote(:"#{natural_name}_double_flat")), do: 2
defp complexity_score(unquote(:"#{natural_name}_double_sharp")), do: 2
end
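  # Build, at compile time, a lookup from each of the 12 chromatic positions
  # to every pitch name that maps onto it. Each enharmonic group is listed
  # in ascending letter order, except that groups wrapping the G-A boundary
  # put F- and G-based spellings before A- and B-based ones.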
@spec names_at(0..11) :: [t_atom]
name_list_by_position = @position_by_natural_name |> Enum.reduce(%{},
fn({natural_name, position},
acc) ->
@accidental_by_offset |> Enum.reduce(acc,
fn({accidental, offset}, inner_acc) ->
name = :"#{natural_name}_#{accidental}"
altered_position = Integer.mod(position + offset, 12)
inner_acc |> Map.put_new(altered_position, [])
|> Map.update!(altered_position, &([name | &1]))
end)
end)
for {position, names} <- name_list_by_position do
# Tweak the order of enharmonic groups that wrap around G-A.
sorted_names = names |> Enum.sort(fn(name1, name2) ->
natural_name1 = name1 |> Atom.to_string |> String.at(0)
natural_name2 = name2 |> Atom.to_string |> String.at(0)
cond do
(natural_name1 == "f" && natural_name2 == "a") ||
(natural_name1 == "g" && natural_name2 in ~w( a b )) ->
true
(natural_name2 == "f" && natural_name1 == "a") ||
(natural_name2 == "g" && natural_name1 in ~w( a b )) ->
false
:else ->
name1 <= name2
end
end)
defp names_at(unquote(position)), do: unquote(sorted_names)
end
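  # Chromatic position of a pitch name: the natural name's position within
  # the octave plus the accidental's offset. Values can fall outside 0..11
  # (for example :c_flat and :b_sharp), so callers wrap with `Integer.mod/2`
  # where needed.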
@spec position(t_atom) :: 0..11
for {natural_name, position} <- @position_by_natural_name do
defp position(unquote(natural_name)), do: unquote(position)
end
for {natural_name, position} <- @position_by_natural_name,
{accidental, offset} <- @accidental_by_offset do
name = :"#{natural_name}_#{accidental}"
defp position(unquote(name)), do: unquote(position + offset)
end
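  # Absolute position of a pitch value: its chromatic position plus 12
  # semitones per octave. Pitch classes (nil octave) keep their simple
  # position.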
@spec position(t_map) :: integer | Harmonex.error
defp position(pitch) do
pitch_position_simple = pitch |> name |> position
case octave(pitch) do
nil -> pitch_position_simple
pitch_octave -> pitch_position_simple + (pitch_octave * 12)
end
end
@spec staff_position(natural_name) :: 0..6
for {pitch_natural_name, index} <- @natural_names |> Stream.with_index do
defp staff_position(unquote(pitch_natural_name)), do: unquote(index)
end
end
|
lib/harmonex/pitch.ex
| 0.936263 | 0.666239 |
pitch.ex
|
starcoder
|