code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
defmodule Phoenix.NotAcceptableError do
  @moduledoc """
  Raised when none of the `accept*` request headers can be satisfied
  by the server.

  `Phoenix.Controller.accepts/2` commonly raises this exception while
  negotiating between the media types the server is able to serve and
  the content types the client says it can render.

  If you are seeing this error, check that the desired formats are
  listed in your `:accepts` plug, or that the client sends a proper
  accept header. The acceptable MIME types are available in the
  `accepts` field of the exception.
  """

  # plug_status: 406 maps this exception to HTTP 406 Not Acceptable.
  defexception message: nil, accepts: [], plug_status: 406
end
defmodule Phoenix.MissingParamError do
  @moduledoc """
  Raised when a key is expected to be present in the request parameters,
  but is not.

  This exception is raised by `Phoenix.Controller.scrub_params/2` which:

    * Checks to see if the required_key is present (can be empty)
    * Changes all empty parameters to nils ("" -> nil)

  If you are seeing this error, you should handle the error and surface it
  to the end user. It means that there is a parameter missing from the request.
  """

  defexception [:message, plug_status: 400]

  # Builds the exception from the `key:` option given to `raise/2`.
  def exception(key: key) do
    message =
      "expected key #{inspect(key)} to be present in params, " <>
        "please send the expected key or adapt your scrub_params/2 call"

    %__MODULE__{message: message}
  end
end
defmodule Phoenix.ActionClauseError do
  @moduledoc """
  Raised when no clause of a controller action matches the request.

  Mirrors `FunctionClauseError`: it carries the same fields, so message
  formatting and blame annotation are delegated to it.
  """

  # Reuse FunctionClauseError's fields (module, function, arity, kind,
  # args, clauses), dropping the struct bookkeeping keys.
  exception_keys =
    FunctionClauseError.__struct__
    |> Map.keys()
    |> Kernel.--([:__exception__, :__struct__])

  defexception exception_keys

  @doc """
  Formats the error by delegating to `FunctionClauseError.message/1`.
  """
  def message(exception) do
    exception
    |> Map.put(:__struct__, FunctionClauseError)
    |> FunctionClauseError.message()
  end

  @doc """
  Annotates the exception with blame information for better reporting.

  Delegates to `FunctionClauseError.blame/2` and converts the result
  back into a `Phoenix.ActionClauseError`.
  """
  def blame(exception, stacktrace) do
    {exception, stacktrace} =
      exception
      |> Map.put(:__struct__, FunctionClauseError)
      # BUG FIX: the piped struct is already the first argument, so only
      # the stacktrace is passed here. The previous code passed the
      # exception a second time, invoking a nonexistent
      # FunctionClauseError.blame/3 and crashing during error reporting.
      |> FunctionClauseError.blame(stacktrace)

    exception = Map.put(exception, :__struct__, __MODULE__)
    {exception, stacktrace}
  end
end
defimpl Plug.Exception, for: Phoenix.ActionClauseError do
  # A request whose parameters match no action clause is a client error,
  # so it renders as HTTP 400 with no suggested remedial actions.
  def status(_exception), do: 400
  def actions(_exception), do: []
end
|
lib/phoenix/exceptions.ex
| 0.764892 | 0.52543 |
exceptions.ex
|
starcoder
|
defmodule Clickhousex.Codec.Values do
  @moduledoc """
  Encodes query parameters into the textual form ClickHouse expects,
  interpolating them in place of the `?` placeholders of a statement.
  """

  alias Clickhousex.Query

  @doc """
  Encodes `params` into `query_text`, replacing each `?` placeholder.

  Returns iodata (interleaved query fragments and encoded parameters)
  for parameterized queries, the bare statement when there are no
  placeholders, or `""` for parameterless inserts. Raises
  `ArgumentError` when the parameter count does not match the number of
  placeholders.
  """
  def encode(%Query{param_count: 0, type: :insert}, _, []) do
    # An insert query's arguments go into the post body and the query part goes
    # into the query string. If we don't have any arguments, we don't have to
    # encode anything, but we don't want to return anything here because we'd
    # duplicate the query into both the query string and post body.
    ""
  end

  def encode(%Query{param_count: 0, statement: statement}, _, []) do
    statement
  end

  def encode(%Query{param_count: 0}, _, _) do
    raise ArgumentError, "Extra params! Query doesn't contain '?'"
  end

  def encode(%Query{param_count: param_count} = query, query_text, params) do
    if length(params) != param_count do
      raise ArgumentError,
            "The number of parameters does not correspond to the number of question marks!"
    end

    query_parts = String.split(query_text, "?")
    weave(query, query_parts, params)
  end

  # Interleaves query fragments with encoded parameters:
  # [part0, param0, part1, param1, ..., partN].
  defp weave(query, query_parts, params) do
    weave(query, query_parts, params, [])
  end

  defp weave(_query, [part], [], acc) do
    Enum.reverse([part | acc])
  end

  defp weave(query, [part | parts], [param | params], acc) do
    weave(query, parts, params, [encode_param(query, param), part | acc])
  end

  # Encodes one parameter into ClickHouse's textual representation.
  # (Plain comments here: `@doc` is not allowed on private functions and
  # the previous `@doc false` triggered a compiler warning.)
  defp encode_param(query, param) when is_list(param) do
    values = Enum.map_join(param, ",", &encode_param(query, &1))

    case query.type do
      :select ->
        # We pass lists to IN clauses, and they shouldn't have brackets
        # around them.
        values

      _ ->
        "[" <> values <> "]"
    end
  end

  defp encode_param(_query, param) when is_integer(param) do
    Integer.to_string(param)
  end

  # ClickHouse represents booleans as UInt8 0/1.
  defp encode_param(_query, true), do: "1"
  defp encode_param(_query, false), do: "0"

  # NOTE: the original had this float clause duplicated verbatim; the
  # second copy was unreachable and has been removed.
  defp encode_param(_query, param) when is_float(param) do
    to_string(param)
  end

  defp encode_param(_query, nil), do: "NULL"

  defp encode_param(_query, %DateTime{} = datetime) do
    # ClickHouse DateTime has second precision and no zone designator.
    iso_date =
      datetime
      |> DateTime.truncate(:second)
      |> DateTime.to_iso8601()
      |> String.replace("Z", "")

    "'#{iso_date}'"
  end

  defp encode_param(_query, %NaiveDateTime{} = naive_datetime) do
    naive =
      naive_datetime
      |> NaiveDateTime.truncate(:second)
      |> NaiveDateTime.to_iso8601()

    "'#{naive}'"
  end

  defp encode_param(_query, %Date{} = date) do
    "'#{Date.to_iso8601(date)}'"
  end

  defp encode_param(_query, param) do
    "'" <> escape(param) <> "'"
  end

  # Escapes a string literal for ClickHouse.
  #
  # BUG FIX: the previous version used single-backslash escapes such as
  # "\_" and "\'", which the compiler collapses to the bare character,
  # making those replacements no-ops (single quotes were never escaped —
  # an injection risk). It also doubled backslashes LAST, corrupting any
  # escapes introduced earlier. Backslashes must be doubled first.
  defp escape(s) do
    s
    |> String.replace("\\", "\\\\")
    |> String.replace("'", "\\'")
    |> String.replace(~s("), ~s(\\"))
    |> String.replace("_", "\\_")
    |> String.replace("%", "\\%")
  end
end
|
lib/clickhousex/codec/values.ex
| 0.660282 | 0.535888 |
values.ex
|
starcoder
|
defmodule Bonny.Controller do
  @moduledoc """
  `Bonny.Controller` defines controller behaviours and generates boilerplate for generating Kubernetes manifests.

  > A custom controller is a controller that users can deploy and update on a running cluster, independently of the cluster’s own lifecycle. Custom controllers can work with any kind of resource, but they are especially effective when combined with custom resources. The Operator pattern is one example of such a combination. It allows developers to encode domain knowledge for specific applications into an extension of the Kubernetes API.

  Controllers allow for simple `add`, `modify`, `delete`, and `reconcile` handling of custom resources in the Kubernetes API.
  """

  # Lifecycle callbacks every controller must implement.
  @callback add(map()) :: :ok | :error
  @callback modify(map()) :: :ok | :error
  @callback delete(map()) :: :ok | :error
  @callback reconcile(map()) :: :ok | :error

  @doc false
  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      # :rule entries accumulate; they are folded into rules/0 later.
      Module.register_attribute(__MODULE__, :rule, accumulate: true)
      @behaviour Bonny.Controller
      @client opts[:client] || K8s.Client

      # CRD defaults — any of these attributes may be overridden by the
      # using module before compilation.
      @group Bonny.Config.group()
      @kind Bonny.Naming.module_to_kind(__MODULE__)
      @scope :namespaced
      @version Bonny.Naming.module_version(__MODULE__)
      @singular Macro.underscore(Bonny.Naming.module_to_kind(__MODULE__))
      @plural "#{@singular}s"
      @names %{}
      @additional_printer_columns []

      @before_compile Bonny.Controller

      use Supervisor

      def start_link(_) do
        Supervisor.start_link(__MODULE__, %{}, name: __MODULE__)
      end

      @impl true
      def init(_init_arg) do
        # One watcher and one reconciler per controller module.
        children = [
          {__MODULE__.WatchServer, name: __MODULE__.WatchServer},
          {__MODULE__.ReconcileServer, name: __MODULE__.ReconcileServer}
        ]

        Supervisor.init(children, strategy: :one_for_one)
      end

      @doc false
      @spec client() :: any()
      def client(), do: @client
    end
  end

  @doc false
  defmacro __before_compile__(env) do
    controller = env.module

    quote bind_quoted: [controller: controller] do
      defmodule WatchServer do
        @moduledoc "Controller watcher implementation"
        use Bonny.Server.Watcher

        @impl Bonny.Server.Watcher
        defdelegate add(resource), to: controller
        @impl Bonny.Server.Watcher
        defdelegate modify(resource), to: controller
        @impl Bonny.Server.Watcher
        defdelegate delete(resource), to: controller
        @impl Bonny.Server.Watcher
        defdelegate watch_operation(), to: controller, as: :list_operation
      end

      defmodule ReconcileServer do
        @moduledoc "Controller reconciler implementation"
        use Bonny.Server.Reconciler, frequency: 30

        @impl Bonny.Server.Reconciler
        defdelegate reconcile(resource), to: controller
        @impl Bonny.Server.Reconciler
        defdelegate reconcile_operation(), to: controller, as: :list_operation
      end

      @doc """
      Returns the `Bonny.CRD.t()` the controller manages the lifecycle of.
      """
      @spec crd() :: %Bonny.CRD{}
      def crd() do
        %Bonny.CRD{
          group: @group,
          scope: @scope,
          version: @version,
          names: Map.merge(default_names(), @names),
          additional_printer_columns: additional_printer_columns()
        }
      end

      @spec list_operation() :: K8s.Operation.t()
      def list_operation() do
        crd = __MODULE__.crd()
        api_version = Bonny.CRD.api_version(crd)
        kind = Bonny.CRD.kind(crd)
        client = __MODULE__.client()

        case crd.scope do
          :namespaced -> client.list(api_version, kind, namespace: Bonny.Config.namespace())
          _ -> client.list(api_version, kind)
        end
      end

      @doc """
      A list of RBAC rules that this controller needs to operate.
      This list will be serialized into the operator manifest when using `mix bonny.gen.manifest`.
      """
      @spec rules() :: list(map())
      def rules() do
        Enum.reduce(@rule, [], fn {api, resources, verbs}, acc ->
          rule = %{
            apiGroups: [api],
            resources: resources,
            verbs: verbs
          }

          [rule | acc]
        end)
      end

      @spec default_names() :: map()
      defp default_names() do
        %{
          plural: @plural,
          singular: @singular,
          kind: @kind,
          shortNames: nil
        }
      end

      @spec additional_printer_columns() :: list(map())
      defp additional_printer_columns() do
        # FIX: use the bound variable rather than re-reading the module
        # attribute, which also removes the "unused variable any"
        # compiler warning the previous version produced.
        case @additional_printer_columns do
          [] -> []
          columns -> columns ++ Bonny.CRD.default_columns()
        end
      end
    end
  end
end
|
lib/bonny/controller.ex
| 0.893031 | 0.430866 |
controller.ex
|
starcoder
|
defmodule Formex.Form do
  alias __MODULE__
  alias Formex.Field
  alias Formex.Button
  alias Formex.FormNested
  alias Formex.FormCollection

  # NOTE(review): this documentation was previously attached via `@doc`
  # immediately before `defstruct`, where it has no effect; it is now the
  # module documentation.
  @moduledoc """
  Defines the Formex.Form struct.

  * `:type` - the module that implements `Formex.Type`, for example: `App.ArticleType`
  * `:struct` - the struct of your data, for example: `%App.Article{}`
  * `:new_struct` - the `:struct` with `:params` applied
  * `:struct_module` - `struct.__struct__`, for example: `App.Article`
  * `:struct_info` - additional info about struct, that can differs between implementations
    of `Formex.BuilderProtocol`
  * `:items` - list of `Formex.Field`, `Formex.Button`, `Formex.FormCollection` and
    `Formex.FormNested` structs
  * `:params` - sent parameters, passed by `Plug.Conn`
  * `:mapped_params` - `:params` prepared to create a `:new_struct` or to create a
    changeset by `Formex.Ecto`
  * `:phoenix_form` - `%Phoenix.HTML.Form{}`
  * `:template` - the module that implements `Formex.Template`, for example:
    `Formex.Template.BootstrapHorizontal`. Can be set via a `Formex.View.formex_form_for` options
  * `:template_options`
  * `:method` - `:post`, `:put` etc. May be used by `Formex.View`.
    E.g. `Formex.Ecto.Builder` sets here `:put` if we editing `struct`, `:post` otherwise.
  * `:submitted?` - is form submitted? Set by `Formex.Controller.handle_form/1`
  * `:opts` - additional data passed in a controller. See: `Formex.Builder.create_form/5`
  * `:valid?`
  * `:errors`
  """
  defstruct type: nil,
            struct: nil,
            new_struct: nil,
            struct_module: nil,
            struct_info: nil,
            valid?: false,
            items: [],
            params: %{},
            phoenix_form: nil,
            template: nil,
            method: nil,
            submitted?: false,
            opts: [],
            errors: [],
            template_options: nil,
            mapped_params: %{}

  @type t :: %Form{}

  @doc """
  Adds field to the form. More: `Formex.Field.create_field/4`, `Button.create_button/3`
  """
  @spec put_item(form :: t, item :: any) :: t
  def put_item(form, item) do
    items = form.items ++ [item]
    Map.put(form, :items, items)
  end

  @doc """
  Returns list of `t:Formex.Field.t/0`
  """
  @spec get_fields(form :: t) :: list
  def get_fields(form) do
    form.items
    |> Enum.filter(&(&1.__struct__ == Field))
  end

  @doc """
  Returns list of items which user can control (all except the `Button`)
  """
  @spec get_fields_controllable(form :: t) :: list
  def get_fields_controllable(form) do
    form.items
    |> Enum.filter(&is_controllable/1)
  end

  @doc """
  Is controllable (all except the `Button`)
  """
  @spec is_controllable(item :: any) :: boolean
  def is_controllable(item) do
    item.__struct__ != Button
  end

  @doc """
  Returns list of items which can be validated (alias for `get_fields_controllable/1`)
  """
  @spec get_fields_validatable(form :: t) :: list
  def get_fields_validatable(form) do
    get_fields_controllable(form)
  end

  @doc """
  Returns list of `t:Formex.FormNested.t/0` and `t:Formex.FormCollection.t/0`
  """
  @spec get_subforms(form :: t) :: list
  def get_subforms(form) do
    form.items
    |> Enum.filter(&(&1.__struct__ == FormNested || &1.__struct__ == FormCollection))
  end

  @doc """
  Finds form item by name. Returns the item or `nil`.
  """
  # FIX: the previous spec claimed `list`; Enum.find/2 yields one item or nil.
  @spec find(form :: t, name :: atom) :: any | nil
  def find(form, name) do
    form.items
    |> Enum.find(&(&1.name == name))
  end

  @doc """
  Finds form item by struct name. Returns the item or `nil`.
  """
  @spec find_by_struct_name(form :: t, name :: atom) :: any | nil
  def find_by_struct_name(form, name) do
    form.items
    |> Enum.find(&(&1.struct_name == name))
  end

  @doc """
  Finds form item by `name` and returns `struct_name`.

  Raises if no item with that `name` exists (the `nil` from `find/2`
  is not a map).
  """
  @spec get_struct_name_by_name(form :: t, name :: atom) :: atom
  def get_struct_name_by_name(form, name) do
    form
    |> find(name)
    |> Map.get(:struct_name)
  end

  @doc """
  Finds form item by `struct_name` and returns `name`
  """
  @spec get_name_by_struct_name(form :: t, struct_name :: atom) :: atom
  def get_name_by_struct_name(form, struct_name) do
    form.items
    |> Enum.find(&(&1.struct_name == struct_name))
    |> Map.get(:name)
  end

  @doc """
  Returns list of `t:Formex.FormNested.t/0`
  """
  @spec get_nested(form :: t) :: list
  def get_nested(form) do
    form.items
    |> Enum.filter(&(&1.__struct__ == FormNested))
  end

  @doc """
  Returns list of `t:Formex.FormCollection.t/0`
  """
  @spec get_collections(form :: t) :: list
  def get_collections(form) do
    form.items
    |> Enum.filter(&(&1.__struct__ == FormCollection))
  end

  @doc """
  Returns list of names of items with changed name (`item.name` != `item.struct_name`)
  """
  @spec get_items_with_changed_name(form :: t) :: list
  def get_items_with_changed_name(form) do
    form
    |> get_fields_controllable
    |> Enum.filter(&(&1.name != &1.struct_name))
    |> Enum.map(& &1.name)
  end

  @doc false
  # FIX: `Atom.t()`/`Map.t()` do not exist as types; `opts` defaults to a
  # keyword list, not a map.
  @spec start_creating(form :: Form.t(), type :: any, name :: atom, opts :: keyword) :: Form.t()
  def start_creating(form, type, name, opts \\ []) do
    info = form.struct_info[name]

    if is_tuple(info) && elem(info, 0) == :collection do
      Formex.FormCollection.start_creating(form, type, name, opts)
    else
      Formex.FormNested.start_creating(form, type, name, opts)
    end
  end

  @doc false
  @spec finish_creating(form :: Form.t()) :: Form.t()
  def finish_creating(form) do
    new_items =
      form.items
      |> Enum.map(fn item ->
        case item do
          %FormCollection{} ->
            FormCollection.finish_creating(form, item)

          %FormNested{} ->
            FormNested.finish_creating(form, item)

          _ ->
            item
        end
      end)

    form
    |> Map.put(:items, new_items)
  end

  @doc false
  @spec get_assoc_or_embed(form :: Form.t(), name :: atom) :: any
  def get_assoc_or_embed(form, name) do
    if is_assoc(form, name) do
      form.struct_module.__schema__(:association, name)
    else
      form.struct_module.__schema__(:embed, name)
    end
  end

  @doc false
  @spec is_assoc(form :: Form.t(), name :: atom) :: boolean
  def is_assoc(form, name) do
    form.struct_module.__schema__(:association, name) != nil
  end

  # Applies `fun` to every select field, recursing into nested forms and
  # collections.
  @doc false
  # FIX: the spec previously declared `(Field.t(), Form.t() -> Field.t())`,
  # but the code calls `fun.(form, field)` — form first.
  @spec modify_selects_recursively(form :: Form.t(), fun :: (Form.t(), Field.t() -> Field.t())) ::
          Form.t()
  def modify_selects_recursively(form, fun) do
    form_items =
      Enum.map(form.items, fn item ->
        case item do
          collection = %FormCollection{} ->
            forms =
              collection.forms
              |> Enum.map(fn nested ->
                form = modify_selects_recursively(nested.form, fun)
                %{nested | form: form}
              end)

            %{collection | forms: forms}

          nested = %FormNested{} ->
            %{nested | form: modify_selects_recursively(nested.form, fun)}

          field = %Field{} ->
            if field.type in [:select, :multiple_select] do
              fun.(form, field)
            else
              field
            end

          _ ->
            item
        end
      end)

    Map.put(form, :items, form_items)
  end
end
|
lib/formex/form.ex
| 0.857351 | 0.44903 |
form.ex
|
starcoder
|
# NOTE(review): this module targets pre-1.0 Elixir (`defrecordp`,
# `//` default arguments, `lc ... inlist` comprehensions, `%B`/`%r`
# sigils, old-style `defexception`). It will not compile on a modern
# toolchain; it is documented here as-is rather than rewritten.
defmodule Regex do
  @moduledoc %B"""
  Regular expressions for Elixir built on top of the re module
  in the Erlang Standard Library. More information can be found
  on re documentation: http://www.erlang.org/doc/man/re.html
  Regular expressions in Elixir can be created using Regex.compile!
  or using the special form with `%r`:
  # A simple regular expressions that matches foo anywhere in the string
  %r/foo/
  # A regular expression with case insensitive options and handle unicode chars
  %r/foo/iu
  The re module provides several options, the one available in Elixir, followed by
  their shortcut in parenthesis, are:
  * unicode (u) - enable unicode specific patterns like \p
  * caseless (i) - add case insensitivity
  * dotall (s) - causes dot to match newlines and also set newline to anycrlf.
  The new line setting can be overwritten by setting `(*CR)` or `(*LF)` or
  `(*CRLF)` or `(*ANY)` according to re documentation
  * multiline (m) - causes `^` and `$` to mark the beginning and end of each line.
  You need to use `\A` and `\z` to match the end or beginning of the string
  * extended (x) - whitespace characters are ignored except when escaped and
  allow `#` to delimit comments
  * firstline (f) - forces the unanchored pattern to match before or at the first
  newline, though the matched text may continue over the newline
  * ungreedy (r) - invert the "greediness" of the regexp
  * groups (g) - compile with info about groups available
  The options not available are:
  * anchored - not available, use `^` or `\A` instead
  * dollar_endonly - not available, use `\z` instead
  * no_auto_capture - not available, use `?:` instead
  * newline - not available, use `(*CR)` or `(*LF)` or `(*CRLF)` or `(*ANYCRLF)`
  or `(*ANY)` at the beginning of the regexp according to the re documentation
  Most of the functions in this module accept either a binary or a char list
  as subject. The result is based on the argument (a binary will return
  a binary, a char list will return a char list).
  """

  # Private record backing every compiled regex: the Erlang re pattern,
  # the original source, the option string, and (optionally) named groups.
  defrecordp :regex, [:re_pattern, :source, :options, :groups]
  @type t :: { Regex, term, term, term, term }

  # Old-style exception definition (record-based).
  defexception CompileError, message: "regex could not be compiled"

  @doc """
  Compiles the regular expression according to the given options.
  It returns `{ :ok, regex }` in case of success,
  `{ :error, reason }` otherwise.
  """
  def compile(source, options // "") when is_binary(source) do
    options = to_binary(options)
    opts = translate_options(options)
    # :groups is Elixir-only; strip it before handing options to :re.
    re_opts = opts -- [:groups]
    # Only parse named groups when the g option was present.
    groups = if opts != re_opts, do: parse_groups(source)

    case :re.compile(source, re_opts) do
      { :ok, re_pattern } ->
        { :ok, regex(re_pattern: re_pattern, source: source, options: options, groups: groups) }
      error ->
        error
    end
  end

  @doc """
  Compiles the regular expression according to the given options.
  Fails with `Regex.CompileError` if the regex cannot be compiled.
  """
  def compile!(source, options // "") do
    case compile(source, options) do
      { :ok, regex } -> regex
      { :error, { reason, at } } -> raise Regex.CompileError, message: "#{reason} at position #{at}"
    end
  end

  @doc """
  Runs the regular expression against the given string
  and returns the index (zero indexes) where the first
  match occurs, nil otherwise.
  ## Examples
      iex> Regex.index(%r/c(d)/, "abcd")
      2
      iex> Regex.index(%r/e/, "abcd")
      nil
  """
  def index(regex(re_pattern: compiled), string) do
    case :re.run(string, compiled, [{ :capture, :first, :index }]) do
      :nomatch -> nil
      { :match, [{index,_}] } -> index
    end
  end

  @doc """
  Returns a boolean if there was a match or not.
  ## Examples
      iex> Regex.match?(%r/foo/, "foo")
      true
      iex> Regex.match?(%r/foo/, "bar")
      false
  """
  def match?(regex(re_pattern: compiled), string) do
    # :capture :none avoids building capture results when only a
    # yes/no answer is needed.
    :re.run(string, compiled, [{ :capture, :none }]) == :match
  end

  @doc """
  Runs the regular expression against the given string.
  It returns a list with all matches, nil if no match ocurred, or []
  if it matched, /g was specified, but nothing was captured.
  ## Examples
      iex> Regex.run(%r/c(d)/, "abcd")
      ["cd", "d"]
      iex> Regex.run(%r/e/, "abcd")
      nil
  """
  def run(regex, string, options // [])

  def run(regex(re_pattern: compiled, groups: groups), string, options) do
    # Return type (binary vs list) defaults to matching the subject type.
    return = Keyword.get(options, :return, return_for(string))

    captures =
      case Keyword.get(options, :capture, :all) do
        :groups -> groups || raise ArgumentError, message: "regex was not compiled with g"
        others  -> others
      end

    case :re.run(string, compiled, [{ :capture, captures, return }]) do
      :nomatch -> nil
      :match   -> []
      { :match, results } -> results
    end
  end

  @doc """
  Returns the given captures as a list of tuples.
  Requires the regex to be compiled with the groups option.
  ## Examples
      iex> Regex.captures(%r/c(?<foo>d)/g, "abcd")
      [foo: "d"]
  """
  def captures(regex(groups: groups) = regex, string, options // []) do
    # Default the :capture option to the sorted named groups.
    unless captures = Keyword.get(options, :capture) do
      captures = if groups do
        Enum.sort(groups)
      else
        raise ArgumentError, message: "regex was not compiled with g"
      end
      options = Keyword.put(options, :capture, captures)
    end
    results = run(regex, string, options)
    # Pair each group name with its captured value.
    if results, do: Enum.zip captures, results
  end

  @doc """
  Returns the underlying `re_pattern` in the regular expression.
  """
  def re_pattern(regex(re_pattern: compiled)) do
    compiled
  end

  @doc """
  Returns the regex source as binary.
  ## Examples
      iex> Regex.source(%r(foo))
      "foo"
  """
  def source(regex(source: source)) do
    source
  end

  @doc """
  Returns the regex options as a string.
  ## Examples
      iex> Regex.opts(%r(foo)m)
      "m"
  """
  def opts(regex(options: options)) do
    options
  end

  @doc """
  Returns list of named groups in regex.
  ## Examples
      iex> Regex.groups(%r/(?<foo>foo)/g)
      [:foo]
  """
  def groups(regex(groups: groups)) do
    groups
  end

  @doc """
  Same as run, but scans the target several times collecting all matches of
  the regular expression. A list is returned with each match. If the item in
  the list is a binary, it means there were no captures. If the item is another
  list, each element in this secondary list is a capture.
  ## Examples
      iex> Regex.scan(%r/c(d|e)/, "abcd abce")
      [["d"], ["e"]]
      iex> Regex.scan(%r/c(?:d|e)/, "abcd abce")
      ["cd", "ce"]
      iex> Regex.scan(%r/e/, "abcd")
      []
  """
  def scan(regex, string, options // [])

  def scan(regex(re_pattern: compiled), string, options) do
    return = Keyword.get(options, :return, return_for(string))
    options = [{ :capture, :all, return }, :global]
    case :re.run(string, compiled, options) do
      :nomatch -> []
      { :match, results } -> flatten_result(results)
    end
  end

  @doc """
  Split the given target in the number of parts specified.
  If no ammount of parts is given, it defaults to :infinity.
  """
  def split(regex, string, options // [])

  def split(regex(re_pattern: compiled), string, options) do
    # :global false caps the split at two parts; an explicit :parts
    # option wins otherwise.
    parts =
      cond do
        Keyword.get(options, :global) == false -> 2
        p = Keyword.get(options, :parts)       -> p
        true                                   -> :infinity
      end
    return = Keyword.get(options, :return, return_for(string))
    opts = [return: return, parts: parts]
    :re.split(string, compiled, opts)
  end

  @doc %B"""
  Receives a regex, a binary and a replacement and returns a new
  binary where the all matches are replaced by replacement.
  Inside the replacement, you can either give "&" to access the
  whole regular expression or \N, where N is in integer to access
  a specific matching parens. You can also set global to false
  if you want to replace just the first occurrence.
  ## Examples
      iex> Regex.replace(%r/d/, "abc", "d")
      "abc"
      iex> Regex.replace(%r/b/, "abc", "d")
      "adc"
      iex> Regex.replace(%r/b/, "abc", "[&]")
      "a[b]c"
      iex> Regex.replace(%r/b/, "abc", "[\\&]")
      "a[&]c"
      iex> Regex.replace(%r/(b)/, "abc", "[\\1]")
      "a[b]c"
  """
  def replace(regex(re_pattern: compiled), string, replacement, options // []) do
    opts = if Keyword.get(options, :global) != false, do: [:global], else: []
    return = Keyword.get(options, :return, return_for(string))
    opts = [{ :return, return }|opts]
    :re.replace(string, compiled, replacement, opts)
  end

  # Helpers

  @doc false
  # Unescape map function used by Macro.unescape_binary.
  def unescape_map(?f), do: ?\f
  def unescape_map(?n), do: ?\n
  def unescape_map(?r), do: ?\r
  def unescape_map(?t), do: ?\t
  def unescape_map(?v), do: ?\v
  def unescape_map(?a), do: ?\a
  def unescape_map(_),  do: false

  # Private Helpers

  # Pick the :re return type matching the subject's type.
  defp return_for(element) when is_binary(element), do: :binary
  defp return_for(element) when is_list(element),   do: :list

  # Translate the option string, one flag character at a time, into
  # the atoms :re.compile/2 understands.
  defp translate_options(<<?u, t :: binary>>), do: [:unicode|translate_options(t)]
  defp translate_options(<<?i, t :: binary>>), do: [:caseless|translate_options(t)]
  defp translate_options(<<?x, t :: binary>>), do: [:extended|translate_options(t)]
  defp translate_options(<<?f, t :: binary>>), do: [:firstline|translate_options(t)]
  defp translate_options(<<?r, t :: binary>>), do: [:ungreedy|translate_options(t)]
  defp translate_options(<<?s, t :: binary>>), do: [:dotall,{:newline,:anycrlf}|translate_options(t)]
  defp translate_options(<<?m, t :: binary>>), do: [:multiline|translate_options(t)]
  defp translate_options(<<?g, t :: binary>>), do: [:groups|translate_options(t)]
  defp translate_options(<<>>), do: []

  # Drop the leading whole-match element from each :global result,
  # keeping only the captures (or the whole match when there are none).
  defp flatten_result(results) do
    lc result inlist results do
      case result do
        [t]    -> t
        [_|t]  -> t
      end
    end
  end

  # Extract named group names (?<name>...) from the regex source
  # using a helper regex, returning them as atoms.
  defp parse_groups(source) do
    options = [:global, {:capture, ['G'], :binary}]
    {:ok, pattern} = :re.compile(%B"\(\?<(?<G>[^>]*)>")
    case :re.run(source, pattern, options) do
      :nomatch -> []
      { :match, results } ->
        lc [group] inlist results, do: binary_to_atom(group)
    end
  end
end
|
lib/elixir/lib/regex.ex
| 0.919172 | 0.633736 |
regex.ex
|
starcoder
|
defmodule GenGossip.ClusterState do
  @moduledoc """
  Tracks cluster membership for a gossip protocol: the owning node, the
  per-member status orddict, and a vector clock versioning the state.
  """

  alias GenGossip.VectorClock

  @opaque t :: %__MODULE__{
    owner: term,
    metadata: metadata,
    mod: atom,
    members: [{term, Member.t}],
    vector_clock: VectorClock.t
  }
  @type metadata :: Keyword.t

  defmodule Member do
    @moduledoc false
    @opaque t :: %__MODULE__{
      node: term,
      metadata: ClusterState.metadata,
      status: member_status,
      vector_clock: VectorClock.t
    }
    @type member_status ::
          :joining | :valid | :invalid |
          :leaving | :exiting | :down
    defstruct [:node, :status, :vector_clock, :metadata]
  end

  defstruct [:owner, :metadata, :mod, :members, :vector_clock]

  @doc """
  Creates a fresh cluster state for `mod`, owned by the local node.
  """
  @spec new(term) :: t
  def new(mod) do
    # FIX: the previous version bound the struct to an unused variable
    # (compiler warning); return it directly.
    struct(__MODULE__,
      owner: node(),
      metadata: [],
      mod: mod,
      members: [],
      vector_clock: VectorClock.fresh()
    )
  end

  def add_member(pnode, state, node) do
    set_member(pnode, state, node, :joining)
  end

  def remove_member(pnode, state, node) do
    set_member(pnode, state, node, :invalid)
  end

  def leave_member(pnode, state, node) do
    set_member(pnode, state, node, :leaving)
  end

  def exit_member(pnode, state, node) do
    set_member(pnode, state, node, :exiting)
  end

  def down_member(pnode, state, node) do
    set_member(pnode, state, node, :down)
  end

  # Transitions `member` to `status` and bumps the state's vector clock
  # on behalf of `node`.
  defp set_member(node, state, member, status) do
    vector_clock = VectorClock.increment(state.vector_clock, node)
    updated_state = update_members(node, state, member, status)
    struct(updated_state, [vector_clock: vector_clock])
  end

  defp update_members(_node, state, member, status) do
    members = :orddict.update(member,
                              &update_member(&1, status),
                              default_member(member, status),
                              state.members)
    # FIX: update the existing state instead of building a brand-new
    # struct, which silently dropped owner/mod/metadata/vector_clock.
    struct(state, [members: members])
  end

  defp default_member(name, status) do
    struct(Member, [vector_clock: VectorClock.fresh(), status: status, node: name])
  end

  defp update_member(member, status) do
    vector_clock = VectorClock.increment(member.vector_clock, member.node)
    # FIX: preserve the member's other fields (node, metadata) instead of
    # constructing a fresh Member holding only the clock and status.
    struct(member, [vector_clock: vector_clock, status: status])
  end

  @doc """
  Sets the node that owns this copy of the cluster state.
  """
  def set_owner(state, node) do
    struct(state, [owner: node])
  end
end
|
lib/gen_gossip/cluster_state.ex
| 0.681303 | 0.516413 |
cluster_state.ex
|
starcoder
|
defmodule ExfileB2.LocalCache do
  @moduledoc """
  The manager for ExfileB2's local cache of files.

  Keeps an in-memory index (`%{key => {last_used_ts, byte_size, path}}`)
  of temp files on disk, evicting least-recently-used entries whenever
  the configured byte budget is exceeded.
  """
  use GenServer

  # In ms, 30 seconds between LRU vacuum passes.
  @vacuum_interval 30_000

  def start_link do
    GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
  end

  @doc "Looks up `key`, returning `{:ok, byte_size, path}` or `:error`."
  def fetch(key),
    do: GenServer.call(__MODULE__, {:fetch, key})

  @doc """
  Stores `iodata` under `key`, returning `{:ok, path}` on success or the
  underlying `{:error, reason}` on failure.
  """
  def store(key, iodata) do
    delete(key)
    byte_size = :erlang.iolist_size(iodata)

    case GenServer.call(__MODULE__, {:store, key, byte_size}) do
      {:ok, path} ->
        copy_iodata_to_path(iodata, path)

      error ->
        # FIX: the failure reply is already an {:error, reason} tuple;
        # the previous code wrapped it again as {:error, {:error, ...}}.
        error
    end
  end

  defp copy_iodata_to_path(iodata, path) do
    case File.open(path, [:write], &IO.binwrite(&1, iodata)) do
      {:ok, _} ->
        {:ok, path}

      error ->
        error
    end
  end

  def delete(key),
    do: GenServer.call(__MODULE__, {:delete, key})

  def size(key),
    do: GenServer.call(__MODULE__, {:size, key})

  def flush(),
    do: GenServer.call(__MODULE__, :flush)

  def vacuum(),
    do: GenServer.call(__MODULE__, :vacuum)

  ## GenServer Callbacks

  @impl true
  def init(:ok) do
    # FIX: bare `self` is deprecated; use self().
    Process.send_after(self(), :vacuum, @vacuum_interval)
    {:ok, initial_state()}
  end

  @impl true
  def handle_call({:fetch, key}, _from, state) do
    {reply, state} = perform_fetch(state, key)
    {:reply, reply, state}
  end

  def handle_call({:store, key, byte_size}, _from, state) do
    {reply, state} =
      case Exfile.Tempfile.random_file("b2-local-cache") do
        {:ok, path} ->
          state =
            state
            |> update_in([:cache], &Map.put(&1, key, {ts(), byte_size, path}))
            |> update_in([:bytes_used], &(&1 + byte_size))

          {{:ok, path}, state}

        error ->
          {error, state}
      end

    {:reply, reply, state}
  end

  def handle_call({:delete, key}, _from, state) do
    {:reply, :ok, perform_delete(state, key)}
  end

  def handle_call(:flush, _from, state) do
    :ok = perform_flush(state)
    {:reply, :ok, initial_state()}
  end

  def handle_call(:vacuum, _from, state) do
    {:reply, :ok, perform_vacuum(state, cache_size())}
  end

  @impl true
  def handle_info(:vacuum, state) do
    state = perform_vacuum(state, cache_size())
    _ = Process.send_after(self(), :vacuum, @vacuum_interval)
    {:noreply, state}
  end

  @impl true
  def terminate(_reason, state) do
    perform_flush(state)
  end

  # Returns {{:ok, byte_size, path}, state} and refreshes the entry's
  # last-used timestamp, or {:error, state} when the key is absent.
  defp perform_fetch(%{cache: cache} = state, key) do
    case Map.fetch(cache, key) do
      {:ok, {_last_used, byte_size, path}} ->
        state =
          state
          |> update_in([:cache], &Map.put(&1, key, {ts(), byte_size, path}))

        {{:ok, byte_size, path}, state}

      _ ->
        {:error, state}
    end
  end

  # Removes the entry and its file, adjusting the byte counter.
  defp perform_delete(%{cache: cache} = state, key) do
    case Map.fetch(cache, key) do
      {:ok, {_, byte_size, path}} ->
        _ = File.rm(path)

        state
        |> update_in([:cache], &Map.delete(&1, key))
        |> update_in([:bytes_used], &(&1 - byte_size))

      _ ->
        state
    end
  end

  # Best-effort removal of every cached file on disk.
  defp perform_flush(%{cache: cache}) do
    for {_, _, path} <- Map.values(cache) do
      _ = File.rm(path)
    end

    :ok
  end

  # Evicts LRU entries until usage drops below the budget.
  defp perform_vacuum(%{bytes_used: bytes} = state, cache_size) when bytes < cache_size,
    do: state

  defp perform_vacuum(%{cache: cache} = state, cache_size) do
    state
    |> perform_delete(lru_key(cache))
    |> perform_vacuum(cache_size)
  end

  # Key of the entry with the oldest last-used timestamp.
  defp lru_key(cache) do
    Enum.reduce(cache, {ts() + 1_000_000, nil}, fn
      {key, {time, _, _}}, {least_access_time, _key} when time < least_access_time ->
        {time, key}

      _, {time, key} ->
        {time, key}
    end)
    |> elem(1)
  end

  defp ts, do: :erlang.system_time(:micro_seconds)

  defp cache_size,
    do: Application.get_env(:exfile_b2, :local_cache_size, 100_000_000)

  defp initial_state, do: %{cache: %{}, bytes_used: 0}
end
|
lib/exfile_b2/local_cache.ex
| 0.593374 | 0.434221 |
local_cache.ex
|
starcoder
|
defmodule RayTracer.Pattern do
@moduledoc """
This is a common module for all kinds of patterns
"""
alias RayTracer.Color
alias RayTracer.Shape
alias RayTracer.Matrix
alias RayTracer.RTuple
@type t :: RayTracer.StripePattern.t |
RayTracer.RingPattern.t |
RayTracer.CheckerPattern.t |
RayTracer.GradientPattern.t |
RayTracer.BlendedPattern.t
defprotocol CommonProtocol do
@doc """
Computes the color for the pattern at the point in pattern space
"""
def pattern_at(pattern, position)
end
defprotocol CombinationProtocol do
@fallback_to_any true
@doc """
Computes the color for the pattern at the point in pattern space
"""
def pattern_at_shape(pattern, object, position)
end
defmacro __using__(fields \\ []) do
base_fields = [transform: Matrix.ident, inv_transform: Matrix.ident]
new_fields = base_fields ++ fields
quote do
defstruct unquote(new_fields)
end
end
defimpl CombinationProtocol, for: Any do
alias RayTracer.Color
alias RayTracer.Shape
alias RayTracer.Matrix
alias RayTracer.RTuple
alias RayTracer.Pattern
@doc """
Computes the color for the pattern on given object at the given world space
point with respect to the transformations on both the pattern
and the object.
"""
@spec pattern_at_shape(Pattern.t, Shape.t, RTuple.point) :: Color.t
def pattern_at_shape(pattern, object, position) do
pattern_space_point = Pattern.pattern_space_point(pattern, object, position)
Pattern.pattern_at(pattern, pattern_space_point)
end
end
@spec pattern_at(t, RTuple.point) :: Color.t
def pattern_at(pattern, position) do
CommonProtocol.pattern_at(pattern, position)
end
@spec pattern_at_shape(t, Shape.t, RTuple.point) :: Color.t
def pattern_at_shape(pattern, object, position) do
CombinationProtocol.pattern_at_shape(pattern, object, position)
end
@doc """
Converts the point from world space to pattern space
"""
@spec pattern_space_point(t, Shape.t, RTuple.point) :: RTuple.point
def pattern_space_point(pattern, object, position) do
object_space_point =
object.inv_transform
|> Matrix.mult(position)
pattern.inv_transform
|> Matrix.mult(object_space_point)
end
@doc """
Sets the pattern's transform and caches its inverse so hot rendering paths
never have to invert the matrix again.
"""
@spec set_transform(t, Matrix.matrix) :: t
def set_transform(pattern, transform) do
  struct!(pattern, transform: transform, inv_transform: Matrix.inverse(transform))
end
end
|
lib/pattern.ex
| 0.913749 | 0.614408 |
pattern.ex
|
starcoder
|
defmodule Strava.Athlete.Stats do
  @moduledoc """
  Returns recent (last 4 weeks), year to date and all time stats for a given
  athlete. Only available for the authenticated athlete. This is the
  recommended endpoint when polling for athlete upload events.
  More info: http://strava.github.io/api/v3/athlete/#stats
  """

  @type t :: %__MODULE__ {
    biggest_ride_distance: float,
    biggest_climb_elevation_gain: float,
    recent_ride_totals: Strava.Athlete.Stats.RecentTotals.t,
    recent_run_totals: Strava.Athlete.Stats.RecentTotals.t,
    recent_swim_totals: Strava.Athlete.Stats.RecentTotals.t,
    ytd_ride_totals: Strava.Athlete.Stats.Totals.t,
    ytd_run_totals: Strava.Athlete.Stats.Totals.t,
    ytd_swim_totals: Strava.Athlete.Stats.Totals.t,
    all_ride_totals: Strava.Athlete.Stats.Totals.t,
    all_run_totals: Strava.Athlete.Stats.Totals.t,
    all_swim_totals: Strava.Athlete.Stats.Totals.t
  }

  defstruct [
    :biggest_ride_distance,
    :biggest_climb_elevation_gain,
    :recent_ride_totals,
    :recent_run_totals,
    :recent_swim_totals,
    :ytd_ride_totals,
    :ytd_run_totals,
    :ytd_swim_totals,
    :all_ride_totals,
    :all_run_totals,
    :all_swim_totals,
  ]

  @doc """
  Converts the raw maps in a decoded stats response into their corresponding
  `RecentTotals` and `Totals` structs, as promised by `t:t/0`.

  Previously only the `recent_*` fields were converted, leaving the `ytd_*`
  and `all_*` fields as plain maps despite the typespec. Fields that are
  `nil` (not present in the response) are left untouched.
  """
  @spec parse(Strava.Athlete.Stats.t) :: Strava.Athlete.Stats.t
  def parse(stats) do
    %Strava.Athlete.Stats{stats|
      recent_ride_totals: to_struct(Strava.Athlete.Stats.RecentTotals, stats.recent_ride_totals),
      recent_run_totals: to_struct(Strava.Athlete.Stats.RecentTotals, stats.recent_run_totals),
      recent_swim_totals: to_struct(Strava.Athlete.Stats.RecentTotals, stats.recent_swim_totals),
      ytd_ride_totals: to_struct(Strava.Athlete.Stats.Totals, stats.ytd_ride_totals),
      ytd_run_totals: to_struct(Strava.Athlete.Stats.Totals, stats.ytd_run_totals),
      ytd_swim_totals: to_struct(Strava.Athlete.Stats.Totals, stats.ytd_swim_totals),
      all_ride_totals: to_struct(Strava.Athlete.Stats.Totals, stats.all_ride_totals),
      all_run_totals: to_struct(Strava.Athlete.Stats.Totals, stats.all_run_totals),
      all_swim_totals: to_struct(Strava.Athlete.Stats.Totals, stats.all_swim_totals),
    }
  end

  # Builds `module` from a map of fields; passes anything else (nil or an
  # already-built struct) through unchanged so parse/1 is idempotent and
  # nil-safe.
  defp to_struct(module, %{} = fields), do: struct(module, fields)
  defp to_struct(_module, other), do: other

  defmodule RecentTotals do
    @moduledoc "Totals over the last 4 weeks, including achievement counts."

    @type t :: %__MODULE__ {
      achievement_count: integer,
      count: integer,
      distance: float,
      elapsed_time: integer,
      elevation_gain: float,
      moving_time: integer
    }

    defstruct [
      :achievement_count,
      :count,
      :distance,
      :elapsed_time,
      :elevation_gain,
      :moving_time,
    ]
  end

  defmodule Totals do
    @moduledoc "Year-to-date and all-time activity totals."

    @type t :: %__MODULE__ {
      count: integer,
      distance: integer,
      elapsed_time: integer,
      elevation_gain: integer,
      moving_time: integer
    }

    defstruct [
      :count,
      :distance,
      :elapsed_time,
      :elevation_gain,
      :moving_time,
    ]
  end
end
|
lib/strava/athlete/stats.ex
| 0.729038 | 0.522019 |
stats.ex
|
starcoder
|
defmodule Alchemy.Embed do
  @moduledoc """
  A module containing structs and functions relative to Embeds.
  Embeds allow you to format messages in a structured, and quite pretty way; much more
  than can be done with simple text.
  For a basic idea of how embeds work, check this
  [link](https://cdn.discordapp.com/attachments/84319995256905728/252292324967710721/embed.png).
  ## Example Usage
  ```elixir
  Cogs.def embed do
    %Embed{}
    |> title("The BEST embed")
    |> description("the best description")
    |> image("http://i.imgur.com/4AiXzf8.jpg")
    |> Embed.send
  end
  ```
  Note that this is equivalent to:
  ```elixir
  Cogs.def embed do
    %Embed{title: "The BEST embed",
           description: "the best description",
           image: "http://i.imgur.com/4AiXzf8.jpg"}
    |> Embed.send
  end
  ```
  ## File Attachments
  The fields that take urls can also take a special "attachment"
  url referencing files uploaded alongside the embed.
  ```elixir
  Cogs.def foo do
    %Embed{}
    |> image("attachment://foo.png")
    |> Embed.send("", file: "foo.png")
  end
  ```
  """
  import Alchemy.Structs
  alias Alchemy.Attachment
  alias Alchemy.Embed.{Footer, Image, Video, Provider, Author, Field, Thumbnail}
  alias Alchemy.Embed

  @type url :: String.t()

  # NOTE: typespecs use the built-in types `integer`/`boolean`; the previous
  # `Integer`/`Boolean` referred to the modules, which mean nothing to dialyzer.
  @type t :: %__MODULE__{
          title: String.t(),
          type: String.t(),
          description: String.t(),
          url: String.t(),
          timestamp: String.t(),
          color: integer,
          footer: footer,
          image: image,
          thumbnail: thumbnail,
          video: video,
          provider: provider,
          author: author,
          fields: [field]
        }
  @derive Poison.Encoder
  defstruct [
    :title,
    :type,
    :description,
    :url,
    :timestamp,
    :color,
    :footer,
    :image,
    :thumbnail,
    :video,
    :provider,
    :author,
    fields: []
  ]

  @typedoc """
  Represents the author of an embed.
  - `name`
    The name of the author
  - `url`
    The author's url
  - `icon_url`
    A link to the author's icon image
  - `proxy_icon_url`
    A proxied url for the author's icon image
  """
  @type author :: %Author{
          name: String.t(),
          url: url,
          icon_url: url,
          proxy_icon_url: url
        }
  @typedoc """
  Represents a file attached to an embed.
  - `id`
    The attachment id
  - `filename`
    The name of the file attached
  - `size`
    The size of the file attached
  - `url`
    The source url of a file
  - `proxy_url`
    A proxied url of a file
  - `height`
    The height of the file, if it's an image
  - `width`
    The width of a file, if it's an image
  """
  @type attachment :: %Attachment{
          id: String.t(),
          filename: String.t(),
          size: integer,
          url: url,
          proxy_url: url,
          height: integer | nil,
          width: integer | nil
        }
  @typedoc """
  Represents a field in an embed.
  - `name`
    The title of the field
  - `value`
    The text of the field
  - `inline`
    Whether or not the field should be aligned with other inline fields.
  """
  @type field :: %Field{
          name: String.t(),
          value: String.t(),
          inline: boolean
        }
  @typedoc """
  Represents an Embed footer.
  - `text`
    The text of the footer
  - `icon_url`
    The url of the image in the footer
  - `proxy_icon_url`
    The proxied url of the footer's icon. Setting this when sending an embed serves
    no purpose.
  """
  @type footer :: %Footer{
          text: String.t(),
          icon_url: url,
          proxy_icon_url: url
        }
  @typedoc """
  Represents the image of an embed.
  - `url`
    A link to this image
  The following parameters shouldn't be set when sending embeds:
  - `proxy_url`
    A proxied url of the image
  - `height`
    The height of the image.
  - `width`
    The width of the image.
  """
  @type image :: %Image{
          url: url,
          proxy_url: url,
          height: integer,
          width: integer
        }
  @typedoc """
  Represents the provider of an embed.
  This is usually comes from a linked resource (youtube video, etc.)
  - `name`
    The name of the provider
  - `url`
    The source of the provider
  """
  @type provider :: %Provider{
          name: String.t(),
          url: url
        }
  @typedoc """
  Represents the thumnail of an embed.
  - `url`
    A link to the thumbnail image.
  - `proxy_url`
    A proxied link to the thumbnail image
  - `height`
    The height of the thumbnail
  - `width`
    The width of the thumbnail
  """
  @type thumbnail :: %Thumbnail{
          url: url,
          proxy_url: url,
          height: integer,
          width: integer
        }
  @typedoc """
  Represents a video attached to an embed.
  Users can't set this themselves.
  - `url`
    The source of the video
  - `height`
    The height of the video
  - `width`
    The width of the video
  """
  @type video :: %Video{
          url: url,
          height: integer,
          width: integer
        }

  @doc false
  # Converts a raw decoded API map into an %Embed{}, building each nested
  # sub-struct along the way.
  def from_map(map) do
    map
    |> field?("footer", Footer)
    |> field?("image", Image)
    |> field?("video", Video)
    |> field?("provider", Provider)
    |> field?("author", Author)
    |> field_map("fields", &map_struct(&1, Field))
    |> to_struct(__MODULE__)
  end

  # removes all the null keys from the map
  @doc false
  # This will also convert datetime objects into iso_8601
  def build(struct) when is_map(struct) do
    {_, struct} = Map.pop(struct, :__struct__)

    struct
    |> Enum.filter(fn {_, v} -> v != nil and v != [] end)
    |> Enum.map(fn {k, v} -> {k, build(v)} end)
    |> Enum.into(%{})
  end

  def build(value) do
    value
  end

  @doc """
  Adds a title to an embed.
  ## Examples
  ```elixir
  Cogs.def title(string) do
    %Embed{}
    |> title(string)
    |> Embed.send
  end
  ```
  """
  @spec title(Embed.t(), String.t()) :: Embed.t()
  def title(embed, string) do
    %{embed | title: string}
  end

  @doc """
  Sets the url for an embed.
  ## Examples
  ```elixir
  Cogs.def embed(url) do
    %Embed{}
    |> url(url)
    |> Embed.send
  end
  ```
  """
  @spec url(Embed.t(), url) :: Embed.t()
  def url(embed, url) do
    %{embed | url: url}
  end

  @doc """
  Adds a description to an embed.
  ```elixir
  Cogs.def embed(description) do
    %Embed{}
    |> title("generic title")
    |> description(description)
    |> Embed.send
  end
  ```
  """
  @spec description(Embed.t(), String.t()) :: Embed.t()
  def description(embed, string) do
    %{embed | description: string}
  end

  @doc """
  Adds author information to an embed.
  Note that the `proxy_icon_url`, `height`, and `width` fields have no effect,
  when using a pre-made `Author` struct.
  ## Options
  - `name`
    The name of the author.
  - `url`
    The url of the author.
  - `icon_url`
    The url of the icon to display.
  ## Examples
  ```elixir
  Cogs.def embed do
    %Embed{}
    |> author(name: "John",
              url: "https://discord.com/developers"
              icon_url: "http://i.imgur.com/3nuwWCB.jpg")
    |> Embed.send
  end
  ```
  """
  @spec author(Embed.t(), [name: String.t(), url: url, icon_url: url] | Author.t()) ::
          Embed.t()
  def author(embed, %Author{} = author) do
    %{embed | author: author}
  end

  def author(embed, options) do
    %{embed | author: Enum.into(options, %{})}
  end

  @doc """
  Sets the color of an embed
  Color should be 3 byte integer, with each byte representing a single
  color component; i.e. `0xRrGgBb`
  ## Examples
  ```elixir
  Cogs.def embed do
    {:ok, message} =
      %Embed{description: "the best embed"}
      |> color(0xc13261)
      |> Embed.send
    Process.sleep(2000)
    Client.edit_embed(message, embed |> color(0x5aa4d4))
  end
  ```
  """
  @spec color(Embed.t(), integer) :: Embed.t()
  def color(embed, integer) do
    %{embed | color: integer}
  end

  @doc """
  Adds a footer to an embed.
  Note that the `proxy_icon_url` field has no effect,
  when using a pre-made `Footer` struct.
  ## Options
  - `text`
    The content of the footer.
  - `icon_url`
    The icon the footer should have
  ## Examples
  ```elixir
  Cogs.def you do
    %Embed{}
    |> footer(text: "<- this is you",
              icon_url: message.author |> User.avatar_url)
    |> Embed.send
  end
  ```
  """
  @spec footer(Embed.t(), [text: String.t(), icon_url: url] | Footer.t()) :: Embed.t()
  def footer(embed, %Footer{} = footer) do
    %{embed | footer: footer}
  end

  def footer(embed, options) do
    %{embed | footer: Enum.into(options, %{})}
  end

  @doc """
  Adds a field to an embed.
  Fields are appended when using this method, so the order you pipe them in,
  is the order they'll end up when sent. The name and value must be non empty
  strings. You can have a maximum of `25` fields.
  ## Parameters
  - `name`
    The title of the embed.
  - `value`
    The text of the field
  ## Options
  - `inline`
    When setting this to `true`, up to 3 fields can appear side by side,
    given they are all inlined.
  ## Examples
  ```elixir
  %Embed{}
  |> field("Field1", "the best field!")
  |> field("Inline1", "look a field ->")
  |> field("Inline2", "<- look a field")
  ```
  """
  @spec field(Embed.t(), String.t(), String.t(), Keyword.t()) :: Embed.t()
  def field(embed, name, value, options \\ []) do
    field =
      %{name: name, value: value}
      |> Map.merge(Enum.into(options, %{}))

    # Append so fields render in the order they were piped in.
    %{embed | fields: embed.fields ++ [field]}
  end

  @doc """
  Adds a thumbnail to an embed.
  ## Examples
  ```elixir
  %Embed{}
  |> thumbnail("http://i.imgur.com/4AiXzf8.jpg")
  ```
  """
  @spec thumbnail(Embed.t(), url) :: Embed.t()
  def thumbnail(embed, url) do
    %{embed | thumbnail: %{url: url}}
  end

  @doc """
  Sets the main image of the embed.
  ## Examples
  ```elixir
  %Embed{}
  |> image("http://i.imgur.com/4AiXzf8.jpg")
  ```
  """
  @spec image(Embed.t(), url) :: Embed.t()
  def image(embed, url) do
    %{embed | image: %{url: url}}
  end

  @doc """
  Adds a timestamp to an embed.
  Note that the Datetime object will get converted to an `iso8601` formatted string.
  ## Examples
  ```elixir
  %Embed{} |> timestamp(DateTime.utc_now())
  ```
  """
  @spec timestamp(Embed.t(), DateTime.t()) :: Embed.t()
  def timestamp(embed, %DateTime{} = time) do
    %{embed | timestamp: DateTime.to_iso8601(time)}
  end

  @doc """
  Sends an embed to the same channel as the message triggering a command.
  This macro can't be used outside of `Alchemy.Cogs` commands.
  See `Alchemy.Client.send_message/3` for a list of options that can be
  passed to this macro.
  ## Examples
  ```elixir
  Cogs.def blue do
    %Embed{}
    |> color(0x1d3ad1)
    |> description("Hello!")
    |> Embed.send("Here's an embed, and a file", file: "foo.txt")
  end
  ```
  """
  defmacro send(embed, content \\ "", options \\ []) do
    quote do
      Alchemy.Client.send_message(
        var!(message).channel_id,
        unquote(content),
        [{:embed, unquote(embed)} | unquote(options)]
      )
    end
  end
end
|
lib/Structs/Messages/Embed/embed.ex
| 0.921172 | 0.830937 |
embed.ex
|
starcoder
|
defmodule CoursePlanner.Classes.Class do
  @moduledoc """
  This module holds the model for the class table
  """
  use Ecto.Schema
  import Ecto.Changeset
  import Ecto.Query
  alias CoursePlanner.{Repo, Courses.OfferedCourse, Attendances.Attendance, Classes}
  # NOTE(review): Ecto.Time/Ecto.Date are legacy Ecto 2.x types; newer Ecto
  # uses Elixir's built-in :time/:date types — confirm the Ecto version pinned
  # by this project before changing.
  alias Ecto.{Time, Date, Changeset}

  schema "classes" do
    field :date, Date
    field :starting_at, Time
    field :finishes_at, Time
    field :classroom, :string
    belongs_to :offered_course, OfferedCourse
    # Deleting a class cascades to its attendance records.
    has_many :attendances, Attendance, on_delete: :delete_all
    has_many :students, through: [:offered_course, :students]
    timestamps()
  end

  @doc """
  Builds a changeset based on the `struct` and `params`.
  """
  def changeset(struct, params \\ %{}) do
    cast_params =
      [:offered_course_id, :date, :starting_at, :finishes_at, :classroom]

    struct
    |> cast(params, cast_params)
    |> validate_required([:offered_course_id, :date, :starting_at, :finishes_at])
    |> validate_date()
    |> Classes.validate_for_holiday()
  end

  # Create-specific changeset: additionally requires the offered course to
  # have at least one teacher and one student.
  def changeset(struct, params, :create) do
    struct
    |> changeset(params)
    |> validate_offered_course()
    |> validate_duration()
  end

  # Update-specific changeset: duration is re-checked but the offered-course
  # membership validation is intentionally skipped.
  def changeset(struct, params, :update) do
    struct
    |> changeset(params)
    |> validate_duration()
  end

  # Ensures starting/finishing times are non-zero and properly ordered.
  # Falls back to the persisted value when the field is not being changed.
  def validate_duration(%{changes: changes, valid?: true} = changeset) do
    starting_at = Map.get(changes, :starting_at) || Map.get(changeset.data, :starting_at)
    finishes_at = Map.get(changes, :finishes_at) || Map.get(changeset.data, :finishes_at)

    cond do
      Time.compare(starting_at, Time.from_erl({0, 0, 0})) == :eq ->
        add_error(changeset, :starting_at, "Starting time cannot be zero")
      Time.compare(finishes_at, Time.from_erl({0, 0, 0})) == :eq ->
        add_error(changeset, :finishes_at, "Finishing time cannot be zero")
      Time.compare(starting_at, finishes_at) != :lt ->
        add_error(changeset, :finishes_at,
          "Finishing time should be greater than the starting time")
      true -> changeset
    end
  end
  # Skip the check entirely when the changeset is already invalid.
  def validate_duration(changeset), do: changeset

  # Rejects classes whose offered course has no teachers or no students
  # (the inner joins make the query return nil in that case).
  def validate_offered_course(%{changes: changes, valid?: true} = changeset) do
    offered_course_id = Map.get(changes, :offered_course_id)

    query = from oc in OfferedCourse,
      join: t in assoc(oc, :teachers),
      join: s in assoc(oc, :students),
      preload: [teachers: t, students: s],
      where: oc.id == ^offered_course_id

    case Repo.one(query) do
      nil -> add_error(changeset, :offered_course_id,
               "Attached course should have at least one teacher and one student")
      _ -> changeset
    end
  end
  def validate_offered_course(changeset), do: changeset

  # Ensures the class date falls inside the term of its offered course.
  defp validate_date(%{valid?: true} = changeset) do
    term = OfferedCourse
    |> Repo.get(Changeset.get_field(changeset, :offered_course_id))
    |> Repo.preload([:term])
    |> Map.get(:term)

    st = term
    |> Map.get(:start_date)
    |> Date.cast!()

    en = term
    |> Map.get(:end_date)
    |> Date.cast!()

    date = Changeset.get_field(changeset, :date)

    case {Date.compare(st, date), Date.compare(en, date)} do
      {:gt, _} -> Changeset.add_error(changeset, :date,
                    "The date can't be before the term's beginning")
      {_, :lt} -> Changeset.add_error(changeset, :date,
                    "The date can't be after the term's end")
      {_, _} -> changeset
    end
  end
  defp validate_date(changeset), do: changeset
end
|
lib/course_planner/classes/class.ex
| 0.766206 | 0.469763 |
class.ex
|
starcoder
|
defmodule Codex.OAuth do
  @moduledoc """
  Handles OAuth related functionality for interaction with Goodreads API ONLY.
  You should not use this OAuth client to interact with other APIs, for it doesn't do endpoint
  discovery or anything like that. It's specifically written for Goodreads' API.
  """
  alias Codex.{Config, HttpClient}

  @doc """
  Get a Goodreads token and token secret.
  If signing goes well, returns `{:ok, tuple}` where tuple is a 2-element tuple containing the token and token secret.
  Otherwise, returns `{:error, reason}`.
  Your `API_KEY` and `API_SECRET` should be stored in your config, like:
  ```elixir
  config :codex,
    api_key: "YOUR_API_KEY",
    api_secret: "YOUR_API_SECRET"
  ```
  ## Examples
      iex> Codex.OAuth.get_request_token_and_secret()
      {:ok, %{"oauth_token" => "TOKEN", "oauth_token_secret" => "TOKEN_<PASSWORD>"}}
  > Notice that this just obtains a token and token secret from the Goodread's OAuth service. It doesn't
  store your token in any way or do anything else with it. You should do all your token storage and
  management logic yourself.
  """
  @spec get_request_token_and_secret() :: {:ok, map()} | {:error, any()}
  def get_request_token_and_secret() do
    endpoint = "oauth/request_token"
    headers = [{:Authorization, generate_oauth_header(endpoint)}]

    case HttpClient.signed_get(endpoint, headers) do
      {:ok, body} ->
        {:ok, extract_token_and_secret(body)}

      {:error, _} = err ->
        err
    end
  end

  @doc """
  Get the authorization url, provided an OAuth token and optionally a callback URL
  ## Args:
  * `token` - The OAuth token obtained from Goodreads. This can be obtained by using `Codex.OAuth.get_request_token_and_secret/2`
  * `callback_url` - Optional. A URL for your application for an endpoint to which the user should be redirected
  after giving your app permission.
  ## Examples:
      iex> Codex.OAuth.get_request_authorization_url("API_TOKEN")
      "https://www.goodreads.com/oauth/authorize?oauth_token=API_TOKEN"
      iex> Codex.OAuth.get_request_authorization_url("API_TOKEN", "https://myapp.com/goodreads_oauth_callback")
      "https://www.goodreads.com/oauth/authorize?oauth_token=API_TOKEN&oauth_callback=https://myapp.com/goodreads_oauth_callback"
  """
  def get_request_authorization_url(token, callback_url \\ nil)
  def get_request_authorization_url(token, nil), do: "#{Config.api_url()}oauth/authorize?oauth_token=#{token}"

  def get_request_authorization_url(token, callback_url) do
    "#{Config.api_url()}oauth/authorize?oauth_token=#{token}&oauth_callback=#{callback_url}"
  end

  @doc """
  Exchange the request token and secret for an access token and secret. In order for this to work,
  the user should have provided access to your application via the authorization URL, which can be
  obtained using `Codex.OAuth.get_request_authorization_url`.
  ## Args:
  * `request_token` - the initial request token.
  * `request_token_secret` - the initial request token secret.
  An initial request token and token secret can be obtained by calling `Codex.OAuth.get_request_token_and_secret/0`.
  ## Examples:
      iex> Codex.OAuth.get_access_token_and_secret("REQUEST_TOKEN", "REQUEST_TOKEN_SECRET")
      {:ok, %{"oauth_token" => "ACCESS_TOKEN", "oauth_token_secret" => "ACCESS_TOKEN_SECRET"}}
  """
  def get_access_token_and_secret(request_token, request_token_secret) do
    endpoint = "oauth/access_token"
    headers = [{:Authorization, generate_oauth_header(endpoint, request_token, request_token_secret)}]

    case HttpClient.signed_get(endpoint, headers) do
      {:ok, body} ->
        {:ok, extract_token_and_secret(body)}

      {:error, _} = err ->
        err
    end
  end

  @doc """
  Generate an Authorization header with a valid OAuth signature, taking the consumer key and secret
  from the config. You may optionally pass, a token, token secret and query params.
  ## Args:
  * `token` - (optional) a request token previously obtained from the OAuth service.
  * `token_secret` - (optional) a request token secret previously obtained from the OAuth service.
  * `params` - (optional) the query params from the request, as these need to be included in the signature.
  ## Examples:
      iex> Codex.OAuth.generate_oauth_header("oauth/request_token")
      "OAuth oauth_consumer_key=***,oauth_nonce=oHaBYEbXwtv7lWIoDkXMQT-I5iJThNliAls4vGDm,oauth_signature_method=HMAC-SHA1,oauth_timestamp=1581858006,oauth_token=***,oauth_version=1.0,oauth_signature=***"
  """
  def generate_oauth_header(endpoint, token \\ nil, token_secret \\ nil, params \\ %{}) do
    {key, secret} = get_goodreads_key_and_secret_from_config()

    key
    |> generate_oauth_data(token, params)
    |> generate_signature(endpoint, secret, token_secret)
    |> encode_header()
  end

  # Builds the map of oauth_* protocol parameters, merged with any request
  # query params; nil entries (e.g. a missing token) are dropped.
  defp generate_oauth_data(key, token, params) do
    %{
      "oauth_consumer_key" => key,
      "oauth_nonce" => get_random_string(),
      "oauth_signature_method" => "HMAC-SHA1",
      "oauth_timestamp" => get_timestamp(),
      "oauth_token" => token,
      "oauth_version" => "1.0"
    }
    |> Map.merge(params)
    |> Enum.reject(fn {_k, v} -> is_nil(v) end)
    |> Map.new()
  end

  # Computes the HMAC-SHA1 signature over the OAuth 1.0 base string
  # ("GET&<encoded url>&<encoded sorted params>").
  defp generate_signature(params, endpoint, secret, token_secret) do
    encoded_params =
      params
      |> concatenate_params("&")
      |> URI.encode_www_form()

    base_string = "GET&#{URI.encode_www_form(full_url(endpoint))}&#{encoded_params}"
    {params, sign(base_string, "#{secret}&#{token_secret}")}
  end

  # Renders the final "OAuth k=v,..." header value; only the oauth_* protocol
  # params (not request query params) are listed alongside the signature.
  defp encode_header({params, signature}) do
    oauth_data =
      params
      |> Map.take(oauth_params_list())
      |> concatenate_params(",")

    "OAuth #{oauth_data},oauth_signature=#{signature}"
  end

  # 40-character URL-safe nonce.
  defp get_random_string() do
    40
    |> :crypto.strong_rand_bytes()
    |> Base.url_encode64
    |> binary_part(0, 40)
  end

  defp get_timestamp(), do: DateTime.utc_now() |> DateTime.to_unix()

  # Joins a params map as "k=v" pairs, sorted by key as OAuth 1.0 requires.
  defp concatenate_params(params, joiner) do
    params
    |> Enum.sort_by(fn {k, _v} -> k end)
    |> Enum.reduce("", fn
      {k, v}, "" ->
        "#{k}=#{v}"

      {k, v}, acc ->
        "#{acc}#{joiner}#{k}=#{v}"
    end)
  end

  defp full_url(endpoint), do: Config.api_url() <> endpoint

  # FIX: :crypto.hmac/3 was deprecated in OTP 22 and removed in OTP 24;
  # :crypto.mac/4 (available since OTP 22.1) is the supported replacement.
  defp sign(text, key) do
    :hmac
    |> :crypto.mac(:sha, key, text)
    |> Base.encode64()
  end

  # Parses a "k1=v1&k2=v2" response body into a map.
  defp extract_token_and_secret(body) do
    for pair <- String.split(body, "&"),
        [key, value] = String.split(pair, "="),
        into: %{},
        do: {key, value}
  end

  defp get_goodreads_key_and_secret_from_config() do
    {Application.get_env(:codex, :api_key), Application.get_env(:codex, :api_secret)}
  end

  # The oauth_* protocol parameters that belong in the Authorization header.
  defp oauth_params_list do
    [
      "oauth_consumer_key",
      "oauth_nonce",
      "oauth_signature_method",
      "oauth_timestamp",
      "oauth_token",
      "oauth_version"
    ]
  end
end
|
lib/codex/oauth.ex
| 0.884772 | 0.58059 |
oauth.ex
|
starcoder
|
defmodule HTS221.CTRLReg2 do
  @moduledoc """
  Control the memory boot, heater element, and one shot initialization for the
  HTS221
  """
  import Bitwise

  # Bit masks for the CTRL_REG2 register (address 0x21).
  @reboot_memory 0x80
  @heater_enabled 0x02
  @one_shot_get_new_data_set 0x01

  @type boot_mode() :: :normal | :reboot_memory
  @type heater_mode() :: :disabled | :enabled
  @type one_shot() :: :waiting | :get_new_dataset
  @type t() :: %__MODULE__{
          boot: boot_mode(),
          heater: heater_mode(),
          one_shot: one_shot()
        }

  defstruct boot: :normal, heater: :disabled, one_shot: :waiting

  @doc """
  Parse a binary register response into the `HTS221.CTRLReg2` structure
  """
  @spec from_binary(binary()) :: t()
  def from_binary(<<boot::size(1), _reserved::size(5), heater::size(1), one_shot::size(1)>>) do
    %__MODULE__{
      boot: decode(:boot, boot),
      heater: decode(:heater, heater),
      one_shot: decode(:one_shot, one_shot)
    }
  end

  @doc """
  Make a `HTS221.CTRLReg2` structure into a binary string to be written to the
  register
  """
  @spec to_binary(t()) :: binary()
  def to_binary(%__MODULE__{boot: boot, heater: heater, one_shot: one_shot}) do
    mask = bits(:boot, boot) ||| bits(:heater, heater) ||| bits(:one_shot, one_shot)
    <<0x21, mask>>
  end

  # Bit -> atom decoding for each field.
  defp decode(:boot, 0), do: :normal
  defp decode(:boot, 1), do: :reboot_memory
  defp decode(:heater, 0), do: :disabled
  defp decode(:heater, 1), do: :enabled
  defp decode(:one_shot, 0), do: :waiting
  defp decode(:one_shot, 1), do: :get_new_dataset

  # Atom -> bit-mask encoding; inactive states contribute nothing.
  defp bits(:boot, :reboot_memory), do: @reboot_memory
  defp bits(:heater, :enabled), do: @heater_enabled
  defp bits(:one_shot, :get_new_dataset), do: @one_shot_get_new_data_set
  defp bits(_field, _inactive), do: 0

  defimpl HTS221.Register do
    alias HTS221.{CTRLReg2, IORead, IOWrite}

    def read(_) do
      {:ok, IORead.new(0x21, 1)}
    end

    def write(ctrl_reg2) do
      {:ok, IOWrite.new(CTRLReg2.to_binary(ctrl_reg2))}
    end
  end
end
|
lib/hts221/ctrl_reg2.ex
| 0.763484 | 0.413388 |
ctrl_reg2.ex
|
starcoder
|
defmodule TelemetryMetricsCloudwatch do
  @moduledoc """
  This is a [Amazon CloudWatch](https://aws.amazon.com/cloudwatch/) Reporter for
  [`Telemetry.Metrics`](https://github.com/beam-telemetry/telemetry_metrics) definitions.
  Provide a list of metric definitions to the `init/2` function. It's recommended to
  run TelemetryMetricsCloudwatch under a supervision tree, usually under Application.
      def start(_type, _args) do
        # List all child processes to be supervised
        children = [
          {TelemetryMetricsCloudwatch, [metrics: metrics()]}
          ...
        ]
        opts = [strategy: :one_for_one, name: ExampleApp.Supervisor]
        Supervisor.start_link(children, opts)
      end
      defp metrics do
        [
          counter("http.request.count"),
          last_value("vm.memory.total", unit: :byte),
          last_value("vm.total_run_queue_lengths.total")
        ]
      end
  You can also provide options for the namespace used in CloudWatch (by default, "Telemetry")
  and the minimum frequency (in milliseconds) with which data will be posted (see section
  below for posting rules). For instance:
      ...
      children = [
        {TelemetryMetricsCloudwatch, metrics: metrics(), namespace: "Backend", push_interval: 30_000}
      ]
      ...
  ## Telemetry.Metrics Types Supported
  `TelemetryMetricsCloudwatch` supports 4 of the [Metrics](https://hexdocs.pm/telemetry_metrics/Telemetry.Metrics.html#module-metrics):
  * [Counter](https://hexdocs.pm/telemetry_metrics/Telemetry.Metrics.html#counter/2):
    Counter metric keeps track of the total number of specific events emitted.
  * [LastValue](https://hexdocs.pm/telemetry_metrics/Telemetry.Metrics.html#last_value/2):
    Last value keeps track of the selected measurement found in the most recent event.
  * [Summary](https://hexdocs.pm/telemetry_metrics/Telemetry.Metrics.html#summary/2): Summary
    aggregates measurement's values into statistics, e.g. minimum and maximum, mean, or percentiles.
    This sends every measurement to CloudWatch.
  * [Sum](https://hexdocs.pm/telemetry_metrics/Telemetry.Metrics.html#sum/2): Sum metric keeps track
    of the sum of selected measurement's values carried by specific events. If you are using Summary
    for a metric already, then CloudWatch can calculate a Sum using that Summary metric. If you
    only need a Sum (and no other summary metrics) then use this Sum metric instead.
  These metrics are sent to CloudWatch based on the rules described below.
  ## When Data is Sent
  Cloudwatch has [certain constraints](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
  on the number of metrics that can be sent up at any given time.  `TelemetryMetricsCloudwatch`
  will send accumulated metric data at least every minute (configurable by the `:push_interval`
  option) or when the data cache has reached the maximum size that CloudWatch will accept.
  ## Units
  In order to report metrics in the CloudWatch UI, they must be one of the following values:
  * Time units: `:second`, `:microsecond`, `:millisecond`
  * Byte sizes: `:byte`, `:kilobyte`, `:megabyte`, `:gigabyte`, `:terabyte`
  * Bit sizes: `:bit`, `:kilobit`, `:megabit`, `:gigabit`, `:terabit`
  For `Telementry.Metrics.Counter`s, the unit will always be `:count`.  Otherwise, the unit will be treated as `nil`.
  ## Notes on AWS
  [`ExAws`](https://hexdocs.pm/ex_aws/ExAws.html) is the library used to send metrics to CloudWatch.  Make sure your
  [keys are configured](https://hexdocs.pm/ex_aws/ExAws.html#module-aws-key-configuration) and that they have the
  [correct permissions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/permissions-reference-cw.html) of `cloudwatch:PutMetricData`.
  Up to 10 tags are sent up to AWS as dimensions for a given metric.  Every metric name will have a suffix added based on
  the metric type (CloudWatch doesn't allow different units / measurements with the same name).  So, for instance,
  if your metrics are:
      summary("my_app.repo.query.total_time", unit: {:nanosecond, :millisecond})
      count("my_app.repo.query.total_time")
  Then the metric names in CloudWatch will be:
  * `my_app.repo.query.total_time.summary` (with all data points recorded)
  * `my_app.repo.query.total_time.count` (with the number of queries recorded)
  """
  use GenServer
  require Logger
  alias TelemetryMetricsCloudwatch.{Cache, Cloudwatch}

  @doc """
  Start the `TelemetryMetricsCloudwatch` `GenServer`.
  Available options:
  * `:name` - name of the reporter instance.
  * `:metrics` - a list of `Telemetry.Metrics` to track.
  * `:namespace` - Namespace to use in CloudWatch
  * `:push_interval` - The minimum interval that metrics are guaranteed to be pushed to cloudwatch (in milliseconds)
  """
  def start_link(opts) do
    server_opts = Keyword.take(opts, [:name])

    metrics =
      opts[:metrics] ||
        raise ArgumentError, "the :metrics option is required by #{inspect(__MODULE__)}"

    Cache.validate_metrics(metrics)
    namespace = Keyword.get(opts, :namespace, "Telemetry")
    push_interval = Keyword.get(opts, :push_interval, 60_000)
    GenServer.start_link(__MODULE__, {metrics, namespace, push_interval}, server_opts)
  end

  @impl true
  def init({metrics, namespace, push_interval}) do
    # Trap exits so terminate/2 runs and detaches the telemetry handlers.
    Process.flag(:trap_exit, true)
    groups = Enum.group_by(metrics, & &1.event_name)

    # One telemetry handler per unique event name; the handler id includes
    # self() so multiple reporter instances don't collide.
    for {event, metrics} <- groups do
      id = {__MODULE__, event, self()}
      :telemetry.attach(id, event, &handle_telemetry_event/4, {self(), metrics})
    end

    state = %Cache{
      metric_names: Map.keys(groups),
      namespace: namespace,
      last_run: System.monotonic_time(:second),
      push_interval: push_interval
    }

    schedule_push_check(state)
    {:ok, state}
  end

  @impl true
  def handle_info(:push_check, state) do
    # Re-arm the timer first so a slow push can't stall the schedule.
    schedule_push_check(state)
    {:noreply, push_check(state)}
  end

  @impl true
  def handle_info({:handle_event, measurements, metadata, metrics}, state) do
    # Fold each matching metric into the cache, pushing whenever a
    # CloudWatch limit is reached mid-fold.
    newstate =
      Enum.reduce(metrics, state, fn metric, state ->
        state
        |> Cache.push_measurement(measurements, metadata, metric)
        |> push_check()
      end)

    {:noreply, newstate}
  end

  @impl true
  def handle_info(_message, state), do: {:noreply, state}

  # Runs inside the emitting process: forward to the reporter so all cache
  # mutation happens in this GenServer.
  defp handle_telemetry_event(_event_name, measurements, metadata, {pid, metrics}),
    do: Kernel.send(pid, {:handle_event, measurements, metadata, metrics})

  defp schedule_push_check(%Cache{push_interval: push_interval}),
    do: Process.send_after(self(), :push_check, push_interval)

  # Push when data is old enough, or when either CloudWatch limit
  # (20 metrics, 150 values per metric) is hit.
  defp push_check(%Cache{last_run: last_run, push_interval: push_interval} = state) do
    # https://docs.aws.amazon.com/cli/latest/reference/cloudwatch/put-metric-data.html
    # We can publish up to 150 values per metric for up to 20 different metrics
    metric_count = Cache.metric_count(state)
    metric_age = System.monotonic_time(:second) - last_run
    push_interval = push_interval / 1000

    cond do
      metric_age >= push_interval and metric_count > 0 ->
        push(state)

      metric_count == 20 ->
        push(state)

      Cache.max_values_per_metric(state) == 150 ->
        push(state)

      true ->
        state
    end
  end

  # Drains the cache to CloudWatch and resets the push clock.
  defp push(%Cache{namespace: namespace} = state) do
    {state, metric_data} = Cache.pop_metrics(state)
    Cloudwatch.send_metrics(metric_data, namespace)
    Map.put(state, :last_run, System.monotonic_time(:second))
  end

  @impl true
  def terminate(_, %Cache{metric_names: events}) do
    for event <- events do
      :telemetry.detach({__MODULE__, event, self()})
    end

    :ok
  end
end
|
lib/telemetry_metrics_cloudwatch.ex
| 0.938018 | 0.641394 |
telemetry_metrics_cloudwatch.ex
|
starcoder
|
defmodule Sanbase.Clickhouse.HistoricalBalance.Utils do
  @moduledoc ~s"""
  Helper functions used when working with the historical balances
  """

  @type in_type :: %{
          sign: non_neg_integer(),
          balance: float(),
          datetime: DateTime.t()
        }

  @type out_type :: %{
          balance: float(),
          datetime: DateTime.t()
        }

  @doc ~s"""
  Take a list of addresses and 1-arity function that returns `{:ok, balances}` or
  an error and sum all balances with the corresponding datetimes.
  The combined balance of 2+ addresses is the sum of their balances for every
  datetime
  """
  def combine_historical_balances(addresses, fun) when is_function(fun, 1) do
    result =
      addresses
      |> Sanbase.Parallel.map(fn address ->
        {:ok, balances} = fun.(address)
        balances
      end)
      # Zip the per-address series together so every tuple holds the balances
      # of all addresses for one datetime bucket.
      |> Enum.zip()
      |> Enum.map(&Tuple.to_list/1)
      |> Enum.map(fn
        [] ->
          []

        [%{datetime: datetime} | _] = list ->
          balance = list |> Enum.map(& &1.balance) |> Enum.sum()
          %{datetime: datetime, balance: balance}
      end)

    {:ok, result}
  end

  @doc ~s"""
  Clickhouse fills empty buckets with 0 while we need it filled with the last
  seen value. As the balance changes happen only when a transfer occurs
  then we need to fetch the whole history of changes in order to find the balance
  """
  @spec fill_gaps_last_seen_balance(list(in_type)) :: list(out_type)
  def fill_gaps_last_seen_balance(values) do
    values
    |> Enum.reduce({[], 0}, fn
      # Empty bucket: carry the last seen balance forward.
      %{has_changed: 0, datetime: dt}, {acc, last_seen} ->
        {[%{balance: last_seen, datetime: dt} | acc], last_seen}

      # Real data point: emit it and remember it as the new last-seen value.
      %{balance: balance, datetime: dt}, {acc, _last_seen} ->
        {[%{balance: balance, datetime: dt} | acc], balance}
    end)
    |> elem(0)
    |> Enum.reverse()
  end

  @doc ~s"""
  Result-tuple variant of `fill_gaps_last_seen_balance/1`: applies the gap
  filling to an `{:ok, values}` tuple and passes `{:error, reason}` through.
  """
  def maybe_fill_gaps_last_seen_balance({:ok, values}) do
    # Delegate instead of duplicating the reduce logic (the two copies had
    # already started to drift apart in earlier revisions).
    {:ok, fill_gaps_last_seen_balance(values)}
  end

  def maybe_fill_gaps_last_seen_balance({:error, error}), do: {:error, error}

  @doc ~s"""
  If the first data point is an empty bucket (`has_changed: 0`), fetch its real
  balance with the provided 0-arity function and substitute it in, so the
  gap-filling above starts from the correct value.
  """
  def maybe_update_first_balance({:ok, [%{has_changed: 0} | _] = data}, fun)
      when is_function(fun, 0) do
    case fun.() do
      {:ok, balance} ->
        [first_elem | rest] = data
        result = [%{first_elem | has_changed: 1, balance: balance} | rest]
        {:ok, result}

      {:error, error} ->
        {:error, error}
    end
  end

  def maybe_update_first_balance({:ok, result}, _function), do: {:ok, result}
  def maybe_update_first_balance({:error, error}, _function), do: {:error, error}

  @doc ~s"""
  Drops all data points strictly before `before_datetime`; passes errors through.
  """
  def maybe_drop_not_needed({:ok, result}, before_datetime) do
    result =
      result
      |> Enum.drop_while(fn %{datetime: dt} -> DateTime.compare(dt, before_datetime) == :lt end)

    {:ok, result}
  end

  def maybe_drop_not_needed({:error, error}, _before_datetime), do: {:error, error}
end
|
lib/sanbase/clickhouse/historical_balance/utils.ex
| 0.844938 | 0.562207 |
utils.ex
|
starcoder
|
defmodule Neoprice.Cache do
  @moduledoc """
  Macro to define an ETS-backed price buffer.

  `use`-ing this module defines a cache module that periodically seeds and
  syncs price data from Cryptocompare into one ETS table per configured
  `Neoprice.Cache.Config`.
  """
  use GenServer
  alias Neoprice.Cryptocompare
  require Logger

  # Durations in seconds, used to compute when the next data point is due.
  @minute 60
  @hour 3600
  @day 86_400

  defmodule Config do
    @moduledoc false
    # cache_name:  ETS table name
    # definition:  granularity of the data points (:day | :hour | :minute)
    # aggregation: number of granularity units aggregated into one data point
    # duration:    :start (everything since start_day) or a sliding window in seconds
    defstruct [:cache_name, :definition, :aggregation, :duration]
  end

  defmacro __using__(opts \\ []) do
    quote do
      # Supervisor child spec for this cache, one worker per using module.
      def worker do
        import Supervisor.Spec

        state = %{
          module: __MODULE__
        }

        worker(unquote(__MODULE__), [state], id: __MODULE__)
      end

      # Compile-time defaults for the trading pair and cache configuration.
      def from_symbol,
        do: unquote(if is_nil(opts[:from_symbol]), do: "BTC", else: opts[:from_symbol])

      def to_symbol, do: unquote(if is_nil(opts[:to_symbol]), do: "BTC", else: opts[:to_symbol])
      def config, do: unquote(if is_nil(opts[:config]), do: [], else: opts[:config])

      def start_day,
        do: unquote(if is_nil(opts[:start_day]), do: 1_500_000_000, else: opts[:start_day])

      def price, do: unquote(__MODULE__).price(__MODULE__)
      def last_price_full, do: unquote(__MODULE__).last_price_full(__MODULE__)
    end
  end

  def start_link(state) do
    # NOTE(review): `:module` is not a documented GenServer.start_link/3 option
    # and appears to be ignored - was `name: state.module` intended? Confirm.
    GenServer.start_link(__MODULE__, state, module: state.module)
  end

  def init(state) do
    # One public named ETS table per configured cache; :ordered_set keeps the
    # entries sorted by their unix-timestamp keys.
    Enum.each(state.module.config, fn cache ->
      :ets.new(cache.cache_name, [:public, :ordered_set, :named_table, {:read_concurrency, true}])
    end)

    # Seed asynchronously so start_link/1 returns immediately.
    Process.send_after(self(), :seed, 0)
    {:ok, state}
  end

  def handle_info(:seed, state) do
    # Schedule the first periodic sync, then do the initial full seed.
    Process.send_after(self(), :sync, 10_000)
    seed(state)
    {:noreply, state}
  end

  def handle_info(:sync, state) do
    # Re-arm the 10s sync timer on every tick.
    Process.send_after(self(), :sync, 10_000)
    sync(state)
    {:noreply, state}
  end

  # Live (uncached) price lookups, delegated to Cryptocompare.
  def price(module) do
    Cryptocompare.last_price(module.from_symbol(), module.to_symbol())
  end

  def last_price_full(module) do
    Cryptocompare.last_price_full(module.from_symbol(), module.to_symbol())
  end

  # Seeds every configured cache with the full history of its time frame.
  defp seed(state) do
    Enum.each(state.module.config, fn cache ->
      seed(state.module, cache)
    end)
  end

  defp seed(module, cache) do
    {from, to} = time_frame(module, cache)

    elements =
      Cryptocompare.get_price(
        cache.definition,
        from,
        to,
        module.from_symbol(),
        module.to_symbol(),
        cache.aggregation
      )

    :ets.insert(cache.cache_name, elements)
  end

  def sync(state) do
    Enum.each(state.module.config, fn cache ->
      sync_cache(cache, state.module)
    end)
  end

  defp sync_cache(
         %{
           cache_name: cache_name,
           definition: definition,
           aggregation: aggregation
         } = config,
         module
       ) do
    cache = :ets.tab2list(cache_name)
    # Empty table -> pretend the last element was at unix time 0.
    {last_time, _} = List.last(cache) || {0, ""}

    # Only hit the API if a new data point is expected to exist by now.
    if next_value(definition, last_time, aggregation) < now() do
      Logger.debug(fn ->
        "Syncing #{cache_name}"
      end)

      elements =
        Cryptocompare.get_price(
          definition,
          last_time + 1,
          now(),
          module.from_symbol,
          module.to_symbol,
          aggregation
        )

      :ets.insert(cache_name, elements)
      delete_old_values(config, module, cache)
    end
  end

  # {from, to} unix range covered by a cache: either from the module's
  # configured start day, or a sliding window of `duration` seconds.
  defp time_frame(module, %{duration: :start}), do: {module.start_day, now()}

  defp time_frame(_, %{duration: duration}) do
    now = now()
    {now - duration, now}
  end

  # Deletes entries older than the cache's time frame. `cache` is ordered by
  # key, so we can stop at the first key inside the window.
  defp delete_old_values(config, module, cache) do
    {from, _} = time_frame(module, config)

    Enum.reduce_while(cache, nil, fn {k, _}, _ ->
      if k < from do
        :ets.delete(config.cache_name, k)

        Logger.debug(fn ->
          # NOTE(review): "Deleteting" typo kept as-is (runtime log string).
          "Deleteting #{k}"
        end)

        {:cont, nil}
      else
        {:halt, nil}
      end
    end)
  end

  # Unix time at which the data point following `time` is expected.
  defp next_value(:day, time, aggregation), do: time + @day * aggregation
  defp next_value(:hour, time, aggregation), do: time + @hour * aggregation
  defp next_value(:minute, time, aggregation), do: time + @minute * aggregation

  defp now, do: DateTime.utc_now() |> DateTime.to_unix()
end
|
apps/neoprice/lib/neoprice/cache.ex
| 0.550607 | 0.490663 |
cache.ex
|
starcoder
|
defmodule Contentful.Query do
  require Logger

  @moduledoc """
  This module provides the chainable query syntax for building queries against the
  APIs of Contentful.

  The chains will then be serialized to a URL and sent to the API. A basic query looks like this:

  ```
  Entity
  |> skip(3)
  |> limit(2)
  |> fetch_all
  ```

  wherein `Entity` is one of the modules that exhibit `Contentful.Queryable` behaviour, such as
  `Contentful.Delivery.Entries`, `Contentful.Delivery.Assets` and `Contentful.Delivery.ContentTypes`.

  As an example, querying all entries of a given `Contentful.Space` (represented by its `space_id`) can
  be done as follows:

  ```
  Contentful.Delivery.Entries
  |> Contentful.Query.fetch_all(space_id)
  ```
  """

  alias Contentful.Configuration
  alias Contentful.ContentType
  alias Contentful.Delivery
  alias Contentful.Delivery.Assets
  alias Contentful.Delivery.Entries
  alias Contentful.Delivery.Spaces
  alias Contentful.Request
  alias Contentful.Space
  alias Contentful.SysData

  @allowed_filter_modifiers [:all, :in, :nin, :ne, :lte, :gte, :lt, :gt, :match, :exists]

  @doc """
  adds the `include` parameter to a query.

  This allows for fetching associated items up to a collection of `Contentful.Entry`.
  The `include` call will _only_ work with `Contentful.Delivery.Entries`, as it is meaningless to
  other entities.

  ## Example:
      alias Contentful.Delivery.Entries
      Entries |> include(2) |> fetch_all

      # translates in the background to
      "<api_url>/entries?include=2"
  """
  @spec include({Entries, list()}, integer()) :: {Entries, list()}
  def include(queryable, number \\ 1)

  def include({Entries, parameters}, number) do
    # The Delivery API caps the include depth at 10 levels.
    if number > 10 do
      raise(ArgumentError, "Include depth cannot be higher than 10!")
    end

    {Entries, parameters |> Keyword.put(:include, number)}
  end

  def include(Entries, number) do
    include({Entries, []}, number)
  end

  def include(queryable, _number) do
    queryable
  end

  @doc """
  adds the `limit` parameter to a call, limiting the amount of entities returned.
  The caller will still retrieve the total amount of entities, if successful.

  The limit defaults to `1000`, the maximum `limit` allowed for API calls.

  ## Examples
      alias Contentful.Delivery.Assets
      Assets |> limit(2) |> fetch_all

      # translates in the background to
      "<api_url>/assets?limit=2"
  """
  @spec limit({module(), list()}, integer()) :: {module(), list()}
  def limit(queryable, number \\ 1000)

  def limit({queryable, parameters}, number) do
    {queryable, parameters |> Keyword.put(:limit, number)}
  end

  def limit(queryable, number) do
    limit({queryable, []}, number)
  end

  @doc """
  adds the `skip` parameter to API calls, allowing to skip over a number of entities, effectively
  allowing the implementation of pagination if desired.

  ## Examples
      alias Contentful.Delivery.Assets
      Assets |> skip(10) |> fetch_all

      # translates in the background to a call to the API
      "<api_url>/assets?skip=10"
  """
  @spec skip({module(), list()}, non_neg_integer()) :: {module(), list()}
  def skip({queryable, parameters}, number) do
    {queryable, parameters |> Keyword.put(:skip, number)}
  end

  def skip(queryable, number) do
    skip({queryable, []}, number)
  end

  @doc """
  adds a `content_type` parameter for filtering sets of `Contentful.Entry`
  by a `Contentful.ContentType`, effectively allowing for scoping by content type.

  `content_type` will only work with `Contentful.Delivery.Entries` at the moment.

  ## Examples
      alias Contentful.Delivery.Entries
      Entries |> content_type("foobar") |> fetch_all

      # translates in the background to
      "<api_url>/entries?content_type=foobar"

      # also works with passing `Contentful.ContentType`:
      my_content_type = %Contentful.ContentType{sys: %Contentful.SysData{id: "foobar"}}
      Entries |> content_type(my_content_type) |> fetch_all
  """
  @spec content_type({Entries, list()}, String.t() | ContentType.t()) :: {Entries, list()}
  def content_type({Entries, parameters}, c_type) when is_binary(c_type) do
    {Entries, parameters |> Keyword.put(:content_type, c_type)}
  end

  def content_type({Entries, parameters}, %ContentType{sys: %SysData{id: value}} = _c_type) do
    content_type({Entries, parameters}, value)
  end

  def content_type(Entries, c_type) do
    content_type({Entries, []}, c_type)
  end

  def content_type(queryable, _c_type) do
    queryable
  end

  @doc """
  will __resolve__ a query chain by eagerly calling the API and resolving the response immediately

  ## Examples
      alias Contentful.Delivery.Entries
      {:ok, entries, total: _total_count_of_entries} =
        Entries |> content_type("foobar") |> limit(1) |> fetch_all
  """
  @spec fetch_all({module(), list()}, String.t(), String.t(), String.t()) ::
          {:ok, list(struct()), total: non_neg_integer()}
          | {:error, :rate_limit_exceeded, wait_for: integer()}
          | {:error, atom(), original_message: String.t()}
  def fetch_all(
        queryable,
        space \\ Configuration.get(:space_id),
        env \\ Configuration.get(:environment),
        api_key \\ Configuration.get(:access_token)
      )

  def fetch_all({Spaces, _}, _, _, _) do
    {:error, [message: "Fetching a spaces collection is not supported, use fetch_one/1 instead"],
     total: 0}
  end

  def fetch_all(queryable, %Space{sys: %SysData{id: space}}, env, api_key) do
    fetch_all(queryable, space, env, api_key)
  end

  def fetch_all(
        {queryable, parameters},
        space,
        env,
        api_key
      ) do
    params = parameters |> Request.collection_query_params()

    url =
      [
        space |> Delivery.url(env),
        queryable.endpoint()
      ]
      |> Enum.join()
      |> URI.parse()
      |> add_query_params(params)
      |> to_string()

    {url, api_key |> Request.headers()}
    |> Delivery.send_request()
    |> Delivery.parse_response(&queryable.resolve_collection_response/1)
  end

  def fetch_all(queryable, space, env, api_key) do
    fetch_all({queryable, []}, space, env, api_key)
  end

  @doc """
  will __resolve__ a query chain by eagerly calling the API asking for _one_ entity

  ## Examples
      import Contentful.Query
      alias Contentful.Delivery.{Spaces, Entries}

      # Note: Spaces is the only Queryable that can be fetched without an id
      Spaces |> fetch_one

      # all others would throw an error, so an id has to be passed:
      Entries |> fetch_one("my_entry_id")
  """
  @spec fetch_one(module(), String.t() | nil, String.t(), String.t(), String.t()) ::
          {:ok, struct()}
          | {:error, :rate_limit_exceeded, wait_for: integer()}
          | {:error, atom(), original_message: String.t()}
  @deprecated """
  Use Contentful.Query.by/2 and Contentful.Query.fetch_all/4 with an id instead
  """
  def fetch_one(
        queryable,
        id \\ nil,
        space \\ Configuration.get(:space_id),
        env \\ Configuration.get(:environment),
        api_key \\ Configuration.get(:access_token)
      )

  def fetch_one(queryable, id, %Space{sys: %SysData{id: space_id}}, env, api_key) do
    fetch_one(queryable, id, space_id, env, api_key)
  end

  def fetch_one(
        queryable,
        id,
        space,
        env,
        api_key
      ) do
    url =
      case {queryable, id} do
        {Spaces, nil} ->
          [space |> Delivery.url()]

        {Spaces, id} ->
          [id |> Delivery.url()]

        {_queryable, nil} ->
          raise ArgumentError, "id is missing!"

        {{module, _parameters}, id} ->
          # drops the parameters, as single query responses don't allow parameters
          [space |> Delivery.url(env), module.endpoint(), "/#{id}"]

        _ ->
          [space |> Delivery.url(env), queryable.endpoint(), "/#{id}"]
      end

    # since you can pass compose into fetch one, we strip extra params here
    queryable =
      case queryable do
        {module, parameters} ->
          Logger.warn("Stripping parameters: #{inspect(parameters)}")
          module

        _ ->
          queryable
      end

    {url, api_key |> Request.headers()}
    |> Delivery.send_request()
    |> Delivery.parse_response(&queryable.resolve_entity_response/1)
  end

  @doc """
  Adds a filter condition to the query.

  This will work for Entries *requiring* a call to `content_type` before:

  ## Example
      import Contentful.Query
      alias Contentful.Delivery.Entries

      {:ok, entries, total: 1}
        = Entries
          |> content_type("dogs")
          |> by(name: "Hasso", breed: "dalmatian")
          |> fetch_all

  This will also allow for more complex queries using modifiers:

  ## Example
      import Contentful.Query
      alias Contentful.Delivery.Entries

      {:ok, entries, total: 100}
        = Entries
          |> content_type("dogs")
          |> by(name: [ne: "Hasso"], breed: "dalmatian")
          |> fetch_all

  Allowed modifiers are `[:in, :nin, :ne, :lte, :gte, :lt, :gt, :match, :exists]`. See the
  [official docs](https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/equality-operator)
  for adding search parameters this way.

  Working with `Contentful.Delivery.Assets` requires no `content_type` call:

  ## Example
      import Contentful.Query
      alias Contentful.Delivery.Assets

      {:ok, assets, total: 1} = Assets |> by(id: "foobar") |> fetch_all

  Calling `by/2` allows for adding multiple conditions to the query:

  ## Example
      import Contentful.Query
      alias Contentful.Delivery.Assets

      {:ok, assets, total: 200}
        = Assets
          |> by(tags: [nin: "maps"])
          |> fetch_all
  """
  @spec by(tuple(), list()) :: tuple()
  @doc since: "0.4.0"
  def by({Entries, parameters}, new_select_params) do
    # Fetch the accumulated filters directly. The previous implementation used
    # `Keyword.take([:select_params])`, which returns the *wrapped* keyword list
    # `[select_params: [...]]` and therefore nested earlier filters under a
    # `:select_params` key when `by/2` was called more than once.
    select_params = Keyword.get(parameters, :select_params, [])

    unless Keyword.has_key?(parameters, :content_type) do
      raise %ArgumentError{
        message: """
        Filtering for entries requires a content_type, example:

        Entries |> content_type("cats") |> by(name: "Gretchen")
        """
      }
    end

    {Entries,
     parameters |> Keyword.put(:select_params, select_params |> Keyword.merge(new_select_params))}
  end

  def by({Assets, parameters}, new_select_params) do
    # See the Entries clause above for why this uses Keyword.get/3.
    select_params = Keyword.get(parameters, :select_params, [])

    {Assets,
     parameters |> Keyword.put(:select_params, select_params |> Keyword.merge(new_select_params))}
  end

  def by(Entries, select_params) do
    by({Entries, []}, select_params)
  end

  def by(Assets, select_params) do
    by({Assets, []}, select_params)
  end

  def by(queryable, _select_params) do
    queryable
  end

  @doc """
  allows for full text search over all entries fields. The original nomenclature from the API docs is `query`.

  This has been renamed for clarity here.

  ## Example
      import Contentful.Query
      {Entries, [query: "Nyancat"]} = Entries |> search_full_text("Nyancat")

      # or, with full `fetch_all`
      {:ok, nyan_cats, total: 616} =
        Entries
        |> search_full_text("Nyancat")
        |> fetch_all
  """
  @spec search_full_text(tuple(), term()) :: tuple()
  @doc since: "0.4.0"
  def search_full_text({Entries, parameters}, term) do
    {Entries, parameters |> Keyword.put(:query, term)}
  end

  def search_full_text(Entries, term) do
    search_full_text({Entries, []}, term)
  end

  def search_full_text(queryable, _term) do
    queryable
  end

  @doc """
  will __resolve__ a query chain by constructing a `Stream.resource` around a possible API response
  allowing for lazy evaluation of queries. Can be helpful with translating collection calls of
  unknown size.

  Be careful when using this, as one can run into API rate limits quickly for very large sets.

  ## Examples
      import Contentful.Query
      alias Contentful.Delivery.{Assets, Spaces}

      # translates into two api calls in the background
      Assets |> stream |> Enum.take(2000)

      # you can use limit() to set the page size, in the example, stream would call the API
      # 10 times total.
      Assets |> limit(100) |> Enum.take(1000)

      # will not work with Spaces, though, as there is no collection endpoint
  """
  @spec stream(tuple(), String.t(), String.t(), String.t()) ::
          Enumerable.t()
  def stream(
        queryable,
        space \\ Configuration.get(:space_id),
        env \\ Configuration.get(:environment),
        api_key \\ Configuration.get(:access_token)
      )

  def stream(Spaces, _space, _env, _api_key) do
    {:error, [message: "Streaming a spaces collection is not supported"], total: 0}
  end

  def stream(args, space, env, api_key) do
    Contentful.Stream.stream(args, space, env, api_key)
  end

  @doc """
  Returns the filter modifiers supported by the Query syntax
  """
  @spec allowed_filter_modifiers() :: list()
  @doc since: "0.4.0"
  def allowed_filter_modifiers do
    @allowed_filter_modifiers
  end

  defp add_query_params(uri, []) do
    uri
  end

  defp add_query_params(%URI{} = uri, params) do
    uri |> Map.put(:query, URI.encode_query(params))
  end
end
|
lib/contentful/query.ex
| 0.904354 | 0.873539 |
query.ex
|
starcoder
|
defmodule Datix.DateTime do
  @moduledoc """
  A `DateTime` parser using `Calendar.strftime` format-string.
  """

  @doc """
  Parses a datetime string according to the given `format`.

  See the `Calendar.strftime` documentation for how to specify a format-string.

  The `:ok` tuple always contains a UTC datetime plus a `{zone_abbr, offset}`
  tuple with the time zone info found in the input.

  ## Options

    * `:calendar` - the calendar to build the `Date`, defaults to `Calendar.ISO`
    * `:preferred_date` - a string for the preferred format to show dates,
      it can't contain the `%x` format and defaults to `"%Y-%m-%d"`
      if the option is not received
    * `:month_names` - a list of the month names, if the option is not received
      it defaults to a list of month names in English
    * `:abbreviated_month_names` - a list of abbreviated month names, if the
      option is not received it defaults to a list of abbreviated month names in
      English
    * `:day_of_week_names` - a list of day names, if the option is not received
      it defaults to a list of day names in English
    * `:abbreviated_day_of_week_names` - a list of abbreviated day names, if the
      option is not received it defaults to a list of abbreviated day names in
      English
    * `:preferred_time` - a string for the preferred format to show times,
      it can't contain the `%X` format and defaults to `"%H:%M:%S"`
      if the option is not received
    * `:am_pm_names` - a keyword list with the names of the period of the day,
      defaults to `[am: "am", pm: "pm"]`.

  ## Examples

  ```elixir
  iex> Datix.DateTime.parse("2021/01/10 12:14:24", "%Y/%m/%d %H:%M:%S")
  {:ok, ~U[2021-01-10 12:14:24Z], {"UTC", 0}}

  iex> Datix.DateTime.parse("2018/06/27 11:23:55 CEST+0200", "%Y/%m/%d %H:%M:%S %Z%z")
  {:ok, ~U[2018-06-27 09:23:55Z], {"CEST", 7_200}}
  ```
  """
  @spec parse(String.t(), String.t(), list()) ::
          {:ok, DateTime.t(), {String.t(), integer()}}
          | {:error, :invalid_date}
          | {:error, :invalid_input}
          | {:error, {:parse_error, expected: String.t(), got: String.t()}}
          | {:error, {:conflict, [expected: term(), got: term(), modifier: String.t()]}}
          | {:error, {:invalid_string, [modifier: String.t()]}}
          | {:error, {:invalid_integer, [modifier: String.t()]}}
          | {:error, {:invalid_modifier, [modifier: String.t()]}}
  def parse(datetime_str, format_str, opts \\ []) do
    case Datix.strptime(datetime_str, format_str, opts) do
      {:ok, parsed} -> new(parsed, opts)
      error -> error
    end
  end

  @doc """
  Parses a datetime string according to the given `format`, erroring out for
  invalid arguments.

  This function is just defined for UTC datetimes.

  ## Examples

  ```elixir
  iex> Datix.DateTime.parse!("2018/06/27 11:23:55 UTC+0000", "%Y/%m/%d %H:%M:%S %Z%z")
  ~U[2018-06-27 11:23:55Z]

  iex> Datix.DateTime.parse!("2018/06/27 11:23:55 CEST+0200", "%Y/%m/%d %H:%M:%S %Z%z")
  ** (ArgumentError) parse!/3 is just defined for UTC, not for CEST
  ```
  """
  @spec parse!(String.t(), String.t(), list()) :: DateTime.t()
  def parse!(datetime_str, format_str, opts \\ []) do
    result =
      datetime_str
      |> Datix.strptime!(format_str, opts)
      |> new(opts)

    case result do
      {:ok, datetime, {"UTC", 0}} ->
        datetime

      {:ok, _datetime, {zone_abbr, _zone_offset}} ->
        raise ArgumentError, "parse!/3 is just defined for UTC, not for #{zone_abbr}"

      {:error, reason} ->
        raise ArgumentError, "cannot build date-time, reason: #{inspect(reason)}"
    end
  end

  @doc false
  def new(data, opts) do
    # Build date and time separately, combine, then normalize the zone info.
    with {:ok, date} <- Datix.Date.new(data, opts),
         {:ok, time} <- Datix.Time.new(data, opts),
         {:ok, datetime} <- DateTime.new(date, time),
         do: time_zone(datetime, data)
  end

  # Shifts the naive result to UTC according to whatever zone information the
  # parsed data carries; missing zone info is treated as UTC.
  defp time_zone(datetime, data) do
    abbr = Map.get(data, :zone_abbr)
    offset = Map.get(data, :zone_offset)

    cond do
      is_nil(abbr) and (is_nil(offset) or offset == 0) ->
        {:ok, datetime, {"UTC", 0}}

      is_nil(abbr) ->
        {:ok, DateTime.add(datetime, -offset), {abbr, offset}}

      abbr == "UTC" and offset == 0 ->
        {:ok, datetime, {"UTC", 0}}

      true ->
        {:ok, DateTime.add(datetime, -offset), {abbr, offset}}
    end
  end
end
|
lib/datix/date_time.ex
| 0.936008 | 0.928539 |
date_time.ex
|
starcoder
|
defmodule ExUnitFixtures.FixtureModule do
  @moduledoc """
  Sets up a module as an importable module of fixtures.

  This module can be used in any module that defines common fixtures to be
  shared amongst many tests.

  By using `ExUnitFixtures.FixtureModule` a module will become a fixture module.
  A fixture module can be used by other test cases, as well as imported into
  other fixture modules.

  For example:

      defmodule MyFixtures do
        use ExUnitFixtures.FixtureModule

        deffixture database do
          %{db: :db}
        end

        deffixture user(database) do
          %{user: user}
        end
      end

      defmodule MyTests do
        use ExUnitFixtures
        use MyFixtures
        use ExUnit.Case, async: true

        @fixtures: [:user]
        test "that we have a user", %{user: user} do
          assert user == :user
        end
      end

  #### Overriding Fixtures

  When importing fixtures into a module it's possible to override some of those
  fixtures, by calling deffixture with an already used name. The overriding
  fixture may depend on the existing fixture, but any other fixture in the
  current module or importing modules will only be able to get the overriding
  fixture.

      defmodule MyFixtures do
        use ExUnitFixtures.FixtureModule

        deffixture user do
          make_user()
        end
      end

      defmodule InactiveUserTests do
        deffixture user(user) do
          %{user | active: false}
        end

        @fixtures: :user
        test "that user is inactive", %{user: user} do
          assert user.active == false
        end
      end

  #### Loading Fixture Code

  All the examples in this file have shown a fixture module defined within the
  same file as the tests. This is not too likely to happen in an actual project.
  It's more likely that you'd want to define a fixture module in one file and
  then import it into many other files.

  By default ExUnitFixtures makes this fairly easy - any file named
  `fixtures.exs` in any folder underneath `test/` will automatically be loaded
  into the VM when calling `ExUnitFixtures.start/1`.

  Any fixture modules defined within these files will also automatically be
  imported into the current module as documented in `ExUnitFixtures.AutoImport`.

  If you wish to load in fixtures that are not contained within a `fixtures.exs`
  file, then you should load them into the VM with `Code.require_file` in your
  `test_helpers.exs` and then manually `use` the fixture module.
  """

  defmacro __using__(_opts) do
    quote do
      # Accumulates fixtures defined in this module via `deffixture`.
      Module.register_attribute __MODULE__, :__fixtures, accumulate: true

      import ExUnitFixtures

      # Resolves the complete fixture set just before compilation finishes.
      @before_compile ExUnitFixtures.FixtureModule

      # Accumulates other fixture modules that get `use`d by this module.
      Module.register_attribute(__MODULE__,
                                :fixture_modules,
                                accumulate: true)

      # Registers this fixture module globally so it can be auto-imported.
      ExUnitFixtures.Imp.ModuleStore.register(__MODULE__, __ENV__.file)

      if Application.get_env(:ex_unit_fixtures, :auto_import) do
        use ExUnitFixtures.AutoImport
      end

      # `use`-ing this fixture module from elsewhere registers it there.
      defmacro __using__(opts) do
        ExUnitFixtures.FixtureModule.register_fixtures(__MODULE__, opts)
      end
    end
  end

  defmacro __before_compile__(_) do
    quote do
      # Preprocess once at compile time: own fixtures + imported modules.
      @fixtures_ ExUnitFixtures.Imp.Preprocessing.preprocess_fixtures(
        @__fixtures, Enum.uniq(@fixture_modules)
      )

      # Returns all fixtures available in this module.
      def fixtures do
        @fixtures_
      end
    end
  end

  @doc """
  Body of the nested `__using__` func in any module that has used
  `FixtureModule`.
  """
  def register_fixtures(fixture_module, _opts \\ []) do
    quote do
      Module.register_attribute(__MODULE__,
                                :fixture_modules,
                                accumulate: true)
      @fixture_modules unquote(fixture_module)
    end
  end
end
|
lib/ex_unit_fixtures/fixture_module.ex
| 0.888976 | 0.616662 |
fixture_module.ex
|
starcoder
|
defmodule HAP.Accessory do
  @moduledoc """
  Represents a single accessory object, containing a number of services
  """

  defstruct name: "Generic HAP Accessory",
            model: "Generic HAP Model",
            manufacturer: "Generic HAP Manufacturer",
            serial_number: "Generic Serial Number",
            firmware_revision: "1.0",
            services: []

  @typedoc """
  Represents an accessory consisting of a number of services. Contains the following
  fields:

  * `name`: The name to assign to this accessory, for example 'Ceiling Fan'
  * `model`: The model name to assign to this accessory, for example 'FanCo Whisper III'
  * `manufacturer`: The manufacturer of this accessory, for example 'FanCo'
  * `serial_number`: The serial number of this accessory, for example '0012345'
  * `firmware_revision`: The firmware revision of this accessory, for example '1.0'
  * `services`: A list of services to include in this accessory
  """
  @type t :: %__MODULE__{
          name: name(),
          model: model(),
          manufacturer: manufacturer(),
          serial_number: serial_number(),
          firmware_revision: firmware_revision(),
          services: [HAP.Service.t()]
        }

  @typedoc """
  The name to advertise for this accessory, for example 'HAP Light Bulb'
  """
  @type name :: String.t()

  @typedoc """
  The model of this accessory, for example 'HAP Light Bulb Supreme'
  """
  @type model :: String.t()

  @typedoc """
  The manufacturer of this accessory, for example 'HAP Co.'
  """
  @type manufacturer :: String.t()

  @typedoc """
  The serial number of this accessory, for example '0012345'
  """
  @type serial_number :: String.t()

  @typedoc """
  The firmware revision of this accessory, for example '1.0' or '1.0.1'
  """
  @type firmware_revision :: String.t()

  @doc false
  # Prepends the two mandatory services (accessory & protocol information)
  # and compiles every service of the accessory.
  def compile(%__MODULE__{services: services} = accessory) do
    standard_services = [
      %HAP.Services.AccessoryInformation{accessory: accessory},
      %HAP.Services.ProtocolInformation{}
    ]

    compiled_services = Enum.map(standard_services ++ services, &HAP.Service.compile/1)

    %__MODULE__{services: compiled_services}
  end

  @doc false
  # Looks up a service by its instance id; -70409 is the HAP error code
  # returned when the resource does not exist.
  def get_service(%__MODULE__{services: services}, iid) do
    with {:ok, index} <- HAP.IID.service_index(iid),
         %HAP.Service{} = service <- Enum.at(services, index) do
      {:ok, service}
    else
      _ -> {:error, -70_409}
    end
  end
end
|
lib/hap/accessory.ex
| 0.856737 | 0.466299 |
accessory.ex
|
starcoder
|
defmodule Day06 do
  # Entry points for both puzzle parts: parse the raw input, then delegate
  # to the part-specific solver.

  def part1(input) do
    input |> parse() |> Day06Part1.solve()
  end

  def part2(input) do
    input |> parse() |> Day06Part2.solve()
  end

  defp parse(input), do: InstructionParser.parse(input)
end
defmodule Day06Part1 do
  @moduledoc false
  # The grid is a sparse map of row index -> integer bitmask of lit columns.

  import Bitwise

  # Applies every instruction to an empty grid and counts the lit lights.
  def solve(instructions) do
    instructions
    |> Enum.reduce(%{}, &apply_instruction/2)
    |> count_lights()
  end

  defp apply_instruction({:turn_on, coordinates}, grid) do
    update_lights(grid, coordinates, fn row_bits, mask -> row_bits ||| mask end)
  end

  defp apply_instruction({:turn_off, coordinates}, grid) do
    update_lights(grid, coordinates, fn row_bits, mask -> row_bits &&& bnot(mask) end)
  end

  defp apply_instruction({:toggle, coordinates}, grid) do
    update_lights(grid, coordinates, fn row_bits, mask -> bxor(row_bits, mask) end)
  end

  # Builds a contiguous bitmask covering the column range and applies the
  # update function to every affected row.
  defp update_lights(grid, {{top, left}, {bottom, right}}, update) do
    width = right - left + 1
    mask = ((1 <<< width) - 1) <<< left

    Enum.reduce(top..bottom, grid, fn row, acc ->
      Map.update(acc, row, update.(0, mask), &update.(&1, mask))
    end)
  end

  defp count_lights(grid) do
    grid
    |> Map.values()
    |> Enum.map(&count_bits/1)
    |> Enum.sum()
  end

  # Kernighan's trick: clearing the lowest set bit once per set bit.
  defp count_bits(n, acc \\ 0)
  defp count_bits(0, acc), do: acc
  defp count_bits(n, acc), do: count_bits(n &&& (n - 1), acc + 1)
end
defmodule Day06Part2 do
  @moduledoc false
  # Brightness grid backed by an Erlang :counters array of 1000x1000 cells.

  @grid_side 1000

  def solve(instructions) do
    grid = :counters.new(@grid_side * @grid_side, [])

    Enum.each(instructions, fn
      {:turn_on, coords} -> adjust(grid, coords, fn value -> value + 1 end)
      {:turn_off, coords} -> adjust(grid, coords, fn value -> max(value - 1, 0) end)
      {:toggle, coords} -> adjust(grid, coords, fn value -> value + 2 end)
    end)

    total_brightness(grid)
  end

  # Applies `update` to every cell in the rectangle; :counters is 1-indexed.
  defp adjust(grid, {{top, left}, {bottom, right}}, update) do
    for row <- top..bottom, col <- left..right do
      key = row * @grid_side + col + 1
      :counters.put(grid, key, update.(:counters.get(grid, key)))
    end

    :ok
  end

  defp total_brightness(grid) do
    Enum.reduce(1..(@grid_side * @grid_side), 0, fn key, acc ->
      acc + :counters.get(grid, key)
    end)
  end
end
# Parses lines like "turn on 0,0 through 999,999" into
# `{:turn_on | :turn_off | :toggle, {{x1, y1}, {x2, y2}}}` tuples.
defmodule InstructionParser do
  import NimbleParsec

  # "x,y" integers -> {x, y}
  defp pack_coordinate([x, y]) do
    {x, y}
  end

  # two coordinates -> {{x1, y1}, {x2, y2}}
  defp pack_pair([coord1, coord2]) do
    {coord1, coord2}
  end

  # tags the coordinate pair with the instruction atom
  defp pack([coordinates], operation) do
    {operation, coordinates}
  end

  blanks = ascii_string([?\s], min: 1)

  coordinate = integer(min: 1)
    |> ignore(string(","))
    |> integer(min: 1)
    |> reduce({:pack_coordinate, []})

  # "<coordinate> through <coordinate>"
  two_coordinates = coordinate
    |> ignore(blanks)
    |> ignore(string("through"))
    |> ignore(blanks)
    |> concat(coordinate)
    |> reduce({:pack_pair, []})

  turn_on = ignore(string("turn on"))
    |> ignore(blanks)
    |> concat(two_coordinates)
    |> reduce({:pack, [:turn_on]})

  turn_off = ignore(string("turn off"))
    |> ignore(blanks)
    |> concat(two_coordinates)
    |> reduce({:pack, [:turn_off]})

  toggle = ignore(string("toggle"))
    |> ignore(blanks)
    |> concat(two_coordinates)
    |> reduce({:pack, [:toggle]})

  # A line is exactly one of the three instructions, nothing trailing.
  defparsecp :main,
    choice([turn_on, turn_off, toggle])
    |> eos()

  # Parses each input line, crashing on malformed lines on purpose.
  def parse(input) do
    Enum.map(input, fn(line) ->
      {:ok, [res], _, _, _, _} = main(line)
      res
    end)
  end
end
|
day06/lib/day06.ex
| 0.572006 | 0.719655 |
day06.ex
|
starcoder
|
defmodule Mix.Releases.Profile do
  @moduledoc """
  Represents the configuration profile for a specific environment and release.
  More generally, a release has a profile, as does an environment, and
  when determining the configuration for a release in a given environment, the
  environment profile overrides the release profile.
  """
  defstruct output_dir: nil,
            vm_args: nil, # path to a custom vm.args
            cookie: nil,
            config: nil, # path to a custom config.exs
            sys_config: nil, # path to a custom sys.config
            code_paths: nil, # list of additional code paths to search
            executable: false, # whether it's an executable release
            exec_opts: [transient: false], # options for an executable release
            erl_opts: nil, # string to be passed to erl
            run_erl_env: nil, # string to be passed to run_erl
            dev_mode: nil, # boolean
            include_erts: nil, # boolean | "path/to/erts"
            include_src: nil, # boolean
            include_system_libs: nil, # boolean | "path/to/libs"
            included_configs: [], # list of path representing additional config files
            strip_debug_info: nil, # boolean
            plugins: [], # list of module names
            overlay_vars: [], # keyword list
            overlays: [], # overlay list
            overrides: nil, # override list [app: app_path]
            commands: nil, # keyword list
            pre_configure_hook: nil, # path or nil
            post_configure_hook: nil, # path or nil
            pre_start_hook: nil, # path or nil
            post_start_hook: nil, # path or nil
            pre_stop_hook: nil, # path or nil
            post_stop_hook: nil, # path or nil
            pre_upgrade_hook: nil, # path or nil
            post_upgrade_hook: nil, # path or nil
            pre_configure_hooks: nil, # path or nil
            post_configure_hooks: nil, # path or nil
            pre_start_hooks: nil, # path or nil
            post_start_hooks: nil, # path or nil
            pre_stop_hooks: nil, # path or nil
            post_stop_hooks: nil, # path or nil
            pre_upgrade_hooks: nil, # path or nil
            post_upgrade_hooks: nil # path or nil

  @type t :: %__MODULE__{
          output_dir: nil | String.t,
          vm_args: nil | String.t,
          # was `nil | Atom.t` - `Atom.t/0` does not exist; cookies are plain atoms
          cookie: nil | atom(),
          config: nil | String.t,
          sys_config: nil | String.t,
          code_paths: nil | [String.t],
          erl_opts: nil | String.t,
          run_erl_env: nil | String.t,
          dev_mode: nil | boolean,
          include_erts: nil | boolean | String.t,
          include_src: nil | boolean,
          include_system_libs: nil | boolean | String.t,
          included_configs: [String.t],
          strip_debug_info: nil | boolean,
          plugins: [module()],
          overlay_vars: nil | Keyword.t,
          overlays: Mix.Releases.Overlay.overlay,
          overrides: nil | [{atom, String.t}],
          commands: nil | [{atom, String.t}],
          pre_configure_hook: nil | String.t,
          post_configure_hook: nil | String.t,
          pre_start_hook: nil | String.t,
          post_start_hook: nil | String.t,
          pre_stop_hook: nil | String.t,
          post_stop_hook: nil | String.t,
          pre_upgrade_hook: nil | String.t,
          post_upgrade_hook: nil | String.t,
          pre_configure_hooks: nil | String.t,
          post_configure_hooks: nil | String.t,
          pre_start_hooks: nil | String.t,
          post_start_hooks: nil | String.t,
          pre_stop_hooks: nil | String.t,
          post_stop_hooks: nil | String.t,
          pre_upgrade_hooks: nil | String.t,
          post_upgrade_hooks: nil | String.t
        }
end
|
deps/distillery/lib/mix/lib/releases/models/profile.ex
| 0.588771 | 0.405272 |
profile.ex
|
starcoder
|
defmodule LibPE.Section do
  @moduledoc false
  alias LibPE.Section

  # One COFF/PE section header plus its resolved payloads:
  # `raw_data` is the on-disk slice, `virtual_data` the in-memory view
  # (padded to `virtual_size`), `padding` a sample of the trailing filler.
  defstruct [
    :name,
    :padding,
    :virtual_data,
    :virtual_size,
    :virtual_address,
    :raw_data,
    :size_of_raw_data,
    :pointer_to_raw_data,
    :pointer_to_relocations,
    :pointer_to_linenumbers,
    :number_of_relocations,
    :number_of_linenumbers,
    :flags
  ]

  @doc """
  Parses `number` consecutive section headers from the binary `rest`.

  `full_image` is the complete PE image, used to resolve each header's
  raw and virtual data slices. Returns `{sections, remaining_binary}`
  with the sections in their original file order.
  """
  def parse(rest, number, full_image) do
    # `Enum.map_reduce/3` threads the unparsed remainder through each step and
    # collects results in order, avoiding the previous quadratic
    # `sections ++ [section]` append inside a reduce.
    Enum.map_reduce(List.duplicate(nil, number), rest, fn _, rest ->
      parse_section(rest, full_image)
    end)
  end

  # Decodes a single 40-byte section header and resolves its data slices
  # against the full image.
  defp parse_section(
         <<name::binary-size(8), virtual_size::little-size(32), virtual_address::little-size(32),
           size_of_raw_data::little-size(32), pointer_to_raw_data::little-size(32),
           pointer_to_relocations::little-size(32), pointer_to_linenumbers::little-size(32),
           number_of_relocations::little-size(16), number_of_linenumbers::little-size(16),
           flags::little-size(32), rest::binary()>>,
         full_image
       ) do
    raw_data = binary_part(full_image, pointer_to_raw_data, size_of_raw_data)

    # According to spec there should only be a zero padding difference between raw_data
    # and virtual data... BUT in production we can see that Microsoft is using other paddings
    # such as 'PADDINGXX' in some cases :-(
    virtual_data =
      binary_part(full_image, pointer_to_raw_data, min(size_of_raw_data, virtual_size))
      |> LibPE.binary_pad_trailing(virtual_size)

    # Keep (up to 16 bytes of) the actual padding so non-standard filler
    # can be observed/reproduced later.
    padding =
      if virtual_size >= size_of_raw_data do
        "\0"
      else
        binary_part(raw_data, virtual_size, min(16, size_of_raw_data - virtual_size))
      end

    section = %Section{
      name: String.trim_trailing(name, "\0"),
      padding: padding,
      virtual_size: virtual_size,
      virtual_address: virtual_address,
      raw_data: raw_data,
      virtual_data: virtual_data,
      size_of_raw_data: size_of_raw_data,
      pointer_to_raw_data: pointer_to_raw_data,
      pointer_to_relocations: pointer_to_relocations,
      pointer_to_linenumbers: pointer_to_linenumbers,
      number_of_relocations: number_of_relocations,
      number_of_linenumbers: number_of_linenumbers,
      flags: LibPE.SectionFlags.decode(flags)
    }

    {section, rest}
  end

  @doc """
  Encodes a section struct back into its 40-byte header binary.

  Only the header is produced here; the section payload itself is not part
  of the returned binary.
  """
  def encode(%Section{
        name: name,
        virtual_size: virtual_size,
        virtual_address: virtual_address,
        size_of_raw_data: size_of_raw_data,
        pointer_to_raw_data: pointer_to_raw_data,
        pointer_to_relocations: pointer_to_relocations,
        pointer_to_linenumbers: pointer_to_linenumbers,
        number_of_relocations: number_of_relocations,
        number_of_linenumbers: number_of_linenumbers,
        flags: flags
      }) do
    flags = LibPE.SectionFlags.encode(flags)
    # The name field is a fixed 8-byte, NUL-padded slot.
    name = LibPE.binary_pad_trailing(name, 8)

    <<name::binary-size(8), virtual_size::little-size(32), virtual_address::little-size(32),
      size_of_raw_data::little-size(32), pointer_to_raw_data::little-size(32),
      pointer_to_relocations::little-size(32), pointer_to_linenumbers::little-size(32),
      number_of_relocations::little-size(16), number_of_linenumbers::little-size(16),
      flags::little-size(32)>>
  end
end
|
lib/libpe/section.ex
| 0.544559 | 0.465691 |
section.ex
|
starcoder
|
defmodule Microsoft.Azure.TemplateLanguageExpressions.JSONParser do
  # Copyright (c) Microsoft Corporation.
  # Licensed under the MIT License.
  import NimbleParsec

  @moduledoc """
  A whitespace- and comment-preserving JSON parser built on `NimbleParsec`.

  `parse/1` returns a tree of structs (`JSONDocument`, `JSONArray`,
  `JSONObject`, ...) that record the whitespace and `//` / `/* */` comments
  surrounding each token, so `encode/1` can re-render the document with that
  trivia re-inserted, and `to_elixir/1` can strip it down to plain terms.
  """

  # Root node: the top-level value plus leading/trailing trivia.
  defmodule JSONDocument do
    @derive {Inspect, only: [:value]}
    defstruct value: nil, space_before: nil, space_after: nil
    use Accessible

    # defimpl Inspect, for: __MODULE__ do
    #   def inspect(doc, _opts) do
    #     "JSONDocument \"#{doc |> JSONParser.encode()}\""
    #   end
    # end
  end

  # Array node; `space` is only set for empty arrays (trivia between brackets).
  defmodule JSONArray do
    @derive {Inspect, except: [:space]}
    defstruct value: nil, space: nil
    use Accessible
  end

  # Object node; `space` is only set for empty objects (trivia between braces).
  defmodule JSONObject do
    @derive {Inspect, only: [:value]}
    defstruct value: nil, space: nil
    use Accessible
  end

  # Float literal; keeps the original source string for lossless re-encoding.
  defmodule JSONFloatObject do
    @derive {Inspect, only: [:value]}
    defstruct value: nil, string: nil
    use Accessible
  end

  # One array element together with its surrounding trivia.
  defmodule JSONArrayElement do
    @derive {Inspect, only: [:value]}
    defstruct value: nil, space_before: nil, space_after: nil
    use Accessible
  end

  # One `"key": value` pair; the four space_* slots capture trivia
  # before/after the key and before/after the value.
  defmodule JSONObjectElement do
    @derive {Inspect, only: [:key, :value]}
    defstruct key: nil,
              value: nil,
              space_before_key: nil,
              space_after_key: nil,
              space_before_value: nil,
              space_after_value: nil

    use Accessible

    # defimpl Inspect, for: __MODULE__ do
    #   def inspect(e, _opts) do
    #     "\"#{e.key}\": #{inspect e.value}"
    #   end
    # end
  end

  # Optional numeric sign.
  t_sign =
    optional(
      choice([
        string("+"),
        string("-")
      ])
    )

  defp reduce_to_integer(acc),
    do:
      acc
      |> Enum.join()
      |> String.to_integer(10)

  # Integer literal: digits NOT followed by an exponent or decimal point
  # (those inputs belong to `t_number` instead).
  t_integer =
    optional(t_sign)
    |> ascii_string([?0..?9], min: 1)
    |> lookahead_not(
      choice([
        string("e"),
        string("E"),
        string(".")
      ])
    )
    |> reduce(:reduce_to_integer)

  defp reduce_to_float(acc) do
    with float_as_string <- acc |> Enum.join(),
         {float_value, ""} <- float_as_string |> Float.parse() do
      %JSONFloatObject{value: float_value, string: float_as_string}
    end
  end

  # Number literal with optional fraction and exponent parts.
  t_number =
    optional(t_sign)
    |> ascii_string([?0..?9], min: 1)
    |> optional(
      string(".")
      |> ascii_string([?0..?9], min: 1)
    )
    |> optional(
      choice([
        string("e"),
        string("E")
      ])
      |> optional(t_sign)
      |> ascii_string([?0..?9], min: 1)
    )
    |> reduce(:reduce_to_float)

  t_boolean =
    choice([
      string("true") |> replace(true),
      string("false") |> replace(false)
    ])

  t_null =
    string("null")
    |> replace(nil)

  # " \u0009 \u000d\u000a " |> whitespace()
  # One or more whitespace characters (space, CR, LF, tab) as a binary.
  t_whitespace =
    ascii_char([0x20, 0x0D, 0x0A, 0x09])
    |> times(min: 1)
    |> reduce({List, :to_string, []})

  # t_newline =
  #   ascii_char([0x0D, 0x0A]) |> times(min: 1)
  # t_online_comment =
  #   string("#")

  # `repeat_while` guards: halt a `//` comment at the first line terminator.
  defp not_single_line_end(<<?\r, ?\n, _::binary>>, context, _, _), do: {:halt, context}
  defp not_single_line_end(<<?\r, _::binary>>, context, _, _), do: {:halt, context}
  defp not_single_line_end(<<?\n, _::binary>>, context, _, _), do: {:halt, context}
  defp not_single_line_end(_, context, _, _), do: {:cont, context}

  t_comment_single_line =
    string("//")
    |> repeat_while(
      utf8_char([]),
      {:not_single_line_end, []}
    )
    |> choice([
      string("\r\n"),
      string("\r"),
      string("\n")
    ])
    |> reduce({List, :to_string, []})

  # Halt a `/* ... */` comment at the closing `*/`.
  defp not_multi_line_end(<<?*, ?/, _::binary>>, context, _, _), do: {:halt, context}
  defp not_multi_line_end(_, context, _, _), do: {:cont, context}

  t_comment_multi_line =
    string("/*")
    |> repeat_while(
      utf8_char([]),
      {:not_multi_line_end, []}
    )
    |> string("*/")
    |> reduce({List, :to_string, []})

  # Normalizes a tagged whitespace/comment run: empty -> nil, charlist -> binary.
  defp reduce_to_whitespace_or_comment([{:wsoc, []}]), do: {:wsoc, nil}
  defp reduce_to_whitespace_or_comment([{:wsoc, wsoc}]), do: {:wsoc, wsoc |> List.to_string()}
  defp reduce_to_whitespace_or_comment(wsoc) when is_binary(wsoc), do: {:wsoc, wsoc}

  # "wsoc" = whitespace or comment; any run of either, tagged for the reducers.
  t_whitespace_or_comment =
    repeat(
      choice([
        t_whitespace,
        t_comment_single_line,
        t_comment_multi_line
      ])
    )
    |> tag(:wsoc)
    |> reduce(:reduce_to_whitespace_or_comment)

  # Halt string content at the closing, unescaped quote.
  defp repeat_while_not_quote(<<?", _::binary>>, context, _, _), do: {:halt, context}
  defp repeat_while_not_quote(_, context, _, _), do: {:cont, context}

  # https://hexdocs.pm/nimble_parsec/NimbleParsec.html#utf8_string/3
  # regular: '0020' . '10ffff' - '"' - '\'
  # escapes: \" \\ \/ \b \f \n \r \t \uFFFF
  # NOTE(review): only the \" escape is handled explicitly here; other JSON
  # escape sequences pass through as raw characters — confirm intended.
  t_string =
    ignore(string(~S/"/))
    |> repeat_while(
      choice([
        replace(string(~S/\"/), ~S/"/),
        utf8_char([])
      ]),
      {:repeat_while_not_quote, []}
    )
    |> ignore(string(~S/"/))
    |> reduce({List, :to_string, []})

  # Wraps a parsed array value and its optional surrounding trivia.
  defp reduce_to_element(acc) do
    case acc do
      [value] ->
        %JSONArrayElement{value: value}

      [value, {:wsoc, wsoc2}] ->
        %JSONArrayElement{value: value, space_after: wsoc2}

      [{:wsoc, wsoc1}, value] ->
        %JSONArrayElement{value: value, space_before: wsoc1}

      [{:wsoc, wsoc1}, value, {:wsoc, wsoc2}] ->
        %JSONArrayElement{value: value, space_before: wsoc1, space_after: wsoc2}
    end
  end

  t_element =
    optional(t_whitespace_or_comment)
    |> concat(parsec(:t_value))
    |> optional(t_whitespace_or_comment)
    |> reduce(:reduce_to_element)

  defp reduce_to_array([{:array_empty, [{:wsoc, ws}]}]),
    do: %JSONArray{value: [], space: ws}

  defp reduce_to_array([{:array, array}]), do: %JSONArray{value: array}

  t_array =
    ignore(string("["))
    |> optional(t_element)
    |> repeat(
      ignore(string(","))
      |> concat(t_element)
    )
    |> ignore(string("]"))
    |> tag(:array)
    |> reduce(:reduce_to_array)

  # `[]` containing only trivia needs its own rule so the trivia is kept.
  t_array_empty =
    ignore(string("["))
    |> optional(t_whitespace_or_comment)
    |> ignore(string("]"))
    |> tag(:array_empty)
    |> reduce(:reduce_to_array)

  # Renders stored trivia; nil means "no trivia recorded".
  defp ws(nil), do: ""
  defp ws(whitespace) when is_binary(whitespace), do: whitespace

  # One clause per combination of present/absent trivia around key and value
  # (wsoc1 = before key, wsoc2 = after key, wsoc3 = before value, wsoc4 = after value).
  defp reduce_to_member(acc) do
    case acc do
      [key, {:colon, ":"}, value] ->
        %JSONObjectElement{key: key, value: value}

      [key, {:colon, ":"}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{key: key, value: value, space_after_value: wsoc4}

      [key, {:colon, ":"}, {:wsoc, wsoc3}, value] ->
        %JSONObjectElement{key: key, value: value, space_before_value: wsoc3}

      [key, {:colon, ":"}, {:wsoc, wsoc3}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_value: wsoc3,
          space_after_value: wsoc4
        }

      [key, {:wsoc, wsoc2}, {:colon, ":"}, value] ->
        %JSONObjectElement{key: key, value: value, space_after_key: wsoc2}

      [key, {:wsoc, wsoc2}, {:colon, ":"}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_after_key: wsoc2,
          space_after_value: wsoc4
        }

      [key, {:wsoc, wsoc2}, {:colon, ":"}, {:wsoc, wsoc3}, value] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_after_key: wsoc2,
          space_before_value: wsoc3
        }

      [key, {:wsoc, wsoc2}, {:colon, ":"}, {:wsoc, wsoc3}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_after_key: wsoc2,
          space_before_value: wsoc3,
          space_after_value: wsoc4
        }

      [{:wsoc, wsoc1}, key, {:colon, ":"}, value] ->
        %JSONObjectElement{key: key, value: value, space_before_key: wsoc1}

      [{:wsoc, wsoc1}, key, {:colon, ":"}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_after_value: wsoc4
        }

      [{:wsoc, wsoc1}, key, {:colon, ":"}, {:wsoc, wsoc3}, value] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_before_value: wsoc3
        }

      [{:wsoc, wsoc1}, key, {:colon, ":"}, {:wsoc, wsoc3}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_before_value: wsoc3,
          space_after_value: wsoc4
        }

      [{:wsoc, wsoc1}, key, {:wsoc, wsoc2}, {:colon, ":"}, value] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_after_key: wsoc2
        }

      [{:wsoc, wsoc1}, key, {:wsoc, wsoc2}, {:colon, ":"}, value, {:wsoc, wsoc4}] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_after_key: wsoc2,
          space_after_value: wsoc4
        }

      [{:wsoc, wsoc1}, key, {:wsoc, wsoc2}, {:colon, ":"}, {:wsoc, wsoc3}, value] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_after_key: wsoc2,
          space_before_value: wsoc3
        }

      [
        {:wsoc, wsoc1},
        key,
        {:wsoc, wsoc2},
        {:colon, ":"},
        {:wsoc, wsoc3},
        value,
        {:wsoc, wsoc4}
      ] ->
        %JSONObjectElement{
          key: key,
          value: value,
          space_before_key: wsoc1,
          space_after_key: wsoc2,
          space_before_value: wsoc3,
          space_after_value: wsoc4
        }
    end
  end

  # whitespace #1
  t_member =
    optional(t_whitespace_or_comment)
    # key
    |> concat(t_string)
    # whitespace #2
    |> optional(t_whitespace_or_comment)
    # :
    |> concat(string(":") |> unwrap_and_tag(:colon))
    # whitespace #3
    |> optional(t_whitespace_or_comment)
    # value
    |> concat(parsec(:t_value))
    # whitespace #4
    |> optional(t_whitespace_or_comment)
    |> reduce(:reduce_to_member)

  defp reduce_to_object([{:object_empty, [{:wsoc, ws}]}]),
    do: %JSONObject{value: [], space: ws}

  defp reduce_to_object([{:object, array}]), do: %JSONObject{value: array}

  t_object =
    ignore(string("{"))
    |> optional(t_member)
    |> repeat(
      ignore(ascii_char([?,]))
      |> concat(t_member)
    )
    |> ignore(string("}"))
    |> tag(:object)
    |> reduce(:reduce_to_object)

  # `{}` containing only trivia needs its own rule so the trivia is kept.
  t_object_empty =
    ignore(string("{"))
    |> optional(t_whitespace_or_comment)
    |> ignore(string("}"))
    |> tag(:object_empty)
    |> reduce(:reduce_to_object)

  # Any JSON value; ordering matters (e.g. t_integer before t_number).
  t_value =
    choice([
      t_null,
      t_string,
      t_boolean,
      t_integer,
      t_number,
      t_array,
      t_array_empty,
      t_object,
      t_object_empty
    ])

  # Wraps the top-level value (and its optional trivia) into a JSONDocument.
  defp reduce_surrounded_value(v) do
    case v do
      [{:json, [v]}] ->
        %JSONDocument{value: v}

      [{:json, [{:wsoc, ws1}, v]}] ->
        %JSONDocument{value: v, space_before: ws1}

      [{:json, [v, {:wsoc, ws2}]}] ->
        %JSONDocument{value: v, space_after: ws2}

      [{:json, [{:wsoc, ws1}, v, {:wsoc, ws2}]}] ->
        %JSONDocument{value: v, space_before: ws1, space_after: ws2}
    end
  end

  t_surrounded_value =
    optional(t_whitespace_or_comment)
    |> concat(t_value)
    |> optional(t_whitespace_or_comment)
    |> tag(:json)
    |> reduce(:reduce_surrounded_value)

  defparsecp(:t_value, t_value)
  defparsecp(:t_surrounded_value, t_surrounded_value)

  @doc """
  Parses a JSON(-with-comments) binary into a `JSONDocument` tree.

  Returns `{:error, unparsed_rest}` when the input cannot be fully consumed.
  """
  def parse(v) do
    case v |> t_surrounded_value() do
      {:ok, [result], "", _, _, _} -> result
      {:ok, _, unparsed, _, _, _} -> {:error, unparsed}
      {:error, _, unparsed, _, _, _} -> {:error, unparsed}
    end
  end

  defp map_join(values, mapper) when is_list(values) and is_function(mapper) do
    values
    |> Enum.map(mapper)
    |> Enum.join(",")
  end

  # Bracketing/trivia helpers used by encode/1.
  defp surrounded(v, :object), do: "{#{v}}"
  defp surrounded(v, :array), do: "[#{v}]"
  defp surrounded(v, s1, s2), do: "#{ws(s1)}#{v}#{ws(s2)}"

  # Fallback encoder for plain values (numbers, strings, booleans, nil).
  defp poison_encode(value) do
    case value |> Poison.encode() do
      {:ok, v} -> v
      error -> error |> IO.inspect(label: :problem)
    end
  end

  @doc """
  Renders a parsed node tree back to a JSON string, re-inserting the
  recorded whitespace/comment trivia. Plain leaf values are delegated
  to `Poison.encode/1`.
  """
  def encode(v) do
    case v do
      %JSONDocument{value: value, space_before: wsoc1, space_after: wsoc2} ->
        value |> encode() |> surrounded(wsoc1, wsoc2)

      %JSONArray{value: [], space: ws} ->
        ws |> surrounded(:array)

      %JSONArray{value: values, space: nil} ->
        values |> map_join(&encode/1) |> surrounded(:array)

      %JSONArrayElement{value: element_value, space_before: wsoc1, space_after: wsoc2} ->
        element_value |> encode() |> surrounded(wsoc1, wsoc2)

      %JSONObject{value: [], space: ws} ->
        ws |> surrounded(:object)

      %JSONObject{value: values, space: nil} ->
        values |> map_join(&encode/1) |> surrounded(:object)

      %JSONObjectElement{
        key: key,
        value: value,
        space_before_key: wsoc1,
        space_after_key: wsoc2,
        space_before_value: wsoc3,
        space_after_value: wsoc4
      } ->
        "#{ws(wsoc1)}\"#{key}\"#{ws(wsoc2)}:#{ws(wsoc3)}#{value |> encode()}#{ws(wsoc4)}"

      %JSONFloatObject{value: _value, string: float_as_string} ->
        float_as_string

      value when is_list(value) ->
        value |> map_join(&encode/1) |> surrounded(:array)

      {:error, error_message} when is_binary(error_message) ->
        "\"Evaluation error: #{error_message}\""

      value ->
        value |> poison_encode()
    end
  end

  @doc """
  Converts a parsed tree into plain Elixir terms (maps, lists, numbers,
  strings), discarding all recorded whitespace/comment trivia.
  """
  def to_elixir(v) do
    case v do
      %JSONDocument{value: value} -> value |> to_elixir()
      %JSONArray{value: values} -> values |> to_elixir()
      %JSONArrayElement{value: value} -> value |> to_elixir()
      %JSONObject{value: values} -> values |> Enum.map(&to_elixir/1) |> Enum.into(%{})
      %JSONObjectElement{key: key, value: value} -> {key, value |> to_elixir()}
      %JSONFloatObject{value: float_value} -> float_value
      array when is_list(array) -> array |> Enum.map(&to_elixir/1) |> Enum.into([])
      value -> value
    end
  end

  # Walks nested members by key path, returning `alternative` on any miss.
  # NOTE(review): the [:value, :value] access suggests a document-wrapped
  # object is expected here — confirm against callers.
  def get(x, path, alternative \\ nil)
  def get(nil, _, alternative), do: alternative
  def get(x, [], _), do: x

  def get(x, [name | tail], alternative) when x != nil,
    do:
      x
      |> get_in([:value, :value])
      |> Enum.find(&(&1 |> Map.get(:key) == name))
      |> get(tail, alternative)

  # Fetches a named property from an object node (or a plain map).
  def fetch({:property, name}, value = %JSONObject{}) when is_binary(name),
    do:
      value
      |> Map.get(:value)
      |> Enum.find(&(&1 |> Map.get(:key) == name))
      |> Map.get(:value)

  def fetch({:property, name}, value)
      when is_binary(name) and is_map(value),
      do: value |> Map.fetch!(name)

  # Indexer access: by string key or integer position.
  def fetch({:indexer, name}, array = %JSONArray{}) when is_binary(name),
    do:
      array
      |> Map.get(:value)
      |> Enum.find(&(&1 |> Map.get(:key) == name))
      |> Map.get(:value)

  def fetch({:indexer, name}, value = %{}) when is_binary(name),
    do: value |> Map.fetch!(name)

  def fetch({:indexer, index}, array = %JSONArray{}) when is_integer(index),
    do:
      array
      |> Map.get(:value)
      |> Enum.at(index)
      |> Map.get(:value)

  def fetch({:indexer, index}, value)
      when is_integer(index) and is_list(value),
      do: value |> Enum.at(index)
end
|
lib/json_parser.ex
| 0.580709 | 0.454412 |
json_parser.ex
|
starcoder
|
defmodule ESpec.SuiteRunner do
  @moduledoc """
  Defines functions for running specs in modules.
  """

  alias ESpec.Configuration
  alias ESpec.Example
  alias ESpec.ExampleRunner

  @doc """
  Runs `before_all` hook, examples and then `after_all` hook.
  """
  def run(module, opts, shuffle \\ true) do
    run_hook(module, :before_all_function)
    results = run_module_examples(module, opts, shuffle)
    run_hook(module, :after_all_function)
    results
  end

  defp run_module_examples(module, opts, shuffle) do
    selected = filter(module.examples, opts)
    selected = if shuffle, do: Enum.shuffle(selected), else: selected
    run_examples(selected)
  end

  @doc "Runs examples."
  def run_examples(examples, sync \\ Configuration.get(:sync))

  def run_examples(examples, sync) when sync in [nil, false] do
    {async_examples, sync_examples} = partition_async(examples)
    run_async(async_examples) ++ run_sync(sync_examples)
  end

  def run_examples(examples, _sync), do: run_sync(examples)

  @doc false
  def partition_async(examples) do
    Enum.split_with(examples, fn example ->
      Example.extract_option(example, :async) === true
    end)
  end

  # Invokes the optional zero-arity hook function if the module defines one.
  defp run_hook(module, hook_name) do
    if {hook_name, 0} in module.__info__(:functions) do
      apply(module, hook_name, [])
    end
  end

  # Runs examples concurrently; a crashed runner task aborts the whole suite.
  defp run_async(examples) do
    examples
    |> Task.async_stream(&ExampleRunner.run/1, timeout: :infinity)
    |> Enum.map(fn
      {:ok, example_result} ->
        example_result

      {:exit, reason} ->
        raise "Asynchronous test run exited with reason: #{inspect(reason)}"
    end)
  end

  defp run_sync(examples), do: Enum.map(examples, &ExampleRunner.run/1)

  @doc false
  def filter(examples, opts) do
    file_opts = opts[:file_opts] || []

    examples
    |> filter_shared()
    |> apply_if(Enum.any?(file_opts), &file_opts_filter(&1, file_opts))
    |> apply_if(opts[:focus], &filter_focus/1)
    |> apply_if(opts[:only], &filter_only(&1, opts[:only]))
    |> apply_if(opts[:exclude], &filter_only(&1, opts[:exclude], true))
    |> apply_if(opts[:string], &filter_string(&1, opts[:string]))
  end

  # Applies `fun` to `examples` only when `condition` is truthy.
  defp apply_if(examples, condition, fun) do
    if condition, do: fun.(examples), else: examples
  end

  # Shared examples are templates and never run directly.
  defp filter_shared(examples), do: Enum.reject(examples, & &1.shared)

  defp file_opts_filter(examples, file_opts) do
    examples
    |> Enum.group_by(& &1.file)
    |> Enum.reduce([], fn {file, file_examples}, acc ->
      line = file |> opts_for_file(file_opts) |> Keyword.get(:line)
      if line, do: examples_for_line(file_examples, line, acc), else: acc ++ file_examples
    end)
  end

  defp examples_for_line(file_examples, line, acc) do
    case filtered_examples_within_block(file_examples, line) do
      [] ->
        closest = file_examples |> Enum.map(& &1.line) |> get_closest(line)
        acc ++ Enum.filter(file_examples, &(&1.line == closest))

      within_block ->
        acc ++ within_block
    end
  end

  # Examples whose enclosing describe/context block starts at `line`.
  defp filtered_examples_within_block(examples, line) do
    Enum.filter(examples, fn example ->
      context_lines =
        example
        |> Example.extract_contexts()
        |> Enum.map(& &1.line)

      line in context_lines
    end)
  end

  # Picks the greatest line number <= value, or the smallest line overall.
  defp get_closest(lines, value) do
    descending = lines |> Enum.sort() |> Enum.reverse()
    Enum.find(descending, List.last(descending), &(&1 <= value))
  end

  defp opts_for_file(file, opts_list) do
    opts_list
    |> Enum.find(fn {file_path, _opts} -> file_path == file end)
    |> case do
      {_file, opts} -> opts
      nil -> []
    end
  end

  # Keeps examples tagged `focus` directly or via an enclosing context.
  defp filter_focus(examples) do
    Enum.filter(examples, fn example ->
      contexts = Example.extract_contexts(example)
      example.opts[:focus] || Enum.any?(contexts, & &1.opts[:focus])
    end)
  end

  defp filter_string(examples, string) do
    Enum.filter(examples, fn example ->
      [example.description | Example.context_descriptions(example)]
      |> Enum.join()
      |> String.contains?(string)
    end)
  end

  # `only` is a "key" or "key:value" string; with `reverse` the match excludes.
  defp filter_only(examples, only, reverse \\ false) do
    [key, value] = extract_opts(only)

    Enum.filter(examples, fn example ->
      tag_values = filter_tag_value(example, key)

      cond do
        not reverse -> any_with_tag?(tag_values, value)
        tag_values == [] -> true
        true -> not any_with_tag?(tag_values, value)
      end
    end)
  end

  # Collects truthy values for `key` from the example and all its contexts.
  defp filter_tag_value(example, key) do
    key = String.to_atom(key)
    context_values = example |> Example.extract_contexts() |> Enum.map(& &1.opts[key])
    Enum.filter([example.opts[key] | context_values], & &1)
  end

  defp any_with_tag?(tag_values, value) do
    Enum.any?(tag_values, fn tag -> any_condition(tag, value) end)
  end

  # With a value given, compares its string form; otherwise the tag itself decides.
  defp any_condition(tag, value) when is_atom(tag) do
    if value, do: Atom.to_string(tag) == value, else: tag
  end

  defp any_condition(tag, value) when is_integer(tag) do
    if value, do: Integer.to_string(tag) == value, else: tag
  end

  defp any_condition(tag, value) do
    if value, do: tag == value, else: tag
  end

  defp extract_opts(key_value) do
    if String.contains?(key_value, ":") do
      String.split(key_value, ":")
    else
      [key_value, false]
    end
  end
end
|
lib/espec/suite_runner.ex
| 0.613815 | 0.505066 |
suite_runner.ex
|
starcoder
|
defmodule NxMath do
  import Nx.Defn

  @moduledoc """
  Documentation for `NxMath`.
  """

  # ln(2), precomputed once at compile time.
  @log2 Nx.log(2)

  # x! computed iteratively inside `defn` via a while loop.
  defnp factorial(x) do
    {factorial, _} =
      while {factorial = 1, x}, Nx.greater(x, 1) do
        {factorial * x, x - 1}
      end

    factorial
  end

  # Broadcasts scalar `s` over the shape of `t` using the `{type, bit}` dtype.
  defnp broadcast(s, t, {type, bit}) do
    Nx.broadcast(Nx.tensor(s, type: {type, bit}), Nx.shape(t))
  end

  # Series coefficients: c(1) = 1 - ln(2), c(n) = -(ln 2)^n otherwise.
  defnp c(n) do
    if n == 1 do
      1 - Nx.log(2)
    else
      -Nx.power(Nx.log(2), n)
    end
  end

  # i-th correction term of the polynomial approximation: c(i)/i! * x^i.
  defnp(f(x, i), do: c(i) / factorial(i) * Nx.power(x, i))

  @doc """
  Calculates the exponential of each element in the tensor.

  This algorithm is based on
  ["Fast Exponential Computation on SIMD Architecture" by <NAME>.,
  in the proceedings *HiPEAC 2015: 1st Workshop On Approximate Computing (WAPCO),*
  Amsterdam, NL, Jan, 2015,
  DOI:10.13140/2.1.4362.3207.](https://www.researchgate.net/publication/272178514_Fast_Exponential_Computation_on_SIMD_Architectures)

  ## Examples

      iex> NxMath.exp16(0)
      #Nx.Tensor<
        f16
        1.0
      >
  """
  defn exp16(t0) do
    # Force the computation into f16 regardless of the caller's input type.
    t0 =
      rewrite_types(
        t0,
        max_float_type: {:f, 16},
        max_signed_type: {:f, 16},
        max_unsigned_type: {:f, 16}
      )

    # Inputs >= 12 are treated as overflow and mapped to +inf below.
    greater_equal_12 = Nx.greater_equal(t0, 12)
    less_12 = Nx.less(t0, 12)

    # Change of base: exp(t0) = 2^t with t = t0 / ln(2).
    t = (t0 / @log2) |> Nx.as_type({:f, 16})

    # Fractional part of t, used for the polynomial correction.
    xf =
      (t - Nx.floor(t))
      |> Nx.as_type({:f, 16})

    # Sum of correction terms f(xf, n) for n = 1..3.
    {kxf, _, _} =
      while {kxf = broadcast(0, t, {:f, 16}), n = 1, xf}, Nx.less(n, 4) do
        {
          (kxf + f(xf, n)) |> Nx.as_type({:f, 16}),
          n + 1,
          xf
        }
      end

    is_zero = Nx.equal(t, 0)
    isnt_zero = Nx.not_equal(t, 0)
    # exp(0) must be exactly 1.
    value_zero = is_zero * broadcast(1, t, {:f, 16})

    # Assemble the f16 bit pattern directly: 1024 = 2^10 mantissa scale,
    # 15 = f16 exponent bias; out-of-range inputs are zeroed via `less_12`.
    value =
      Nx.round(1024 * (t - kxf + 15) * less_12)
      |> Nx.as_type({:u, 16})

    # 0x7BFF is the largest finite f16 bit pattern; anything above is +inf.
    is_inf = Nx.logical_or(Nx.greater(value, 0x7BFF), greater_equal_12)
    isnt_inf = Nx.logical_or(Nx.greater(value, 0x7BFF), greater_equal_12) |> Nx.logical_not()

    # Bit pattern for the finite, non-zero results.
    value_not_zero =
      (Nx.logical_and(isnt_inf, isnt_zero) * value)
      |> Nx.as_type({:u, 16})
      |> Nx.bitcast({:f, 16})

    value_not_inf =
      (value_zero + value_not_zero)
      |> Nx.bitcast({:u, 16})

    # 0x7C00 is the f16 bit pattern for +infinity.
    (value_not_inf + is_inf * 0x7C00)
    |> Nx.as_type({:u, 16})
    |> Nx.bitcast({:f, 16})
  end
end
|
lib/nx_math.ex
| 0.844714 | 0.617022 |
nx_math.ex
|
starcoder
|
defmodule Membrane.Element.Action do
  @moduledoc """
  This module contains type specifications of actions that can be returned
  from element callbacks.

  Returning actions is a way of element interaction with
  other elements and parts of framework. Each action may be returned by any
  callback (except for `c:Membrane.Element.Base.handle_init/1`
  and `c:Membrane.Element.Base.handle_shutdown/2`, as they
  do not support returning any actions) unless explicitly stated otherwise.
  """

  alias Membrane.{Buffer, Caps, Clock, Event, Notification}
  alias Membrane.Pad

  @typedoc """
  Sends a message to the parent.
  """
  @type notify_t :: {:notify, Notification.t()}

  @typedoc """
  Sends an event through a pad (input or output).

  Forbidden when playback state is stopped.
  """
  @type event_t :: {:event, {Pad.ref_t(), Event.t()}}

  @typedoc """
  Allows to split callback execution into multiple applications of another callback
  (called from now sub-callback).

  Executions are synchronous in the element process, and each of them passes
  subsequent arguments from the args_list, along with the element state (passed
  as the last argument each time).

  Return value of each execution of sub-callback can be any valid return value
  of the original callback (this also means sub-callback can return any action
  valid for the original callback, unless explicitly stated). Returned actions
  are executed immediately (they are NOT accumulated and executed after all
  sub-callback executions are finished).

  Useful when a long action is to be undertaken, and partial results need to
  be returned before entire process finishes (e.g. default implementation of
  `c:Membrane.Filter.handle_process_list/4` uses split action to invoke
  `c:Membrane.Filter.handle_process/4` with each buffer)
  """
  @type split_t :: {:split, {callback_name :: atom, args_list :: [[any]]}}

  @typedoc """
  Sends caps through a pad.

  The pad must have output direction. Sent caps must fit constraints on the pad.

  Forbidden when playback state is stopped.
  """
  @type caps_t :: {:caps, {Pad.ref_t(), Caps.t()}}

  @typedoc """
  Sends buffers through a pad.

  The pad must have output direction.

  Allowed only when playback state is playing.
  """
  @type buffer_t :: {:buffer, {Pad.ref_t(), Buffer.t() | [Buffer.t()]}}

  @typedoc """
  Makes a demand on a pad.

  The pad must have input direction and work in pull mode. This action does NOT
  entail _sending_ demand through the pad, but just _requesting_ some amount
  of data from pad's internal queue, which _sends_ demands automatically when it
  runs out of data.

  If there is any data available at the pad, the data is passed to
  `c:Membrane.Filter.handle_process_list/4`, `c:Membrane.Endpoint.handle_write_list/4`
  or `c:Membrane.Sink.handle_write_list/4` callback. Invoked callback is
  guaranteed not to receive more data than demanded.

  Demand size can be either a non-negative integer, that overrides existing demand,
  or a function that is passed current demand, and is to return the new demand.

  Allowed only when playback state is playing.
  """
  @type demand_t :: {:demand, {Pad.ref_t(), demand_size_t}}
  @type demand_size_t :: pos_integer | (pos_integer() -> non_neg_integer())

  @typedoc """
  Executes `c:Membrane.Element.WithOutputPads.handle_demand/5` callback
  for the given pad if its demand is greater than 0.

  The pad must have output direction and work in pull mode.

  ## Redemand in Sources and Endpoints

  In case of Sources and Endpoints, `:redemand` is just a helper that simplifies element's code.
  The element doesn't need to generate the whole demand synchronously at `handle_demand`
  or store current demand size in its state, but it can just generate one buffer
  and return `:redemand` action.
  If there is still one or more buffers to produce, returning `:redemand` triggers
  the next invocation of `handle_demand`. In such case, the element is to produce
  next buffer and call `:redemand` again.
  If there are no more buffers demanded, `handle_demand` is not invoked and
  the loop ends.
  One more advantage of the approach with `:redemand` action is that produced buffers
  are sent one after another in separate messages and this can possibly improve
  the latency.

  ## Redemand in Filters

  Redemand in Filters is useful in a situation where not the entire demand of
  output pad has been satisfied and there is a need to send a demand for additional
  buffers through the input pad.
  A typical example of this situation is a parser that has not demanded enough
  bytes to parse the whole frame.

  ## Usage limitations

  Allowed only when playback state is playing.
  """
  @type redemand_t :: {:redemand, Pad.ref_t()}

  @typedoc """
  Sends buffers/caps/event to all output pads of element (or to input pads when
  event occurs on the output pad).

  Used by default implementations of
  `c:Membrane.Element.WithInputPads.handle_caps/4` and
  `c:Membrane.Element.Base.handle_event/4` callbacks in filter.

  Allowed only when _all_ below conditions are met:
  - element is filter,
  - callback is `c:Membrane.Filter.handle_process_list/4`,
    `c:Membrane.Element.WithInputPads.handle_caps/4`
    or `c:Membrane.Element.Base.handle_event/4`,
  - playback state is valid for sending buffer, caps or event action
    respectively.

  Keep in mind that `c:Membrane.Filter.handle_process_list/4` can only
  forward buffers, `c:Membrane.Element.WithInputPads.handle_caps/4` - caps
  and `c:Membrane.Element.Base.handle_event/4` - events.
  """
  @type forward_t :: {:forward, Buffer.t() | [Buffer.t()] | Caps.t() | Event.t() | :end_of_stream}

  @typedoc """
  Suspends/resumes change of playback state.

  - `playback_change: :suspend` may be returned only from
    `c:Membrane.Element.Base.handle_stopped_to_prepared/2`,
    `c:Membrane.Element.Base.handle_playing_to_prepared/2`,
    `c:Membrane.Element.Base.handle_prepared_to_playing/2` and
    `c:Membrane.Element.Base.handle_prepared_to_stopped/2` callbacks,
    and defers playback state change until `playback_change: :resume` is returned.
  - `playback_change: :resume` may be returned from any callback, only when
    playback state change is suspended, and causes it to finish.

  There is no straight limit how long playback change can take, but keep in mind
  that it may affect application quality if not done quickly enough.
  """
  @type playback_change_t :: {:playback_change, :suspend | :resume}

  @typedoc """
  Starts a timer that will invoke `c:Membrane.Element.Base.handle_tick/3` callback
  every `interval` according to the given `clock`.

  The timer's `id` is passed to the `c:Membrane.Element.Base.handle_tick/3`
  callback and can be used for changing its interval via `t:timer_interval_t/0`
  or stopping it via `t:stop_timer_t/0`.

  If `interval` is set to `:no_interval`, the timer won't issue any ticks until
  the interval is set with `t:timer_interval_t/0` action.

  If no `clock` is passed, parent's clock is chosen.

  Timers use `Process.send_after/3` under the hood.
  """
  @type start_timer_t ::
          {:start_timer,
           {timer_id :: any, interval :: Ratio.t() | non_neg_integer | :no_interval}
           | {timer_id :: any, interval :: Ratio.t() | non_neg_integer | :no_interval,
              clock :: Clock.t()}}

  @typedoc """
  Changes interval of a timer started with `t:start_timer_t/0`.

  Permitted only from `c:Membrane.Element.Base.handle_tick/3`, unless the interval
  was previously set to `:no_interval`.

  If the `interval` is `:no_interval`, the timer won't issue any ticks until
  another `t:timer_interval_t/0` action. Otherwise, the timer will issue ticks every
  new `interval`. The next tick after interval change is scheduled at
  `new_interval + previous_time`, where previous_time is the time of the latest
  tick or the time of returning `t:start_timer_t/0` action if no tick has been
  sent yet. Note that if `current_time - previous_time > new_interval`, a burst
  of `div(current_time - previous_time, new_interval)` ticks is issued immediately.
  """
  @type timer_interval_t ::
          {:timer_interval,
           {timer_id :: any, interval :: Ratio.t() | non_neg_integer | :no_interval}}

  @typedoc """
  Stops a timer started with `t:start_timer_t/0` action.

  This action is atomic: stopping timer guarantees that no ticks will arrive from it.
  """
  @type stop_timer_t :: {:stop_timer, timer_id :: any}

  @typedoc """
  This action sets the latency for the element.

  This action is not permitted in callback `c:Membrane.Element.Base.handle_init/1`.
  """
  @type latency_t :: {:latency, latency :: non_neg_integer}

  @typedoc """
  Marks that processing via a pad (output) has been finished and the pad instance
  won't be used anymore.

  Triggers `end_of_stream/3` callback at the receiver element.

  Allowed only when playback is in playing state.
  """
  @type end_of_stream_t :: {:end_of_stream, Pad.ref_t()}

  @typedoc """
  Type that defines a single action that may be returned from element callbacks.

  Depending on element type, callback, current playback state and other
  circumstances there may be different actions available.
  """
  @type t ::
          event_t
          | notify_t
          | split_t
          | caps_t
          | buffer_t
          | demand_t
          | redemand_t
          | forward_t
          | playback_change_t
          | start_timer_t
          | stop_timer_t
          | latency_t
          | end_of_stream_t
end
|
lib/membrane/element/action.ex
| 0.933142 | 0.609146 |
action.ex
|
starcoder
|
defmodule MmoGame.Grid do
  @moduledoc """
  Grid related functions.
  """

  # A grid stores its dimensions plus a sparse map of wall cells
  # (only cells that contain a wall appear as keys).
  @type t :: %__MODULE__{
          rows: pos_integer(),
          columns: pos_integer(),
          walls: %{optional(coordinate()) => boolean()}
        }

  @type row :: non_neg_integer()
  @type col :: non_neg_integer()
  # A cell position, zero-based: {row, column}.
  @type coordinate :: {row, col}

  @type move_direction :: :up | :down | :left | :right
  # The four directions accepted by movement helpers.
  @move_directions [:up, :down, :left, :right]

  @enforce_keys [:rows, :columns, :walls]
  defstruct @enforce_keys
@spec new(%{
        rows: pos_integer(),
        columns: pos_integer(),
        walls: list(coordinate())
      }) ::
        {:error, :invalid_grid_parameters | :invalid_wall_coordinate} | {:ok, t()}
@doc """
Builds a grid of the given dimensions and places the listed walls.

Returns `{:error, :invalid_grid_parameters}` for non-positive dimensions or a
malformed argument, and `{:error, :invalid_wall_coordinate}` when any wall
falls outside the grid.
"""
def new(%{rows: rows, columns: columns, walls: walls})
    when is_integer(rows) and is_integer(columns) and is_list(walls) and
           rows > 0 and columns > 0 do
  # Start from an empty wall map, then validate/insert each wall in turn.
  %__MODULE__{rows: rows, columns: columns, walls: %{}}
  |> place_walls(walls)
end

def new(_invalid), do: {:error, :invalid_grid_parameters}
defp place_walls(%__MODULE__{} = grid, []), do: {:ok, grid}
defp place_walls(%__MODULE__{} = grid, new_walls) do
grid =
Enum.reduce_while(new_walls, grid, fn wall, acc ->
place_wall(acc, wall)
end)
case grid do
%__MODULE__{} = grid ->
{:ok, grid}
{:error, :invalid_wall_coordinate} ->
{:error, :invalid_wall_coordinate}
end
end
defp place_wall(%__MODULE__{rows: rows, columns: columns, walls: walls} = grid, {row, column})
when is_integer(row) and is_integer(column) and
row < rows and column < columns and row >= 0 and
column >= 0 do
updated_walls = Map.put(walls, {row, column}, true)
{:cont, Map.put(grid, :walls, updated_walls)}
end
defp place_wall(
_grid,
_coordinate
),
do: {:halt, {:error, :invalid_wall_coordinate}}
@spec draw(t(), %{optional(MmoGame.Grid.coordinate()) => [MmoGame.Hero.hero_name()]}) ::
{:ok,
[
[
%{
required(:wall) => boolean(),
optional(coordinate()) => [{MmoGame.Hero.hero_name(), :hero_dead | :hero_alive}]
}
]
]}
| {:error, :invalid_grid}
def draw(%__MODULE__{rows: rows, columns: columns} = grid, heroes_coordinates)
when is_map(heroes_coordinates) do
grid =
Enum.map(0..(rows - 1), fn row ->
Enum.map(0..(columns - 1), fn col ->
wall_map_without_coordinates!(grid, {row, col})
|> Map.merge(map_of_heroes_in_coordinate({row, col}, heroes_coordinates))
end)
end)
{:ok, grid}
end
def draw(_, _), do: {:error, :invalid_grid}
defp map_of_heroes_in_coordinate(coordinate, heroes_coordinates) do
case Map.get(heroes_coordinates, coordinate, nil) do
nil -> %{}
list -> %{heroes: list}
end
end
# Used for random position on the board
defp draw_with_coordinates!(%__MODULE__{rows: rows, columns: columns} = grid) do
Enum.map(0..(rows - 1), fn row ->
Enum.map(0..(columns - 1), fn col ->
wall_map_with_coordinates!(grid, {row, col})
end)
end)
end
defp wall_map_without_coordinates!(%__MODULE__{} = grid, {row, col}) do
case wall?(grid, {row, col}) do
{:ok, true} -> %{wall: true}
{:ok, false} -> %{wall: false}
end
end
defp wall_map_with_coordinates!(%__MODULE__{} = grid, {row, col}) do
case wall?(grid, {row, col}) do
{:ok, true} -> %{row: row, col: col, wall: true}
{:ok, false} -> %{row: row, col: col, wall: false}
end
end
@spec default_grid :: {:ok, t()}
def default_grid() do
rows = 10
colums = 10
walls =
Enum.map(0..(rows - 1), fn row ->
Enum.map(0..(colums - 1), fn column ->
# returns something like
# %{row: row, column: column, wall: true}
default_wall_maps_case!(row, column)
end)
end)
|> List.flatten()
|> Enum.filter(& &1.wall)
|> Enum.map(&{&1.row, &1.column})
new(%{rows: rows, columns: colums, walls: walls})
end
defp default_wall_maps_case!(row, column) do
case {row, column} do
{row, _} when row in [0, 9] ->
%{row: row, column: column, wall: true}
{_, column} when column in [0, 9] ->
%{row: row, column: column, wall: true}
{4, column} when column in [1, 3, 4, 5, 6, 9] ->
%{row: row, column: column, wall: true}
{row, 4} when row in [4, 5, 6, 7, 9] ->
%{row: row, column: column, wall: true}
_ ->
%{row: row, column: column, wall: false}
end
end
@spec random_non_wall_position(t()) ::
{:ok, coordinate()} | {:error, :invalid_grid}
def random_non_wall_position(%__MODULE__{} = grid) do
map_element =
grid
|> draw_with_coordinates!()
|> List.flatten()
|> Enum.filter(&(!&1.wall))
|> Enum.random()
{:ok, {map_element.row, map_element.col}}
end
def random_non_wall_position(_), do: {:error, :invalid_grid}
defp wall?(
%__MODULE__{rows: rows, columns: columns, walls: walls},
{row, column}
)
when is_integer(row) and is_integer(column) and row < rows and column < columns and
row >= 0 and
column >= 0,
do: {:ok, walls[{row, column}] == true}
@spec can_move?(t(), coordinate(), move_direction()) ::
{:error, :invalid_move} | {:ok, coordinate()}
# I'm considering a hero will never reach grid border (grid always have walls on its border)
def can_move?(%__MODULE__{} = grid, {_row, _column} = coordinate, direction)
when direction in @move_directions do
new_coordinate = adjacent_coordinate(coordinate, direction)
check_move_and_return(grid, new_coordinate)
end
def can_move?(_, _, _), do: {:error, :invalid_move_parameters}
defp check_move_and_return(%__MODULE__{} = grid, {new_row, new_column}) do
case wall?(grid, {new_row, new_column}) do
{:ok, true} -> {:error, :invalid_move}
{:ok, false} -> {:ok, {new_row, new_column}}
end
end
defp adjacent_coordinate({row, column}, :up), do: {row - 1, column}
defp adjacent_coordinate({row, column}, :right), do: {row, column + 1}
defp adjacent_coordinate({row, column}, :down), do: {row + 1, column}
defp adjacent_coordinate({row, column}, :left), do: {row, column - 1}
defp adjacent_coordinate(coordinate, :up_right),
do: adjacent_coordinate(coordinate, :up) |> adjacent_coordinate(:right)
defp adjacent_coordinate(coordinate, :down_right),
do: adjacent_coordinate(coordinate, :down) |> adjacent_coordinate(:right)
defp adjacent_coordinate(coordinate, :down_left),
do: adjacent_coordinate(coordinate, :down) |> adjacent_coordinate(:left)
defp adjacent_coordinate(coordinate, :up_left),
do: adjacent_coordinate(coordinate, :up) |> adjacent_coordinate(:left)
@spec calculate_perimeter!(t(), coordinate()) :: [coordinate()] | no_return()
def calculate_perimeter!(%__MODULE__{} = grid, coordinate) do
# run clockwise starting on top
perimeter = [:up, :up_right, :right, :down_right, :down, :down_left, :left, :up_left]
Enum.map(perimeter, fn direction ->
coordinate = adjacent_coordinate(coordinate, direction)
check_move_and_return(grid, coordinate) |> elem(1)
end)
|> Enum.filter(&(&1 != :invalid_move))
## Add actual coordinate as well
|> List.insert_at(-1, coordinate)
end
end
|
lib/mmo_game/grid.ex
| 0.920799 | 0.651424 |
grid.ex
|
starcoder
|
defmodule Artemis.Helpers.DateTime do
  @moduledoc """
  Helpers for sorting and manipulating `Date`, `DateTime` and
  `NaiveDateTime` values.
  """

  @doc """
  Sorts an enumerable of `Date` structs chronologically.

  Plain `Enum.sort/1` compares structs field-by-field in alphabetical field
  order (`day` before `month` before `year`), which gives wrong results for
  dates, so `Date.compare/2` is used instead.
  See: https://stackoverflow.com/questions/41655852/sorting-list-of-dates-in-elixir/41655967
  """
  def sort_by_date(dates) do
    Enum.sort(dates, &date_not_after?/2)
  end

  # True when `earlier` falls on or before `later`.
  defp date_not_after?(earlier, later) do
    Date.compare(earlier, later) != :gt
  end

  @doc """
  Sorts an enumerable of `DateTime` (or `NaiveDateTime`) structs
  chronologically, dispatching to the struct's own `compare/2`.

  Plain `Enum.sort/1` compares struct fields alphabetically and so gives
  wrong results here as well.
  See: https://stackoverflow.com/questions/41655852/sorting-list-of-dates-in-elixir/41655967
  """
  def sort_by_date_time(date_times) do
    Enum.sort(date_times, &date_time_not_after?/2)
  end

  # Dispatches on the struct module so both DateTime and NaiveDateTime work.
  defp date_time_not_after?(earlier, later) do
    earlier.__struct__.compare(earlier, later) != :gt
  end

  @doc """
  Sets the microsecond component of an existing DateTime:

      #DateTime<2020-01-03 19:25:05Z> -> #DateTime<2020-01-03 19:25:05.000000Z>

  Also works on NaiveDateTimes:

      ~N[2020-01-03 19:30:00] -> ~N[2020-01-03 19:30:00.000000]
  """
  def add_microseconds(date_time, value \\ 0, precision \\ 6) do
    %{date_time | microsecond: {value, precision}}
  end

  @doc """
  Returns a `Timex.Interval` instance. Can be passed into `Enum.map` to
  iterate from the start date to the end date using a specified step.

  Returns `[]` when either endpoint is `nil`.
  For more options see: https://hexdocs.pm/timex/Timex.Interval.html
  """
  def get_iterable_interval(oldest, newest, options \\ [])
  def get_iterable_interval(nil, _newest, _options), do: []
  def get_iterable_interval(_oldest, nil, _options), do: []

  def get_iterable_interval(oldest, newest, options) when oldest == newest do
    # Identical endpoints would yield nothing to iterate, so nudge the end
    # forward by one microsecond.
    get_iterable_interval(oldest, Timex.shift(newest, microseconds: 1), options)
  end

  def get_iterable_interval(oldest, newest, options) do
    [
      from: oldest,
      # Include starting value when iterating
      left_open: false,
      # Include end value when iterating
      right_open: false,
      # Set the unit for each iteration
      step: [weeks: 1],
      until: newest
    ]
    |> Keyword.merge(options)
    |> Timex.Interval.new()
  end
end
|
apps/artemis/lib/artemis/helpers/date_time.ex
| 0.87142 | 0.502136 |
date_time.ex
|
starcoder
|
defmodule AdventOfCode2019.OxygenSystem do
  @moduledoc """
  Day 15 — https://adventofcode.com/2019/day/15

  Drives an Intcode-controlled repair droid through an unknown maze to find
  the oxygen system. `locate/1` returns a two-element list used by both parts.
  """
  require AdventOfCode2019.IntcodeComputer
  # Part 1: the first element returned by locate/1 (the recorded path length
  # to the oxygen system).
  @spec part1(Enumerable.t()) :: integer
  def part1(in_stream) do
    in_stream
    |> load_program()
    |> locate()
    |> List.first()
  end
  # Part 2: the last element returned by locate/1 (the metric computed via
  # deoverlap/2 once exploration is complete).
  @spec part2(Enumerable.t()) :: integer
  def part2(in_stream) do
    in_stream
    |> load_program()
    |> locate()
    |> List.last()
  end
  # Parses the first line of the input stream into an Intcode program map.
  @spec load_program(Enumerable.t()) :: map
  defp load_program(in_stream) do
    in_stream
    |> Stream.map(&AdventOfCode2019.IntcodeComputer.load_program/1)
    |> Enum.take(1)
    |> List.first()
  end
  # Starts exploration at the origin {0, 0} with move command 1, the origin
  # pre-recorded in the visited map with an empty path.
  @spec locate(map) :: list
  defp locate(program) do
    locate({:noop, {program, 0, 0}, nil}, {0, 0}, [], nil, 1, :fore, %{{0, 0} => []}, 0)
  end
  @type position :: {integer, integer}
  # Wall-following exploration loop. Arguments: the Intcode computer result
  # tuple, current position, path of moves from the start, path to the oxygen
  # system (nil until found), last move issued, direction mode
  # (:fore | :back | :stop), map of visited cells to their paths, and the
  # recorded length of the path to the oxygen system.
  @spec locate(
          {atom, {map, integer, integer}, integer | nil},
          position,
          list,
          list | nil,
          integer,
          atom,
          map,
          integer
        ) :: list
  # Droid reported status 0 (it hit a wall, per the puzzle's droid protocol):
  # record the blocked cell as visited, stay put, and turn to try another
  # direction.
  defp locate({:output, state, 0}, {x, y}, path, oxy, move, _dir, area, len) do
    {dx, dy} = fore(move)
    area = Map.put(area, {x + dx, y + dy}, [])
    {move, dir} = turn({x, y}, path, move, area)
    AdventOfCode2019.IntcodeComputer.step(state, [move])
    |> locate({x, y}, path, oxy, move, dir, area, len)
  end
  # Backtracking (:back): the droid moved, so pop the last move off the path
  # and decide where to turn next from the previous cell, using that popped
  # move as the reference direction.
  defp locate({:output, state, _loc}, {x, y}, [head | path], oxy, move, :back, area, len) do
    {dx, dy} = fore(move)
    pos = {x + dx, y + dy}
    {move, dir} = turn(pos, path, head, area)
    AdventOfCode2019.IntcodeComputer.step(state, [move])
    |> locate(pos, path, oxy, move, dir, area, len)
  end
  # Status 2 seen for the first time (oxy is still nil): the oxygen system
  # was found. Remember its path and length, then re-dispatch the same data
  # to the regular :fore clause below.
  defp locate({:output, _state, 2} = data, pos, path, nil, move, :fore, area, _len) do
    locate(data, pos, path, [move | path], move, :fore, area, length(path) + 1)
  end
  # Droid moved forward into a new cell: advance the position, extend the
  # path, and record the cell's path in the visited map.
  defp locate({:output, state, _loc}, {x, y}, path, oxy, move, :fore, area, len) do
    {dx, dy} = fore(move)
    pos = {x + dx, y + dy}
    AdventOfCode2019.IntcodeComputer.step(state, [move])
    |> locate(pos, [move | path], oxy, move, :fore, Map.put(area, pos, path), len)
  end
  # Exploration exhausted (:stop): part 1 is `len`; part 2 is derived from
  # the longest recorded path and the oxygen path with their common prefix
  # removed (NOTE(review): this assumes the farthest cell from the oxygen
  # system lies on the deepest recorded path — confirm against the puzzle).
  defp locate({:done, _state, nil}, _pos, _path, oxy, _move, :stop, area, len) do
    min =
      Map.values(area)
      |> Enum.max_by(fn path -> length(path) end)
      |> Enum.reverse()
      |> deoverlap(Enum.reverse(oxy))
    [len, min]
  end
  # Any other computer result (e.g. it is waiting for input): feed the
  # pending move in and continue with the state unchanged.
  defp locate({_result, state, _loc}, pos, path, oxy, move, dir, area, len) do
    AdventOfCode2019.IntcodeComputer.step(state, [move])
    |> locate(pos, path, oxy, move, dir, area, len)
  end
  # Coordinate delta for each movement command (1/2 move along y, 3/4 along x).
  @spec fore(1 | 2 | 3 | 4) :: {-1 | 0 | 1, -1 | 0 | 1}
  defp fore(1), do: {0, 1}
  defp fore(2), do: {0, -1}
  defp fore(3), do: {-1, 0}
  defp fore(4), do: {1, 0}
  # Applies a movement command's delta to a position.
  @spec fore(1 | 2 | 3 | 4, position) :: position
  defp fore(move, {x, y}) do
    fore(move)
    |> (fn {dx, dy} -> {x + dx, y + dy} end).()
  end
  # Picks the next move from `pos`: rotate from the last move and keep
  # rotating (turn/6) until an unvisited candidate cell is found.
  @spec turn(position, list, integer, map) :: {integer, atom}
  defp turn(pos, path, move, area) do
    turn(move)
    |> fore(pos)
    |> turn(pos, path, turn(move), area, 1)
  end
  # Rotation order when trying directions: 1 -> 3 -> 2 -> 4 -> 1.
  @spec turn(1 | 2 | 3 | 4) :: 1 | 2 | 3 | 4
  defp turn(1), do: 3
  defp turn(2), do: 4
  defp turn(3), do: 2
  defp turn(4), do: 1
  # After 4 turns every neighbour is visited: back out along the path, or
  # stop entirely once the path is empty (we are back at the start).
  @spec turn(position, position, list, integer, map, integer) :: {integer, atom}
  defp turn(_new_pos, _pos, [], _move, _area, turns) when turns > 4, do: {0, :stop}
  defp turn(_new_pos, _pos, [move | _path], _move, _area, 4), do: {back(move), :back}
  # Candidate cell already visited: rotate and try the next direction.
  defp turn(new_pos, pos, path, move, area, turns) when is_map_key(area, new_pos) do
    turn(move)
    |> fore(pos)
    |> turn(pos, path, turn(move), area, turns + 1)
  end
  # Unvisited candidate found: keep exploring forward with this move.
  defp turn(_new_pos, _pos, _path, move, _area, _turns), do: {move, :fore}
  # Opposite of each movement command (used to retrace a step).
  @spec back(1 | 2 | 3 | 4) :: 1 | 2 | 3 | 4
  defp back(1), do: 2
  defp back(2), do: 1
  defp back(3), do: 4
  defp back(4), do: 3
  # Drops the shared leading segment of two start-first paths and returns the
  # sum of the remaining lengths plus one (the junction cell).
  @spec deoverlap(list, list) :: integer
  defp deoverlap([same | longest], [same | oxy]), do: deoverlap(longest, oxy)
  defp deoverlap(longest, oxy), do: length(longest) + length(oxy) + 1
end
|
lib/advent_of_code_2019/day15.ex
| 0.797517 | 0.579371 |
day15.ex
|
starcoder
|
defmodule SnapFramework.Component do
  alias Scenic.Graph
  alias Scenic.Primitive
  require SnapFramework.Macros
  require Logger
  @moduledoc """
  ## Overview
  SnapFramework.Component is nearly identical to a Scene. The main difference is the addition of the defcomponent macro,
  as well as the addition of the scenic opts key.
  defcomponent builds out your Scenic validate function and helper functions automatically, so you don't have to.
  ``` elixir
  defmodule Example.Component.MyComponent do
    use SnapFramework.Component,
      template: "lib/scenes/my_component.eex",
      controller: :none,
      assigns: []
    defcomponent :my_component, :tuple
  end
  ```
  The above example defines a component that takes in a tuple for data, and builds your helper function defined as ```my_component/3```
  ## Templates
  Component templates also have an additional feature that primitives or scene templates do not have. You can inject children into them.
  Let's write a basic icon button component that takes an icon child.
  ``` elixir
  # template
  <%= graph font_size: 20 %>
  <%= primitive Scenic.Primitive.Circle,
      15,
      id: :bg,
      translate: {23, 23}
  %>
  @children
  # component module
  defmodule Example.Component.IconButton do
    use SnapFramework.Component,
      name: :icon_button,
      template: "lib/icons/icon_button/icon_button.eex",
      controller: :none,
      assigns: [],
      opts: []
    defcomponent :icon_button, :any
  end
  ```
  Now let's see how to use this component with children in a scene. This assumes we've already made an icon component.
  ``` elixir
  <%= graph font_size: 20 %>
  <%= component Example.Component.IconButton,
      nil,
      id: :icon_button
  do %>
    <%= component Example.Component.Icon,
        @icon,
        id: :icon
    %>
  <% end %>
  ```
  That's all there is to putting children in components!
  """
  # Schema for the options accepted by `use SnapFramework.Component`,
  # validated below with NimbleOptions.
  @opts_schema [
    name: [required: false, type: :atom],
    template: [required: true, type: :string],
    controller: [required: true, type: :any],
    assigns: [required: true, type: :any],
    opts: [required: false, type: :any]
  ]
  # Validates the options against @opts_schema and delegates to
  # `use SnapFramework.Scene` with `type: :component`, pulling in the aliases
  # and module attributes the framework expects. Raises at compile time when
  # the options are invalid.
  defmacro __using__(opts) do
    case NimbleOptions.validate(opts, @opts_schema) do
      {:ok, opts} ->
        quote do
          use SnapFramework.Scene,
            template: unquote(opts[:template]),
            controller: unquote(opts[:controller]),
            assigns: unquote(opts[:assigns]),
            opts: unquote(opts[:opts]),
            type: :component
          import SnapFramework.Component
          alias Scenic.Primitives
          require SnapFramework.Macros
          require EEx
          require Logger
          Module.register_attribute(__MODULE__, :assigns, persist: true)
          Module.register_attribute(__MODULE__, :preload, persist: true)
        end
      {:error, error} ->
        raise Exception.message(error)
    end
  end
  # Generates the component's `validate/1` clauses for the declared data
  # type, plus the `name/3` helper used to add the component to a graph or
  # update an existing primitive.
  defmacro defcomponent(name, data_type) do
    quote do
      # This case runs at compile time of the using module, so only the
      # matching `validate/1` clause is defined.
      case unquote(data_type) do
        :string ->
          def validate(data) when is_bitstring(data), do: {:ok, data}
        :number ->
          def validate(data) when is_number(data), do: {:ok, data}
        :list ->
          def validate(data) when is_list(data), do: {:ok, data}
        :map ->
          def validate(data) when is_map(data), do: {:ok, data}
        :atom ->
          def validate(data) when is_atom(data), do: {:ok, data}
        :tuple ->
          def validate(data) when is_tuple(data), do: {:ok, data}
        :any ->
          def validate(data), do: {:ok, data}
        _ ->
          def validate(data), do: {:ok, data}
      end
      # Fallback clause for every concrete data type: reject data that did
      # not match the guarded clause above.
      # NOTE(review): the error text mentions "string ... in the button"
      # regardless of the declared type — looks copy-pasted from a button
      # component; confirm before reusing the message.
      if unquote(data_type) != :any do
        def validate(data) do
          {
            :error,
            """
            #{IO.ANSI.red()}Invalid #{__MODULE__} specification
            Received: #{inspect(data)}
            #{IO.ANSI.yellow()}
            The data for a #{__MODULE__} is just the #{inspect(unquote(data_type))} string to be displayed in the button.#{IO.ANSI.default_color()}
            """
          }
        end
      end
      # Graph helper: adds the component to a graph, or updates an existing
      # primitive in place.
      def unquote(name)(graph, data, options \\ [])
      def unquote(name)(%Graph{} = g, data, options) do
        add_to_graph(g, data, options)
      end
      # Updating a component primitive: re-validate the data against the
      # component module before putting it back.
      def unquote(name)(
            %Scenic.Primitive{module: Scenic.Primitive.Component, data: {mod, _, id}} = p,
            data,
            options
          ) do
        data =
          case mod.validate(data) do
            {:ok, data} -> data
            {:error, msg} -> raise msg
          end
        Primitive.put(p, {__MODULE__, data, id}, options)
      end
      # Updating a plain primitive: validate via the primitive's own module.
      def unquote(name)(%Scenic.Primitive{module: mod} = p, data, opts) do
        data =
          case mod.validate(data) do
            {:ok, data} -> data
            {:error, error} -> raise Exception.message(error)
          end
        Primitive.put(p, data, opts)
      end
    end
  end
end
|
lib/component.ex
| 0.88323 | 0.836888 |
component.ex
|
starcoder
|
defmodule TwoFactorInACan.Totp do
  @moduledoc """
  Provides functions for working with time based one time password (TOTP) style
  two factor authentication (2FA) as defined in RFC 4226.

  ## Summary

  For details on RFC 4226, see https://tools.ietf.org/rfc/rfc4226.txt.

  TOTP two factor authentication uses the HMAC-based one time password (HOTP)
  algorithm with the current time passed in as the count to verify that the end
  user and the consuming application have the same secret.

  It does this by generating a token from the current time and secret and
  comparing the generated secret to the one the end user has supplied. If they
  match, then it is extremely unlikely that the end user has a different
  secret.

  ## Security Concerns

  When implementing 2FA using TOTP it is very important to also lock out any
  users which do not send a matching token some small number of consecutive
  times. By default tokens are 6 characters long which means that there is a
  1/1,000,000 chance of guessing correctly. This is relatively trivial to brute
  force without some lock out mechanism.

  Care should also be taken when adjusting the token length. Longer tokens are
  more secure, and shorter tokens are less secure (as they are easier to
  guess).

  If there is drift between clocks of users and the machine verifying tokens
  then the `acceptable_past_tokens` and `acceptable_future_tokens` options can
  be used to specify a window of acceptable tokens. This can also be useful to
  allow users to still submit tokens which were just seconds ago valid as a
  user experience enhancement.

  It should be noted that increasing the window of acceptable tokens also makes
  it easier to randomly guess a token. Care should be taken to weigh the pros
  and cons of this. As an example, by allowing one past token and one future
  token to be valid, there are now (usually) 3 tokens that an attacker could
  guess. This increases the chance of successfully guessing tokens to
  1/333,333.

  ## Example Usage

  ### Secret Generation

  When a user first sets up TOTP 2FA, the server must generate a secret that
  will be shared between the server and the user. The `TwoFactorInACan.Secrets`
  module can be used for this.

  ```elixir
  secret = TwoFactorInACan.Secrets.generate_totp_secret()
  <<109, 159, 180, 42, 128, 80, 183, 56, 163, 232, 151, 242, 233, 37, 167, 178,
    253, 23, 18, 159>>
  ```

  Every user should have a different secret. No secrets should be shared.

  ### Transmitting the Secret to the User

  This secret must be securely transmitted to the user. It is extremely
  important that the method of transfer is secure. If an attacker can intercept
  the secret in transit, then they will be able to entirely bypass this form of
  2FA. The attacker then only needs to acquire the user's password and this 2FA
  method is rendered useless.

  Using HTTPS is a must for transferring this secret. Do not transfer anything
  over HTTP!

  A common method of transferring the secret to an end user is by generating a
  QR code containing the secret. Smart phone applications such as Google
  Authenticator, Duo, and Authy can then scan these codes, extract the secret,
  and securely store the secret.

  For these applications to understand that a secret is being transmitted, the
  following url should be encoded to a QR Code:

  ```elixir
  "otpauth://totp/MyDescription?secret=MySecret&issuer=MyAppName"
  |> MyQRModule.encode # Note: Not a real module!
  ```

  In the above URL, MySecret should be the secret encoded in base32, MyAppName
  and MyDescription can be anything, and assist the user in figuring out which
  token to use on your site if they have many.

  ### Verify User Received Secret

  At this point the end user should be asked to supply the current token as
  generated by their authenticator application.

  Upon receiving the token from the user, it should be verified against the
  secret generated earlier.

  ```elixir
  TwoFactorInACan.Totp.same_secret?(generated_secret, user_supplied_token)
  true
  ```

  If the above function returns `true` then the user has properly setup 2FA.

  If it returns `false`, then the incorrect token was sent. The user will need
  to send another correct token before their account should be configured to
  require TOTP 2FA at login.

  ### Store the secret

  The server will need to store the generated secret in some manner associated
  with the user so that it can be used to verify the user's identity at login.

  Care should be taken to ensure this is stored securely. If storing in a
  database, the database should be encrypted. Even if the database is
  encrypted, the field should be encrypted. You can even go the extra mile and
  use a different encryption key per user stored with the key stored outside of
  the database.

  If the secret is leaked or compromised by an attacker, then the attacker will
  be able to bypass this method of 2FA. In this event, the user should be
  prompted to re-setup TOTP 2FA. Ideally the user's account should be locked
  down until they can prove their identity through other means.

  ### Verify Token at Login

  When the user logs in, they should also supply the current TOTP token. The
  server should verify that the supplied TOTP token is generated using the same
  secret that is stored associated with that user.

  ```elixir
  TwoFactorInACan.Totp.same_secret?(stored_user_secret, user_supplied_token)
  true
  ```

  If this returns true, then the user has sufficiently proven their identity
  and authentication should succeed (assuming they also supplied the correct
  password!)

  If this returns false, then login should fail.

  It can be a good practice to provide the user with a message that does not
  reveal whether the username didn't exist, the password was wrong, or 2FA
  failed. This makes an attacker's job more difficult. There is some debate on
  whether this is an effective security measure.
  """

  alias TwoFactorInACan.Hotp

  @doc """
  Outputs the current TOTP token for the given secret.

  Expects a 160-bit binary key by default. To use secrets that are encoded the
  `:secret_format` option can be supplied. It expects one of the following
  values:

  - `:binary` (default)
  - `:base32`
  - `:base64`

  The following options are supported:

  - `:secret_format` - the format that the secret is passed in as. Options
    include:
    - `:binary` (default)
    - `:base32`
    - `:base64`
  - `:token_length` (Default: 6) - the length of the generated token. A longer
    token is harder to guess and thus more secure. A longer token can also be
    more difficult for users to accurately transmit. Although everything in
    `TwoFactorInACan` supports variable token length, you should be sure that
    other apps and programs used support the token length set here.
  - `:offset_seconds` (Default: 0) - The number of seconds to offset the
    current timestamp by. If the current timestamp was 600 and the offset was
    60, then the timestamp of 660 would be used to calculate the time interval.
    This can be useful to account for drift or to purposefully allow the last
    token to be valid as well in functions that use this function.
  - `:interval_seconds` (Default: 30) - The number of seconds that must pass
    before a new time_interval (and thus TOTP token) is returned by this
    function. This should probably never be anything but the default (30
    seconds) during actual use, as nearly all apps that generate TOTP tokens
    assume a 30 second interval.
  - `:injected_timestamp` (default: current time) - The unix timestamp to use
    when calculating the time interval. This should only be used during testing
    to ensure the same token or interval is always returned. When this option
    is not supplied, the current timestamp is used.

  # Examples

  ```elixir
  iex> secret = TwoFactorInACan.Secrets.generate_totp_secret()
  iex> TwoFactorInACan.Totp.current_token_value(secret)
  "858632"

  iex> secret = TwoFactorInACan.Secrets.generate_totp_secret(format: :base32)
  iex> TwoFactorInACan.Totp.current_token_value(secret, secret_format: :base32)
  "743622"

  iex> secret = TwoFactorInACan.Secrets.generate_totp_secret(format: :base64)
  iex> TwoFactorInACan.Totp.current_token_value(secret, secret_format: :base64)
  "384012"
  ```
  """
  def current_token_value(secret, opts \\ []) do
    time_interval = time_interval(opts)
    Hotp.generate_token(secret, time_interval, opts)
  end

  @doc """
  Calculates the current time interval as an integer used in the HOTP algorithm
  as the count to calculate the current token value.

  The following options are supported:

  - `:offset_seconds` (Default: 0) - The number of seconds to offset the
    current timestamp by. If the current timestamp was 600 and the offset was
    60, then the timestamp of 660 would be used to calculate the time interval.
    This can be useful to account for drift or to purposefully allow the last
    token to be valid as well in functions that use this function.
  - `:interval_seconds` (Default: 30) - The number of seconds that must pass
    before a new time_interval (and thus TOTP token) is returned by this
    function. This should probably never be anything but the default (30
    seconds) during actual use, as nearly all apps that generate TOTP tokens
    assume a 30 second interval.
  - `:injected_timestamp` (default: current time) - The unix timestamp to use
    when calculating the time interval. This should only be used during testing
    to ensure the same token or interval is always returned. When this option
    is not supplied, the current timestamp is used.

  ## Examples

  ```elixir
  iex> TwoFactorInACan.Totp.time_interval()
  51802243

  iex> TwoFactorInACan.Totp.time_interval(offset_seconds: 30)
  51802244

  iex> TwoFactorInACan.Totp.time_interval(interval_seconds: 60)
  25901122

  iex> TwoFactorInACan.Totp.time_interval(injected_timestamp: 1554067403)
  51802246

  iex> TwoFactorInACan.Totp.time_interval(injected_timestamp: 60, interval_seconds: 10)
  6
  ```
  """
  # Spec fixed: the original `(key: :atom)` described a keyword list with a
  # literal `:key` key of type `:atom`, which is not the actual contract.
  @spec time_interval(keyword()) :: integer()
  def time_interval(opts \\ []) do
    offset = Keyword.get(opts, :offset_seconds, 0)
    interval_seconds = Keyword.get(opts, :interval_seconds, 30)
    seconds_since_epoch = Keyword.get(opts, :injected_timestamp, now()) + offset
    # Float division + trunc (rather than div/2) so non-integer injected
    # timestamps keep working.
    intervals_since_epoch = seconds_since_epoch / interval_seconds
    trunc(intervals_since_epoch)
  end

  @doc """
  Verifies that the provided TOTP token was generated using the provided
  secret.

  This function uses the secret to generate a token. It then compares the
  generated token to the supplied token. If they match, then it can be
  probabilistically inferred that the entity that supplied the token also knew
  the secret.

  This function allows a number of options:

  - `:acceptable_past_tokens` (Default: `0`) - The number of past tokens which
    should result in this function returning true. Setting this to `1` can be a
    friendly way to allow users to still verify if they have taken too long to
    submit their token. It should be noted that this does result in two tokens
    being valid instead of one, which makes a valid token easier to guess.
    This value should not be set too high or security is greatly compromised.
  - `:acceptable_future_tokens` (Default: `0`) - The number of future tokens
    which should result in this function returning true. It should be noted
    that setting this to a nonzero value will allow more tokens to be valid and
    thus make a valid token easier to guess. This value should not be set too
    high or security is greatly compromised.
  - `:token_length` (Default: 6) - the length of the generated token. A longer
    token is harder to guess and thus more secure. A longer token can also be
    more difficult for users to accurately transmit. Although everything in
    `TwoFactorInACan` supports variable token length, you should be sure that
    other apps and programs used support the token length set here.
  - `:offset_seconds` (Default: `0`) - The number of seconds to offset the
    current timestamp by. If the current timestamp was 600 and the offset was
    60, then the timestamp of 660 would be used to calculate the time interval.
    This can be useful to account for drift or difference in clock times
    between two entities.
  - `:interval_seconds` (Default: `30`) - The number of seconds that must pass
    before a new time_interval (and thus TOTP token) is returned by this
    function. This should probably never be anything but the default (30
    seconds) during actual use, as nearly all apps that generate TOTP tokens
    assume a 30 second interval.
  - `:injected_timestamp` (default: current time) - The unix timestamp to use
    when calculating the time interval. This should only be used during testing
    to ensure the same token or interval is always returned. When this option
    is not supplied, the current timestamp is used.
  - `:secret_format` (default: `:binary`) - The format of the passed in secret.
    Can be one of `:binary`, `:base32`, or `:base64`.

  ## Examples

  ```elixir
  iex> secret = TwoFactorInACan.Secrets.generate_totp_secret()
  iex> current_token = TwoFactorInACan.Totp.current_token_value(secret)
  iex> TwoFactorInACan.Totp.same_secret?(secret, current_token)
  true
  ```
  """
  def same_secret?(secret, token, opts \\ []) do
    acceptable_future_tokens = Keyword.get(opts, :acceptable_future_tokens, 0)
    acceptable_past_tokens = Keyword.get(opts, :acceptable_past_tokens, 0)
    interval_seconds = Keyword.get(opts, :interval_seconds, 30)
    {offset_seconds, opts} = Keyword.pop(opts, :offset_seconds, 0)

    # Check every interval in the accepted window; any match verifies the
    # token (constant-time comparison is handled by token equality semantics
    # upstream — NOTE(review): `==` here is not constant-time; confirm
    # whether timing is a concern for this library).
    time_intervals_to_check = -acceptable_past_tokens..acceptable_future_tokens

    Enum.any?(time_intervals_to_check, fn offset ->
      this_interval_offset_seconds = offset * interval_seconds + offset_seconds
      opts_with_offset = Keyword.put(opts, :offset_seconds, this_interval_offset_seconds)
      token == current_token_value(secret, opts_with_offset)
    end)
  end

  # Current unix timestamp in seconds.
  defp now do
    DateTime.utc_now() |> DateTime.to_unix(:second)
  end
end
|
lib/totp/totp.ex
| 0.90171 | 0.932207 |
totp.ex
|
starcoder
|
defmodule ServerTimingPlug do
  @moduledoc """
  ServerTimingPlug is a Plug that can be used to generate an HTTP Server-Timing
  header so that your browser can display timing metrics for a given request.
  For more details on Server-Timing see the MDN documentation
  https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Server-Timing.

  ## Usage

  To use ServerTimingPlug in your application, open up your `endpoint.ex` file and
  add the following entry:

  `plug ServerTimingPlug`

  With that in place, you can call `ServerTimingPlug.capture_timing` from anywhere
  within your project, and your timings will be available in your browser's developer
  console when the response is received. An important thing to note here is that
  you must call `ServerTimingPlug.capture_timing` from within the same Phoenix process
  that is handling the request. The reason for this being that `ServerTimingPlug` uses the
  Process Dictionary under the hood and it is only able to add timing entries if
  `ServerTimingPlug.capture_timing` is called from within the same process. Look at the
  function documentation for `ServerTimingPlug.capture_timing/2` and
  `ServerTimingPlug.capture_timing/3` to see how to capture timings. Be sure that each
  captured timing entry has a unique name as per
  https://w3c.github.io/server-timing/#the-server-timing-header-field.

  ## Configuration

  ServerTimingPlug can be configured in a number of ways. It can be statically configured via
  the options passed to it in `endpoint.ex`, it can be configured via environment variables,
  or it can be configured via the application configuration (Elixir's `Config` module).
  To configure ServerTimingPlug via the plug entry, you can do the following:

  `plug ServerTimingPlug, header_unit: :millisecond, enabled: true`

  In this case, `ServerTimingPlug` is statically configured and those are the options that
  will always be in effect. If you want to dynamically control the options (for example
  perhaps you want to have this plug enabled in your Dev/QA/Staging environments but disabled
  in your production environment but still ship the same build artifact), you can do the following:

  `plug ServerTimingPlug, header_unit: :millisecond, enabled: {:system, "SERVER_TIMING_PLUG_ENABLED"}`

  If instead you want to configure ServerTimingPlug via Elixir's `Config`, you can do the following:

  `plug ServerTimingPlug, header_unit: :config, enabled: :config`

  and in your `releases.exs` or `prod.exs` file add the following:

  ```elixir
  config :server_timing_plug, header_unit: :millisecond, enabled: true
  ```

  To summarize, below is a breakdown of all the options along with their possible values:

  - `header_unit`: The time unit that the Server-Timing header gets converted to
    - `:second`
    - `:millisecond`
    - `:microsecond`
    - `:nanosecond`
    - `:native`
    - `:config`
    - `{:system, "YOUR_ENV_VAR"}`
  - `enabled`: Is the ServerTimingPlug enabled and capturing timings
    - `true`
    - `false`
    - `:config`
    - `{:system, "YOUR_ENV_VAR"}`
  """

  @behaviour Plug

  @typedoc """
  Valid time units for when calling `capture_timing/2` and `capture_timing/3` with a tuple.
  These are the valid time units that can be passed to `System.convert_time_unit/3` to convert
  the time entry.
  """
  @type timing_unit :: :second | :millisecond | :microsecond | :nanosecond | :native

  require Logger

  alias __MODULE__
  alias Plug.Conn
  alias ServerTimingPlug.{ConfigOpts, TimingEntry}

  @impl true
  def init(opts) do
    # Static defaults are resolved once here; deferred values (:config and
    # {:system, env_var}) are resolved on every request in call/2.
    header_unit = Keyword.get(opts, :header_unit, :millisecond)
    enabled = Keyword.get(opts, :enabled, true)
    ConfigOpts.new(header_unit, enabled)
  end

  @impl true
  def call(conn, opts) do
    updated_opts =
      opts
      |> resolve_header_unit()
      |> resolve_enabled()

    # Set initial state for this request's server timings. The Process
    # Dictionary is used so capture_timing/3 can be called from anywhere in
    # the request-handling process without threading the conn through.
    Process.put(ServerTimingPlug, {updated_opts, []})

    # Register callback to be invoked right before the response is sent.
    Conn.register_before_send(conn, &attach_timings/1)
  end

  @doc """
  Store a server timing entry in the Process Dictionary.

  Below are the arguments that you can pass to `capture_timing/3`:

  - `name`: The name of the timing event. Be sure to use only alphanumeric characters, underscores
    and periods to ensure that the browser can report the timing correctly.
  - `duration`: The time of the event that you want to track. If passing in just an integer, it is
    assumed that is in `:native` time. To specify the unit of measurement for the provided duration,
    use the `{duration, :unit}` form like `{580, :millisecond}` for example. :native is the assumed
    duration type as you should be using `System.monotonic_time/0` to time your various functions or
    code blocks.
  - `description` (optional): A more in depth description of your event that you are timing.
  """
  @spec capture_timing(String.t(), integer() | {integer(), timing_unit()}, String.t() | nil) :: :ok
  def capture_timing(name, duration, description \\ nil)

  def capture_timing(name, {duration, unit}, description) do
    case Process.get(ServerTimingPlug) do
      {%ConfigOpts{enabled: true} = opts, timings_list} ->
        # Normalize everything to :native units on capture; entries are
        # prepended (O(1)) and reversed once when the header is built.
        time_in_native = System.convert_time_unit(duration, unit, :native)
        updated_timings_list = [TimingEntry.new(name, time_in_native, description) | timings_list]
        Process.put(ServerTimingPlug, {opts, updated_timings_list})
        :ok

      _ ->
        # Plug disabled, or called from a process other than the one
        # handling the request — silently drop the timing.
        :ok
    end
  end

  def capture_timing(name, duration, description) do
    capture_timing(name, {duration, :native}, description)
  end

  @doc """
  The callback that is invoked by `Plug.Conn.register_before_send/2`.
  Given a `%Plug.Conn{}` struct, `attach_timings/1` formats the timing values
  residing in the Process Dictionary, generates the `Server-Timing` header value,
  attaches the header and then returns the updated `%Plug.Conn{}` struct.
  """
  @spec attach_timings(Plug.Conn.t()) :: Plug.Conn.t()
  def attach_timings(%Conn{} = conn) do
    case Process.get(ServerTimingPlug) do
      {%ConfigOpts{enabled: true} = opts, timings_list} ->
        timing_header =
          timings_list
          |> Enum.reverse()
          |> format_timing_header(opts)

        # FIX: Plug requires response header keys to be lowercase — an
        # uppercase key such as "Server-Timing" raises
        # Plug.Conn.InvalidHeaderError under the test adapter. HTTP header
        # names are case-insensitive, so "server-timing" is equivalent.
        Conn.put_resp_header(conn, "server-timing", timing_header)

      _ ->
        conn
    end
  end

  # Renders the captured entries as a comma-separated Server-Timing header
  # value: `name;dur=X` or `name;desc="...";dur=X` per entry.
  defp format_timing_header(timings_list, %ConfigOpts{} = opts) do
    Enum.map_join(timings_list, ", ", fn %TimingEntry{} = timing_entry ->
      formatted_duration = format_duration(timing_entry.duration, opts.header_unit)

      case timing_entry.description do
        nil -> "#{timing_entry.name};dur=#{formatted_duration}"
        description -> "#{timing_entry.name};desc=\"#{description}\";dur=#{formatted_duration}"
      end
    end)
  end

  # Converts a :native duration into the configured header unit. Decimal
  # division avoids binary floating point artifacts in the rendered value.
  defp format_duration(duration, header_unit) do
    duration
    |> Decimal.div(System.convert_time_unit(1, header_unit, :native))
    |> Decimal.to_string(:normal)
  end

  defp resolve_header_unit(%ConfigOpts{header_unit: :config} = opts) do
    header_unit = Application.get_env(:server_timing_plug, :header_unit)
    %{opts | header_unit: header_unit}
  end

  defp resolve_header_unit(%ConfigOpts{header_unit: {:system, env_var}} = opts) do
    # NOTE(review): an unset or unrecognized env var value raises a
    # CaseClauseError here — fail fast on misconfiguration (preserved from
    # the original implementation).
    header_unit =
      case System.get_env(env_var) do
        "second" -> :second
        "millisecond" -> :millisecond
        "microsecond" -> :microsecond
        "nanosecond" -> :nanosecond
        "native" -> :native
      end

    %{opts | header_unit: header_unit}
  end

  defp resolve_header_unit(opts), do: opts

  defp resolve_enabled(%ConfigOpts{enabled: :config} = opts) do
    enabled = Application.get_env(:server_timing_plug, :enabled)
    %{opts | enabled: enabled}
  end

  defp resolve_enabled(%ConfigOpts{enabled: {:system, env_var}} = opts) do
    # Same fail-fast behavior as resolve_header_unit/1 for invalid values.
    enabled =
      case System.get_env(env_var) do
        "true" -> true
        "false" -> false
      end

    %{opts | enabled: enabled}
  end

  defp resolve_enabled(opts), do: opts
end
|
lib/server_timing_plug.ex
| 0.918818 | 0.809088 |
server_timing_plug.ex
|
starcoder
|
defmodule Faker.Company.En do
import Faker, only: [sampler: 2]
alias Faker.Person.En, as: Person
@moduledoc """
Functions for company data in English
"""
# NOTE: `sampler/2` (a macro from Faker) defines a public zero-arity function
# that returns a pseudo-random element of the given list, drawn from Faker's
# seeded random state — which is what keeps the doctests deterministic.
# The order of list entries is therefore significant: reordering them would
# change sampled output and break the doctests.
@doc """
Returns a random complete English business related bullshit
## Examples
iex> Faker.Company.En.bs()
"syndicate e-business e-business"
iex> Faker.Company.En.bs()
"scale global metrics"
iex> Faker.Company.En.bs()
"optimize scalable markets"
iex> Faker.Company.En.bs()
"implement out-of-the-box content"
"""
@spec bs() :: String.t()
def bs, do: "#{bullshit_prefix()} #{bullshit()} #{bullshit_suffix()}"
@doc """
Returns a random English business related bullshit
## Examples
iex> Faker.Company.En.bullshit()
"web-enabled"
iex> Faker.Company.En.bullshit()
"e-business"
iex> Faker.Company.En.bullshit()
"web-enabled"
iex> Faker.Company.En.bullshit()
"next-generation"
"""
@spec bullshit() :: String.t()
sampler(:bullshit, [
"clicks-and-mortar",
"value-added",
"vertical",
"proactive",
"robust",
"revolutionary",
"scalable",
"leading-edge",
"innovative",
"intuitive",
"strategic",
"e-business",
"mission-critical",
"sticky",
"one-to-one",
"24/7",
"end-to-end",
"global",
"B2B",
"B2C",
"granular",
"frictionless",
"virtual",
"viral",
"dynamic",
"24/365",
"best-of-breed",
"killer",
"magnetic",
"bleeding-edge",
"web-enabled",
"interactive",
"dot-com",
"sexy",
"back-end",
"real-time",
"efficient",
"front-end",
"distributed",
"seamless",
"extensible",
"turn-key",
"world-class",
"open-source",
"cross-platform",
"cross-media",
"synergistic",
"bricks-and-clicks",
"out-of-the-box",
"enterprise",
"integrated",
"impactful",
"wireless",
"transparent",
"next-generation",
"cutting-edge",
"user-centric",
"visionary",
"customized",
"ubiquitous",
"plug-and-play",
"collaborative",
"compelling",
"holistic",
"rich"
])
@doc """
Returns a random English business related bullshit prefix
## Examples
iex> Faker.Company.En.bullshit_prefix()
"syndicate"
iex> Faker.Company.En.bullshit_prefix()
"visualize"
iex> Faker.Company.En.bullshit_prefix()
"incentivize"
iex> Faker.Company.En.bullshit_prefix()
"scale"
"""
@spec bullshit_prefix() :: String.t()
sampler(:bullshit_prefix, [
"implement",
"utilize",
"integrate",
"streamline",
"optimize",
"evolve",
"transform",
"embrace",
"enable",
"orchestrate",
"leverage",
"reinvent",
"aggregate",
"architect",
"enhance",
"incentivize",
"morph",
"empower",
"envisioneer",
"monetize",
"harness",
"facilitate",
"seize",
"disintermediate",
"synergize",
"strategize",
"deploy",
"brand",
"grow",
"target",
"syndicate",
"synthesize",
"deliver",
"mesh",
"incubate",
"engage",
"maximize",
"benchmark",
"expedite",
"reintermediate",
"whiteboard",
"visualize",
"repurpose",
"innovate",
"scale",
"unleash",
"drive",
"extend",
"engineer",
"revolutionize",
"generate",
"exploit",
"transition",
"e-enable",
"iterate",
"cultivate",
"matrix",
"productize",
"redefine",
"recontextualize"
])
@doc """
Returns a random English business related bullshit suffix
## Examples
iex> Faker.Company.En.bullshit_suffix()
"e-services"
iex> Faker.Company.En.bullshit_suffix()
"niches"
iex> Faker.Company.En.bullshit_suffix()
"e-business"
iex> Faker.Company.En.bullshit_suffix()
"systems"
"""
@spec bullshit_suffix() :: String.t()
sampler(:bullshit_suffix, [
"synergies",
"web-readiness",
"paradigms",
"markets",
"partnerships",
"infrastructures",
"platforms",
"initiatives",
"channels",
"eyeballs",
"communities",
"ROI",
"solutions",
"e-tailers",
"e-services",
"action-items",
"portals",
"niches",
"technologies",
"content",
"vortals",
"supply-chains",
"convergence",
"relationships",
"architectures",
"interfaces",
"e-markets",
"e-commerce",
"systems",
"bandwidth",
"infomediaries",
"models",
"mindshare",
"deliverables",
"users",
"schemas",
"networks",
"applications",
"metrics",
"e-business",
"functionalities",
"experiences",
"web services",
"methodologies"
])
@doc """
Returns a random English business related buzzword
## Examples
iex> Faker.Company.En.buzzword()
"upward-trending"
iex> Faker.Company.En.buzzword()
"full-range"
iex> Faker.Company.En.buzzword()
"uniform"
iex> Faker.Company.En.buzzword()
"tertiary"
"""
@spec buzzword() :: String.t()
sampler(:buzzword, [
"24 hour",
"24/7",
"3rd generation",
"4th generation",
"5th generation",
"6th generation",
"actuating",
"analyzing",
"assymetric",
"asynchronous",
"attitude-oriented",
"background",
"bandwidth-monitored",
"bi-directional",
"bifurcated",
"bottom-line",
"clear-thinking",
"client-driven",
"client-server",
"coherent",
"cohesive",
"composite",
"context-sensitive",
"contextually-based",
"content-based",
"dedicated",
"demand-driven",
"didactic",
"directional",
"discrete",
"disintermediate",
"dynamic",
"eco-centric",
"empowering",
"encompassing",
"even-keeled",
"executive",
"explicit",
"exuding",
"fault-tolerant",
"foreground",
"fresh-thinking",
"full-range",
"global",
"grid-enabled",
"heuristic",
"high-level",
"holistic",
"homogeneous",
"human-resource",
"hybrid",
"impactful",
"incremental",
"intangible",
"interactive",
"intermediate",
"leading edge",
"local",
"logistical",
"maximized",
"methodical",
"mission-critical",
"mobile",
"modular",
"motivating",
"multimedia",
"multi-state",
"multi-tasking",
"national",
"needs-based",
"neutral",
"next generation",
"non-volatile",
"object-oriented",
"optimal",
"optimizing",
"radical",
"real-time",
"reciprocal",
"regional",
"responsive",
"scalable",
"secondary",
"solution-oriented",
"stable",
"static",
"systematic",
"systemic",
"system-worthy",
"tangible",
"tertiary",
"transitional",
"uniform",
"upward-trending",
"user-facing",
"value-added",
"web-enabled",
"well-modulated",
"zero administration",
"zero defect",
"zero tolerance"
])
@doc """
Returns a random English business related buzzword prefix
## Examples
iex> Faker.Company.En.buzzword_prefix()
"Configurable"
iex> Faker.Company.En.buzzword_prefix()
"Advanced"
iex> Faker.Company.En.buzzword_prefix()
"Grass-roots"
iex> Faker.Company.En.buzzword_prefix()
"Automated"
"""
@spec buzzword_prefix() :: String.t()
sampler(:buzzword_prefix, [
"Adaptive",
"Advanced",
"Ameliorated",
"Assimilated",
"Automated",
"Balanced",
"Business-focused",
"Centralized",
"Cloned",
"Compatible",
"Configurable",
"Cross-group",
"Cross-platform",
"Customer-focused",
"Customizable",
"Decentralized",
"De-engineered",
"Devolved",
"Digitized",
"Distributed",
"Diverse",
"Down-sized",
"Enhanced",
"Enterprise-wide",
"Ergonomic",
"Exclusive",
"Expanded",
"Extended",
"Face to face",
"Focused",
"Front-line",
"Fully-configurable",
"Function-based",
"Fundamental",
"Future-proofed",
"Grass-roots",
"Horizontal",
"Implemented",
"Innovative",
"Integrated",
"Intuitive",
"Inverse",
"Managed",
"Mandatory",
"Monitored",
"Multi-channelled",
"Multi-lateral",
"Multi-layered",
"Multi-tiered",
"Networked",
"Object-based",
"Open-architected",
"Open-source",
"Operative",
"Optimized",
"Optional",
"Organic",
"Organized",
"Persevering",
"Persistent",
"Phased",
"Polarised",
"Pre-emptive",
"Proactive",
"Profit-focused",
"Profound",
"Programmable",
"Progressive",
"Public-key",
"Quality-focused",
"Reactive",
"Realigned",
"Re-contextualized",
"Re-engineered",
"Reduced",
"Reverse-engineered",
"Right-sized",
"Robust",
"Seamless",
"Secured",
"Self-enabling",
"Sharable",
"Stand-alone",
"Streamlined",
"Switchable",
"Synchronised",
"Synergistic",
"Synergized",
"Team-oriented",
"Total",
"Triple-buffered",
"Universal",
"Up-sized",
"Upgradable",
"User-centric",
"User-friendly",
"Versatile",
"Virtual",
"Visionary",
"Vision-oriented"
])
@doc """
Returns a random English business related buzzword suffix
## Examples
iex> Faker.Company.En.buzzword_suffix()
"encoding"
iex> Faker.Company.En.buzzword_suffix()
"standardization"
iex> Faker.Company.En.buzzword_suffix()
"Graphical User Interface"
iex> Faker.Company.En.buzzword_suffix()
"product"
"""
@spec buzzword_suffix() :: String.t()
sampler(:buzzword_suffix, [
"ability",
"access",
"adapter",
"algorithm",
"alliance",
"analyzer",
"application",
"approach",
"architecture",
"archive",
"artificial intelligence",
"array",
"attitude",
"benchmark",
"budgetary management",
"capability",
"capacity",
"challenge",
"circuit",
"collaboration",
"complexity",
"concept",
"conglomeration",
"contingency",
"core",
"customer loyalty",
"database",
"data-warehouse",
"definition",
"emulation",
"encoding",
"encryption",
"extranet",
"firmware",
"flexibility",
"focus group",
"forecast",
"frame",
"framework",
"function",
"functionalities",
"Graphic Interface",
"groupware",
"Graphical User Interface",
"hardware",
"help-desk",
"hierarchy",
"hub",
"implementation",
"info-mediaries",
"infrastructure",
"initiative",
"installation",
"instruction set",
"interface",
"internet solution",
"intranet",
"knowledge user",
"knowledge base",
"local area network",
"leverage",
"matrices",
"matrix",
"methodology",
"middleware",
"migration",
"model",
"moderator",
"monitoring",
"moratorium",
"neural-net",
"open architecture",
"open system",
"orchestration",
"paradigm",
"parallelism",
"policy",
"portal",
"pricing structure",
"process improvement",
"product",
"productivity",
"project",
"projection",
"protocol",
"secured line",
"service-desk",
"software",
"solution",
"standardization",
"strategy",
"structure",
"success",
"superstructure",
"support",
"synergy",
"system engine",
"task-force",
"throughput",
"time-frame",
"toolset",
"utilisation",
"website",
"workforce"
])
@doc """
Returns a random complete English catch phrase
## Examples
iex> Faker.Company.En.catch_phrase()
"Configurable full-range Graphical User Interface"
iex> Faker.Company.En.catch_phrase()
"Automated mission-critical pricing structure"
iex> Faker.Company.En.catch_phrase()
"Profit-focused bottom-line algorithm"
iex> Faker.Company.En.catch_phrase()
"Self-enabling systematic initiative"
"""
@spec catch_phrase() :: String.t()
def catch_phrase, do: "#{buzzword_prefix()} #{buzzword()} #{buzzword_suffix()}"
@doc """
Returns complete English company name
## Examples
iex> Faker.Company.En.name()
"Hayes Inc"
iex> Faker.Company.En.name()
"<NAME> and Hane"
iex> Faker.Company.En.name()
"Schiller, Rogahn and Hartmann"
iex> Faker.Company.En.name()
"Murphy-Metz"
"""
@spec name() :: String.t()
def name, do: name(Faker.random_between(0, 2))
# Dispatch on a random format index:
# 0 = "LastName Suffix", 1 = hyphenated pair, 2 = three-name listing.
defp name(0), do: "#{Person.last_name()} #{suffix()}"
defp name(1), do: "#{Person.last_name()}-#{Person.last_name()}"
defp name(2) do
"#{Person.last_name()}, #{Person.last_name()} and #{Person.last_name()}"
end
@doc """
Returns a random type of business entity
## Examples
iex> Faker.Company.En.suffix()
"Inc"
iex> Faker.Company.En.suffix()
"and Sons"
iex> Faker.Company.En.suffix()
"Inc"
iex> Faker.Company.En.suffix()
"Ltd"
"""
@spec suffix() :: String.t()
sampler(:suffix, [
"Inc",
"and Sons",
"LLC",
"Group",
"Ltd"
])
end
|
lib/faker/company/en.ex
| 0.643889 | 0.436562 |
en.ex
|
starcoder
|
defmodule Mnemonix.Application do
  @moduledoc """
  Automatically starts stores when your application starts.

  Mnemonix can manage your stores for you. To do so, it looks in your config files for named stores:

      config :mnemonix, stores: [:foo, :bar]

  For all stores so listed, it will check for store-specific configuration:

      config :mnemonix, :foo, {Memonix.ETS.Store, table: :my_ets_table, name: :my_ets}

  If no configuration is found for a named store, it will use the default configuration specified
  in `default/0`.

  The name of the store in your config will be the reference you pass to `Mnemonix` to interact with it.
  This can be overriden by providing a `:name` in the options.

  Given the config above, `:foo` would refer to a default Map-backed store,
  and `:bar` to an ETS-backed store named `:my_ets` that uses a table named `:my_ets_table`,
  both available to you at boot time without writing a line of code:

      Application.ensure_started(:mnemonix)

      Mnemonix.put(:foo, :a, :b)
      Mnemonix.get(:foo, :a)
      #=> :b

      Mnemonix.put(:my_ets, :a, :b)
      Mnemonix.get(:my_ets, :a)
      #=> :b
  """

  use Application

  @doc """
  Starts the `:mnemonix` application.

  Finds stores in your application configuration and brings them up when your app starts.

  Reads from the `:mnemonix` application's `:stores` configuration
  to detect store specifications to automatically supervise.

  If a store named in the configuration has its own entry under the `:mnemonix` application configuration,
  that specification will be used to configure the store. If no specification is provided, Mnemonix will use
  the `default` specification documented in `default/0`.

  ### Examples

      config :mnemonix, stores: [Foo, Bar]
      config :mnemonix, Bar: {Mnemonix.Stores.ETS, table: Baz}
  """
  @impl Application
  @spec start(Application.start_type(), [Mnemonix.spec()]) ::
          {:ok, pid} | {:error, reason :: term}
  def start(_type, [default]) do
    default
    |> tree()
    |> Supervisor.start_link(name: Mnemonix.Supervisor, strategy: :rest_for_one)
  end

  # Builds the supervision tree using the library's default store specification.
  @spec tree() :: [:supervisor.child_spec()]
  def tree, do: specification() |> tree()

  # Builds the supervision tree for a given default store specification:
  # a single supervisor in charge of all configured stores.
  @spec tree(Mnemonix.spec()) :: [:supervisor.child_spec()]
  def tree(default) do
    [
      prepare_supervisor_spec(
        Mnemonix.Store.Supervisor,
        managed_stores(default),
        strategy: :one_for_one
      )
    ]
  end

  # Wraps `children` in a permanent supervisor child spec registered under
  # `module`'s name, forwarding `opts` (e.g. :strategy) to Supervisor.start_link/2.
  defp prepare_supervisor_spec(module, children, opts) do
    %{
      id: module,
      start: {Supervisor, :start_link, [children, Keyword.put(opts, :name, module)]},
      restart: :permanent,
      type: :supervisor
    }
  end

  @doc """
  Convenience function to preview the stores that `Mnemonix.Application` will manage for you.
  """
  @spec managed_stores :: [Supervisor.child_spec()]
  def managed_stores, do: specification() |> managed_stores()

  @doc """
  Convenience function to see the configuration of the stores that `Mnemonix.Application` manages for you.

  Provide a store specification to compare the generated configuration against
  the `default` `specification/0` that Mnemonix uses by default.
  """
  @spec managed_stores(Mnemonix.spec()) :: [Supervisor.child_spec()]
  def managed_stores(default) do
    :mnemonix
    |> Application.get_env(:stores, [])
    |> Enum.map(fn name ->
      # Fall back to the provided default spec for stores with no own config.
      :mnemonix
      |> Application.get_env(name, default)
      |> prepare_child_spec(name)
    end)
  end

  # Names the store after its config key unless the spec already carries a :name.
  defp prepare_child_spec({impl, opts}, name) do
    {impl, Keyword.put_new(opts, :name, name)}
  end

  @doc """
  Convenience function to access the default `Mnemonix` store specification defined in its `mix.exs`.

  This is the specification used for stores named in `config :mnemonix, :stores`
  without corresponding configuration under `config :mnemonix, <store_name>`.
  """
  @spec specification :: Mnemonix.spec()
  def specification do
    # The default spec is the single start argument declared in the
    # application's `:mod` entry ({__MODULE__, [default]}).
    :mnemonix
    |> Application.spec()
    |> Keyword.get(:mod)
    |> elem(1)
    |> List.first()
  end

  @doc """
  Convenience function to access the current hex version of the `Mnemonix` application.
  """
  def version do
    # FIX: the original wrapped this match in `with ... = ...`, which adds
    # nothing — a plain match raises MatchError identically if :vsn is absent.
    # :vsn comes from the application resource file (a charlist).
    {:ok, version} = :application.get_key(:mnemonix, :vsn)
    version
  end
end
|
lib/mnemonix/application.ex
| 0.839076 | 0.422922 |
application.ex
|
starcoder
|
defmodule OcppModel.V20.FieldTypes do
  @moduledoc """
  Field types or subclasses of the OCPP Model

  Field names are deliberately camelCase — presumably to mirror the OCPP 2.0
  JSON wire format directly (TODO confirm against the serializer).
  The `# 0..N` comments note the maximum string lengths from the OCPP spec.
  """

  defmodule AdditionalInfoType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :additionalIdToken, String.t(), enforce: true # 0..36 identifierString
      field :type, String.t(), enforce: true # 0..50
    end
  end

  defmodule ChargingStationType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :serialNumber, String.t() # 0..25
      field :model, String.t(), enforce: true # 0..20
      field :vendorName, String.t(), enforce: true # 0..50
      field :firmwareVersion, String.t() # 0..50
      field :modem, ModemType.t()
    end
  end

  defmodule EvseType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :id, integer(), enforce: true
      # NOTE(review): snake_case is inconsistent with the camelCase used by
      # every other field here (OCPP names it connectorId) — verify against
      # the JSON encoder before renaming, as a rename changes the interface.
      field :connector_id, integer()
    end
  end

  defmodule IdTokenType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :idToken, String.t(), enforce: true # 0..36
      field :type, String.t(), enforce: true # IdTokenEnumType
      # FIX: `List[AdditionalInfoType.t()]` is not a valid typespec —
      # `Mod[...]` is Access syntax, not a type. A list of structs is
      # written `[AdditionalInfoType.t()]`.
      field :additionalInfo, [AdditionalInfoType.t()]
    end
  end

  defmodule IdTokenInfoType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :status, String.t() # AuthorizationStatusEnumType
    end
  end

  defmodule MessageContentType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :format, String.t(), enforce: true # MessageFormatEnumtype
      field :language, String.t() # 0..8
      field :content, String.t() # 0..512
    end
  end

  defmodule MeterValueType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :timestamp, String.t(), enforce: true #dateTime
      # NOTE(review): OCPP 2.0 allows multiple sampled values per meter value;
      # this models a single one — confirm intended cardinality before changing.
      field :sampledValue, SampledValueType.t(), enforce: true
    end
  end

  defmodule ModemType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :iccid, String.t() # 0..20
      field :imsi, String.t() # 0..20
    end
  end

  # defmodule OCSPRequestDataType do
  #   @moduledoc false
  #   use TypedStruct
  #   typedstruct do
  #     field :hashAlgorithm, String.t(), enforce: true # HashAlgorithmEnumType
  #     field :issuerNameHash, String.t(), enforce: true # 0..512
  #     field :issuerKeyHash, String.t(), enforce: true # 0..128
  #     field :serialNumber, String.t(), enforce: true # 0..40
  #     field :responderURL, String.t(), enforce: true # 0..512
  #   end
  # end

  defmodule SampledValueType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :value, number(), enforce: true
      field :context, String.t() # ReadingContextEnumType
      field :measurand, String.t() # MeasurerandEnumType
      field :phase, String.t() # PhaseEnumType
      field :location, String.t() # LocationEnumType
      field :signedMeterValue, SignedMeterValueType.t()
      field :unitOfMeasure, UnitOfMeasureType.t()
    end
  end

  defmodule SignedMeterValueType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :signedMeterData, String.t(), enforce: true # 0..2500
      field :signingMethod, String.t(), enforce: true # 0..50
      field :encodingMethod, String.t(), enforce: true # 0..50
      field :publicKey, String.t(), enforce: true # 0..2500
    end
  end

  defmodule StatusInfoType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :reasonCode, String.t(), enforce: true # 0..20
      field :additionalInfo, String.t() # 0..512
    end
  end

  defmodule TransactionType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :transactionId, String.t(), enforce: true # 0..36
      field :chargingState, String.t() # ChargingStateEnumType
      field :timeSpentCharging, integer()
      field :stoppedReason, String.t() # ReasonEnumType
      field :remoteStartId, integer()
    end
  end

  defmodule UnitOfMeasureType do
    @moduledoc false
    use TypedStruct

    typedstruct do
      field :unit, String.t() # 0..20
      field :multiplier, integer()
    end
  end
end
|
lib/ocpp_model/v20/fieldtypes.ex
| 0.655667 | 0.427905 |
fieldtypes.ex
|
starcoder
|
defmodule Ecto.Adapters.Pool do
@moduledoc """
Behaviour for using a pool of connections.
"""
use Behaviour
@typedoc """
A pool process
"""
@type t :: atom | pid
@typedoc """
Opaque connection reference.
Use inside `run/4` and `transaction/4` to retrieve the connection module and
pid or break the transaction.
"""
@opaque ref :: {__MODULE__, module, t}
@typedoc """
The depth of nested transactions.
"""
@type depth :: non_neg_integer
@typedoc """
The time in microseconds spent waiting for a connection from the pool.
"""
@type queue_time :: non_neg_integer
@doc """
Start a pool of connections.
`module` is the connection module, which should define the
`Ecto.Adapters.Connection` callbacks, and `opts` are its (and the pool's)
options.
A pool should support the following options:
* `:name` - The name of the pool
* `:size` - The number of connections to keep in the pool
Returns `{:ok, pid}` on starting the pool.
Returns `{:error, reason}` if the pool could not be started. If the `reason`
is {:already_started, pid}}` a pool with the same name has already been
started.
"""
defcallback start_link(module, opts) ::
{:ok, pid} | {:error, any} when opts: Keyword.t
@doc """
Checkout a worker/connection from the pool.
The connection should not be closed if the calling process exits without
returning the connection.
Returns `{:ok, worker, conn, queue_time}` on success, where `worker` is the
worker term and conn is a 2-tuple contain the connection's module and
pid. The `conn` tuple can be retrieved inside a `transaction/4` with
`connection/1`.
Returns `{:error, :noproc}` if the pool is not alive and
`{:error, :noconnect}` if a connection is not available.
"""
defcallback checkout(t, timeout) ::
{:ok, worker, conn, queue_time} |
{:error, :noproc | :noconnect} when worker: any, conn: {module, pid}
@doc """
Checkin a worker/connection to the pool.
Called when the top level `run/4` finishes, if `break/2` was not called
inside the fun.
"""
defcallback checkin(t, worker, timeout) :: :ok when worker: any
@doc """
Break the current transaction or run.
Called when the function has failed and the connection should no longer be
available to to the calling process.
"""
defcallback break(t, worker, timeout) :: :ok when worker: any
@doc """
Open a transaction with a connection from the pool.
The connection should be closed if the calling process exits without
returning the connection.
Returns `{:ok, worker, conn, queue_time}` on success, where `worker` is the
worker term and conn is a 2-tuple contain the connection's module and
pid. The `conn` tuple can be retrieved inside a `transaction/4` with
`connection/2`.
Returns `{:error, :noproc}` if the pool is not alive and
`{:error, :noconnect}` if a connection is not available.
"""
defcallback open_transaction(t, timeout) ::
{:ok, worker, conn, queue_time} |
{:error, :noproc | :noconnect} when worker: any, conn: {module, pid}
@doc """
Close the transaction and signal to the worker the work with the connection
is complete.
Called once the transaction at `depth` `1` is finished, if the transaction
is not broken with `break/2`.
"""
defcallback close_transaction(t, worker, timeout) :: :ok when worker: any
@doc """
Runs a fun using a connection from a pool.
The connection will be taken from the pool unless we are inside
a `transaction/4` which, in this case, would already have a conn
attached to it.
Returns the value returned by the function wrapped in a tuple
as `{:ok, value}`.
Returns `{:error, :noproc}` if the pool is not alive or `{:error, :noconnect}`
if no connection is available.
## Examples
Pool.run(mod, pool, timeout,
fn(_conn, queue_time) -> queue_time end)
Pool.transaction(mod, pool, timeout,
fn(_ref, _conn, 1, _queue_time) ->
{:ok, :nested} =
Pool.run(mod, pool, timeout, fn(_conn, nil) ->
:nested
end)
end)
Pool.run(mod, :pool1, timeout,
fn(_conn1, _queue_time1) ->
{:ok, :different_pool} =
Pool.run(mod, :pool2, timeout,
fn(_conn2, _queue_time2) -> :different_pool end)
end)
"""
@spec run(module, t, timeout, ((conn, queue_time | nil) -> result)) ::
{:ok, result} | {:error, :noproc | :noconnect}
when result: var, conn: {module, pid}
def run(pool_mod, pool, timeout, fun) do
# Process-dictionary key under which an open transaction (if any) stores
# its connection info for this pool_mod/pool pair.
ref = {__MODULE__, pool_mod, pool}
case Process.get(ref) do
nil ->
# No enclosing transaction: check a connection out just for this run.
do_run(pool_mod, pool, timeout, fun)
%{conn: conn} ->
# Inside a transaction with a live connection: reuse it. fuse/4 breaks
# the transaction if the fun raises/throws. queue_time is nil because
# no checkout happened here.
{:ok, fuse(ref, timeout, fun, [conn, nil])}
%{} ->
# Entry exists but its :conn was removed by break/2: refuse to run.
{:error, :noconnect}
end
end
@doc """
Carry out a transaction using a connection from a pool.
Once a transaction is opened, all following calls to `run/4` or
`transaction/4` will use the same connection/worker. If `break/2` is invoked,
all operations will return `{:error, :noconnect}` until the end of the
top level transaction.
A transaction returns the value returned by the function wrapped in a tuple
as `{:ok, value}`. Transactions can be nested and the `depth` shows the depth
of nested transaction for the module/pool combination.
Returns `{:error, :noproc}` if the pool is not alive, `{:error, :noconnect}`
if no connection is available or `{:error, :notransaction}` if called inside
a `run/4` fun at depth `0`.
## Examples
Pool.transaction(mod, pool, timeout,
fn(_ref, _conn, 1, queue_time) -> queue_time end)
Pool.transaction(mod, pool, timeout,
fn(ref, _conn, 1, _queue_time) ->
{:ok, :nested} =
Pool.transaction(mod, pool, timeout, fn(_ref, _conn, 2, nil) ->
:nested
end)
end)
Pool.transaction(mod, :pool1, timeout,
fn(_ref1, _conn1, 1, _queue_time1) ->
{:ok, :different_pool} =
Pool.transaction(mod, :pool2, timeout,
fn(_ref2, _conn2, 1, _queue_time2) -> :different_pool end)
end)
Pool.run(mod, pool, timeout,
fn(_conn, _queue_time) ->
{:error, :notransaction} =
Pool.transaction(mod, pool, timeout, fn(_, _, _, _) -> end)
end)
"""
@spec transaction(module, t, timeout,
((ref, conn, depth, queue_time | nil) -> result)) ::
{:ok, result} | {:error, :noproc | :noconnect | :notransaction}
when result: var, conn: {module, pid}
def transaction(pool_mod, pool, timeout, fun) do
ref = {__MODULE__, pool_mod, pool}
case Process.get(ref) do
nil ->
# No transaction in progress: open one (delegates to transaction/5).
transaction(pool_mod, pool, ref, timeout, fun)
%{depth: 0} ->
# An entry at depth 0 means we are not inside a transaction fun —
# per the docs this corresponds to being inside a run/4 fun
# (NOTE(review): presumed from the @doc; the storage on the run path
# is not visible in this chunk).
{:error, :notransaction}
%{conn: _} = info ->
# Live enclosing transaction: nest, reusing its connection.
# queue_time is nil because no checkout happened here.
do_transaction(ref, info, nil, fun)
%{} ->
# Transaction was broken (no :conn): all nested operations fail.
{:error, :noconnect}
end
end
@doc """
Break the active transaction or run.
Calling `connection/1` inside the same transaction or run (at any depth) will
return `{:error, :noconnect}`.
## Examples
Pool.transaction(mod, pool, timout,
fn(ref, conn, 1, _queue_time) ->
{:ok, {_mod, ^conn}} = Pool.connection(ref)
:ok = Pool.break(ref, timeout)
{:error, :noconnect} = Pool.connection(ref)
end)
Pool.transaction(mod, pool, timeout,
fn(ref, _conn, 1, _queue_time) ->
:ok = Pool.break(ref, timeout)
{:error, :noconnect} =
Pool.transaction(mod, pool, timeout, fn(_, _, _, _) -> end)
end)
"""
@spec break(ref, timeout) :: :ok
def break({__MODULE__, pool_mod, pool} = ref, timeout) do
  case Process.get(ref) do
    %{conn: _, worker: worker} = info ->
      # Remove the connection from the process-local state first, so any
      # nested transaction/run in this process sees `:noconnect`, then ask
      # the pool to break the worker's connection.
      _ = Process.put(ref, Map.delete(info, :conn))
      pool_mod.break(pool, worker, timeout)
    %{} ->
      # Connection already removed (broken earlier): breaking is idempotent.
      :ok
  end
end
## Helpers
# Applies `fun` to `args`; if it raises, throws or exits, breaks the current
# transaction/run (the connection may be in an unknown state) and then
# re-raises with the original class, reason and stacktrace.
defp fuse(ref, timeout, fun, args) do
  try do
    apply(fun, args)
  catch
    class, reason ->
      # NOTE(review): System.stacktrace/0 is deprecated on modern Elixir in
      # favor of __STACKTRACE__; kept as-is for the Elixir versions this
      # code targets.
      stack = System.stacktrace()
      break(ref, timeout)
      :erlang.raise(class, reason, stack)
  end
end
# Checks a connection out of the pool, runs `fun` with it, and checks it back
# in on success. If `fun` raises/throws/exits, the worker is broken instead
# of checked in, and the error is re-raised with its original stacktrace.
defp do_run(pool_mod, pool, timeout, fun) do
  case checkout(pool_mod, pool, timeout) do
    {:ok, worker, conn, time} ->
      try do
        fun.(conn, time)
      catch
        class, reason ->
          # NOTE(review): System.stacktrace/0 is deprecated on modern
          # Elixir (use __STACKTRACE__); kept for compatibility.
          stack = System.stacktrace()
          pool_mod.break(pool, worker, timeout)
          :erlang.raise(class, reason, stack)
      else
        res ->
          pool_mod.checkin(pool, worker, timeout)
          {:ok, res}
      end
    {:error, _} = error ->
      error
  end
end
# Checks a worker/connection out of the pool, passing through the expected
# error atoms and raising any other error the pool adapter reports.
defp checkout(pool_mod, pool, timeout) do
  case pool_mod.checkout(pool, timeout) do
    {:ok, _worker, _conn, _queue_time} = success ->
      success
    {:error, reason} when reason in [:noproc, :noconnect] ->
      {:error, reason}
    {:error, exception} ->
      raise exception
  end
end
# Opens a brand new transaction for a process that holds none, guaranteeing
# that the process-local entry is removed and the transaction closed whatever
# way the fun terminates.
defp transaction(pool_mod, pool, ref, timeout, fun) do
  case open_transaction(pool_mod, pool, timeout) do
    {:ok, info, time} ->
      try do
        do_transaction(ref, info, time, fun)
      after
        # Read the (possibly updated) info back out of the process
        # dictionary: `break/2` may have dropped the `:conn` key, in which
        # case `close_transaction/4` becomes a no-op.
        info = Process.delete(ref)
        close_transaction(pool_mod, pool, info, timeout)
      end
    {:error, _} = error ->
      error
  end
end
# Runs `fun` one transaction nesting level deeper: the bumped depth is stored
# in the process dictionary for the duration of the call and the previous
# info is restored afterwards.
defp do_transaction(ref, %{depth: depth, conn: conn} = info, time, fun) do
  depth = depth + 1
  _ = Process.put(ref, %{info | depth: depth})
  try do
    {:ok, fun.(ref, conn, depth, time)}
  after
    # `Process.put/2` returns the value being replaced, i.e. the state the
    # fun left behind. If the inner level lost its connection (e.g. via
    # `break/2`), propagate that loss to the restored outer level too.
    case Process.put(ref, info) do
      %{conn: _} ->
        :ok
      %{} ->
        _ = Process.put(ref, Map.delete(info, :conn))
        :ok
    end
  end
end
# Asks the pool adapter to open a transaction, normalizing the result into
# the process-local info map consumed by `do_transaction/4`. Expected error
# atoms pass through; any other error is raised.
defp open_transaction(pool_mod, pool, timeout) do
  case pool_mod.open_transaction(pool, timeout) do
    {:ok, worker, conn, queue_time} ->
      # Permission granted; depth starts at 0 and is bumped per nesting level.
      {:ok, %{worker: worker, conn: conn, depth: 0}, queue_time}
    {:error, reason} when reason in [:noproc, :noconnect] ->
      {:error, reason}
    {:error, exception} ->
      raise exception
  end
end
# Hands the worker back to the pool, but only while the connection is still
# present; a transaction whose `:conn` key was removed (broken) needs no close.
defp close_transaction(pool_mod, pool, %{conn: _, worker: worker}, timeout) do
  pool_mod.close_transaction(pool, worker, timeout)
end

defp close_transaction(_pool_mod, _pool, %{}, _timeout) do
  :ok
end
end
|
lib/ecto/adapters/pool.ex
| 0.933869 | 0.67336 |
pool.ex
|
starcoder
|
defmodule Coxir.Struct.Webhook do
  @moduledoc """
  Defines methods used to interact with channel webhooks.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#webhook-object)
  for a list of fields and a broader documentation.
  """
  use Coxir.Struct

  @doc false
  def select(pattern)

  @doc """
  Fetches a webhook.
  Returns a webhook object upon success
  or a map containing error information.
  """
  def get(webhook) do
    response = API.request(:get, "webhooks/#{webhook}")
    pretty(response)
  end

  @doc """
  Fetches a webhook through its token.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#get-webhook-with-token)
  for more information.
  """
  @spec get_with_token(String.t, String.t) :: map
  def get_with_token(webhook, token) do
    response = API.request(:get, "webhooks/#{webhook}/#{token}")
    pretty(response)
  end

  @doc """
  Modifies a given webhook.
  Returns a webhook object upon success
  or a map containing error information.
  #### Params
  Must be an enumerable with the fields listed below.
  - `name` - the default name of the webhook
  - `avatar` - image for the default webhook avatar
  - `channel_id` - the new channel id to be moved to
  Refer to [this](https://discord.com/developers/docs/resources/webhook#modify-webhook)
  for a broader explanation on the fields and their defaults.
  """
  @spec edit(String.t, Enum.t) :: map
  def edit(webhook, params) do
    response = API.request(:patch, "webhooks/#{webhook}", params)
    pretty(response)
  end

  @doc """
  Modifies a given webhook through its token.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#modify-webhook-with-token)
  for more information.
  """
  @spec edit_with_token(String.t, String.t, Enum.t) :: map
  def edit_with_token(webhook, token, params) do
    response = API.request(:patch, "webhooks/#{webhook}/#{token}", params)
    pretty(response)
  end

  @doc """
  Changes the name of a given webhook.
  Returns a webhook object upon success
  or a map containing error information.
  """
  @spec set_name(String.t, String.t) :: map
  def set_name(webhook, name) do
    edit(webhook, name: name)
  end

  @doc """
  Changes the avatar of a given webhook.
  Returns a webhook object upon success
  or a map containing error information.
  """
  @spec set_avatar(String.t, String.t) :: map
  def set_avatar(webhook, avatar) do
    edit(webhook, avatar: avatar)
  end

  @doc """
  Changes the channel of a given webhook.
  Returns a webhook object upon success
  or a map containing error information.
  """
  @spec set_channel(String.t, String.t) :: map
  def set_channel(webhook, channel) do
    edit(webhook, channel_id: channel)
  end

  @doc """
  Deletes a given webhook.
  Returns the atom `:ok` upon success
  or a map containing error information.
  """
  @spec delete(String.t) :: :ok | map
  def delete(webhook) do
    API.request(:delete, "webhooks/#{webhook}")
  end

  @doc """
  Deletes a given webhook through its token.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#delete-webhook-with-token)
  for more information.
  """
  @spec delete_with_token(String.t, String.t) :: :ok | map
  def delete_with_token(webhook, token) do
    API.request(:delete, "webhooks/#{webhook}/#{token}")
  end

  @doc """
  Executes a given webhook.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#execute-webhook)
  for more information.
  """
  @spec execute(String.t, String.t, Enum.t, boolean) :: map
  def execute(webhook, token, params, wait \\ false) do
    query = [wait: wait]
    API.request(:post, "webhooks/#{webhook}/#{token}", params, params: query)
  end

  @doc """
  Executes a given *Slack* webhook.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#execute-slackcompatible-webhook)
  for more information.
  """
  @spec execute_slack(String.t, String.t, Enum.t, boolean) :: map
  def execute_slack(webhook, token, params, wait \\ false) do
    query = [wait: wait]
    API.request(:post, "webhooks/#{webhook}/#{token}/slack", params, params: query)
  end

  @doc """
  Executes a given *GitHub* webhook.
  Refer to [this](https://discord.com/developers/docs/resources/webhook#execute-githubcompatible-webhook)
  for more information.
  """
  @spec execute_github(String.t, String.t, Enum.t, boolean) :: map
  def execute_github(webhook, token, params, wait \\ false) do
    query = [wait: wait]
    API.request(:post, "webhooks/#{webhook}/#{token}/github", params, params: query)
  end
end
|
lib/coxir/struct/webhook.ex
| 0.881812 | 0.405949 |
webhook.ex
|
starcoder
|
defmodule ScosSystemTest.Performance do
  @moduledoc """
  ScosSystemTest.Performance will eventually be an actual
  system test, but for now all it does is generate
  and upload a specified number of datasets.
  """
  require Logger

  alias ScosSystemTest.Helpers
  alias ScosSystemTest.Stats

  # NOTE: resolved at compile time; runtime changes to the app environment
  # will not affect these defaults.
  @default_andi_url Application.get_env(:scos_system_test, :default_andi_url)
  @default_tdg_url Application.get_env(:scos_system_test, :default_tdg_url)

  def run(options \\ []) do
    andi_url = Keyword.get(options, :andi_url, @default_andi_url)
    tdg_url = Keyword.get(options, :tdg_url, @default_tdg_url)
    record_counts = Keyword.get(options, :record_counts, [100])
    dataset_count = Keyword.get(options, :dataset_count, 1)
    counts_available = Enum.count(record_counts)

    Logger.info("Posting #{dataset_count} datasets to #{andi_url}")

    results =
      for index <- 0..(dataset_count - 1) do
        # Cycle through the configured record counts.
        record_count = Enum.at(record_counts, rem(index, counts_available))
        create_and_upload_dataset(andi_url, tdg_url, record_count)
      end

    Logger.info("Finished posting datasets: ")

    Enum.each(results, fn result ->
      Logger.info(
        "Id: #{result.id}, system name: #{result.system_name} record count: #{result.record_count}"
      )
    end)

    results
  end

  def create_and_upload_dataset(andi_url, tdg_url, record_count) do
    uuid = Helpers.generate_uuid()

    organization_id =
      uuid
      |> Helpers.generate_organization()
      |> Helpers.upload_organization(andi_url)

    dataset = Helpers.generate_dataset(uuid, organization_id, record_count, tdg_url)
    Helpers.upload_dataset(dataset, andi_url)

    %{technical: %{orgName: org_name, dataName: data_name}} = dataset
    %{id: uuid, system_name: "#{org_name}__#{data_name}", record_count: record_count}
  end

  def fetch_counts(datasets) do
    for dataset <- datasets, do: select_count_for_dataset(dataset)
  end

  def fetch_stats(datasets) do
    Logger.info(fn -> "Fetching stats. This could take a few minutes." end)

    datasets_by_id = Map.new(datasets, &{&1.id, &1})

    stats_by_id =
      datasets
      |> select_stats()
      |> Enum.group_by(&Map.get(&1, "dataset_id"))

    # Attach each dataset's stats under :stats; datasets without stats pass
    # through unchanged.
    datasets_by_id
    |> Map.merge(stats_by_id, fn _id, dataset, stats -> Map.put(dataset, :stats, stats) end)
    |> Enum.map(fn {_id, dataset} -> dataset end)
  end

  def aggregate_stats(datasets) do
    Enum.map(datasets, fn dataset ->
      if stats = Map.get(dataset, :stats) do
        Map.put(dataset, :aggregated_stats, Stats.aggregate(stats))
      else
        dataset
      end
    end)
  end

  def aggregate_by_groups(datasets, grouping_fun) do
    datasets
    |> Enum.group_by(grouping_fun)
    |> Map.new(fn {key, group} -> {key, clean_and_aggregate_group(group)} end)
  end

  def select_stats(datasets) do
    query =
      "SELECT * FROM operational_stats WHERE dataset_id IN (#{datasets_string(datasets)}) AND app='SmartCityOS' ORDER BY timestamp DESC"

    Helpers.execute(query)
  end

  defp clean_and_aggregate_group(group) do
    group
    |> Enum.map(&Map.get(&1, :stats))
    |> List.flatten()
    |> Enum.reject(&is_nil/1)
    |> Stats.aggregate()
    |> Map.put("num_of_datasets", length(group))
  end

  defp select_count_for_dataset(dataset) do
    count =
      "SELECT COUNT(*) FROM #{String.downcase(dataset.system_name)}"
      |> Helpers.execute()
      |> List.flatten()
      |> List.first()

    Map.put(dataset, :inserted_count, log_count(count, dataset))
  end

  def log_count(count, dataset) do
    Logger.info(fn -> "#{dataset.id}: #{count}/#{dataset.record_count}" end)
    count
  end

  defp datasets_string(datasets) do
    Enum.map_join(datasets, ",", fn dataset -> "'#{dataset.id}'" end)
  end
end
|
lib/performance.ex
| 0.566378 | 0.435421 |
performance.ex
|
starcoder
|
defmodule Forth do
  @moduledoc """
  An evaluator for a small subset of Forth: integer arithmetic (`+`, `-`,
  `*`, `/`), the stack words `dup`, `drop`, `swap` and `over`, and user
  definitions of the form `: word-name definition ;`.
  """

  defstruct stack: [],
            words: [],
            definition: %{}

  @opaque operation :: (evaluator() -> evaluator())
  @opaque word :: %{name: String.t(), code: [operation()]}
  @opaque evaluator :: %__MODULE__{stack: [integer()], words: [word()], definition: map()}

  defmodule StackUnderflow do
    defexception []
    def message(_), do: "stack underflow"
  end

  defmodule InvalidWord do
    defexception word: nil
    def message(e), do: "invalid word: #{inspect(e.word)}"
  end

  defmodule UnknownWord do
    defexception word: nil
    def message(e), do: "unknown word: #{inspect(e.word)}"
  end

  defmodule DivisionByZero do
    defexception []
    def message(_), do: "division by zero"
  end

  @doc """
  Create a new evaluator.
  """
  @spec new() :: evaluator
  def new() do
    # Built-in words; later definitions are prepended to `words`, so a
    # redefinition shadows the older word in `find_word!/2`.
    %__MODULE__{}
    |> add_word("+", fn ev -> stack2!(ev).(&[&1 + &2 | &3]) end)
    |> add_word("-", fn ev -> stack2!(ev).(&[&2 - &1 | &3]) end)
    |> add_word("*", fn ev -> stack2!(ev).(&[&1 * &2 | &3]) end)
    |> add_word("/", fn ev -> stack2!(ev).(&[div!(&2, &1) | &3]) end)
    |> add_word("dup", &stack1!(&1).(fn head, tail -> [head, head | tail] end))
    |> add_word("drop", &stack1!(&1).(fn _head, tail -> tail end))
    |> add_word("swap", &stack2!(&1).(fn x, y, tail -> [y, x | tail] end))
    |> add_word("over", &stack2!(&1).(fn x, y, tail -> [y, x, y | tail] end))
  end

  @doc """
  Evaluate an input string, updating the evaluator state.
  """
  @spec eval(evaluator, String.t()) :: evaluator
  def eval(evaluator, input) do
    input
    |> String.replace(~r/[\x00\x01\n\r\t ]/, " ")
    |> String.downcase()
    |> String.split(" ", trim: true)
    |> do_eval(evaluator)
  end

  @doc """
  Return the current stack as a string with the element on top of the stack
  being the rightmost element in the string.
  """
  @spec format_stack(evaluator) :: String.t()
  def format_stack(evaluator), do: evaluator.stack |> Enum.reverse() |> Enum.join(" ")

  # Registers a built-in word backed by a single operation function.
  @spec add_word(evaluator(), String.t(), operation()) :: evaluator()
  defp add_word(evaluator, name, fun) when is_function(fun) do
    word = %{name: name, code: [fun]}
    %__MODULE__{evaluator | words: [word | evaluator.words]}
  end

  # Registers a user-defined word: each token is resolved *now* (definition
  # time), so later redefinitions of words it uses do not affect it.
  @spec add_word(evaluator(), String.t(), [String.t()]) :: evaluator()
  defp add_word(evaluator, name, tokens) when is_list(tokens) do
    instructions =
      tokens
      |> Enum.map(fn token ->
        case Integer.parse(token) do
          {integer, _} ->
            &push(&1, integer)
          :error ->
            word = find_word!(evaluator, token)
            &Enum.reduce(word.code, &1, fn fun, ev -> fun.(ev) end)
        end
      end)
    word = %{name: name, code: instructions}
    %__MODULE__{evaluator | words: [word | evaluator.words]}
  end

  # Looks up a word by name, raising `UnknownWord` when absent.
  @spec find_word!(evaluator(), String.t()) :: word()
  defp find_word!(evaluator, name) do
    evaluator.words
    |> Enum.find(:no_word, fn word -> word.name == name end)
    |> case do
      # BUGFIX: pass the offending name so the exception message reports it
      # ("unknown word: \"foo\"") instead of the previous "unknown word: nil".
      :no_word -> raise UnknownWord, word: name
      word -> word
    end
  end

  # Pushes an integer onto the evaluator's stack.
  @spec push(evaluator(), integer()) :: evaluator()
  defp push(evaluator, integer) do
    %__MODULE__{evaluator | stack: [integer | evaluator.stack]}
  end

  # `:` starts a definition; the word name must not be a number.
  defp do_eval([":", name | tl], ev) do
    case Integer.parse(name) do
      :error -> do_eval(tl, %__MODULE__{ev | definition: %{name: name, instructions: []}})
      _ -> raise InvalidWord, word: name
    end
  end

  # `;` closes the in-progress definition and registers the new word.
  defp do_eval([";" | tl], %__MODULE__{definition: definition} = ev) do
    tokens = definition.instructions |> Enum.reverse()
    new_ev = %__MODULE__{add_word(ev, definition.name, tokens) | definition: %{}}
    do_eval(tl, new_ev)
  end

  # Inside a definition, tokens are accumulated; otherwise they are executed.
  defp do_eval([token | tl], ev) do
    case ev.definition do
      %{name: _, instructions: instructions} ->
        new_definition = %{ev.definition | instructions: [token | instructions]}
        new_ev = %__MODULE__{ev | definition: new_definition}
        do_eval(tl, new_ev)
      _ ->
        do_eval(tl, eval_token(token, ev))
    end
  end

  defp do_eval([], ev), do: ev

  # A token is either an integer literal (pushed) or a word (executed).
  defp eval_token(token, %__MODULE__{} = ev) do
    case Integer.parse(token) do
      {number, _} ->
        push(ev, number)
      :error ->
        ev
        |> find_word!(token)
        |> Map.fetch!(:code)
        |> Enum.reduce(ev, fn op, acc -> op.(acc) end)
    end
  end

  # Returns a continuation over the top stack element, raising on underflow.
  defp stack1!(%__MODULE__{stack: stack} = ev) do
    case stack do
      [head | tail] -> fn fun -> %__MODULE__{ev | stack: fun.(head, tail)} end
      _ -> fn _fun -> raise StackUnderflow end
    end
  end

  # Returns a continuation over the top two stack elements, raising on underflow.
  defp stack2!(%__MODULE__{stack: stack} = ev) do
    case stack do
      [x, y | tail] -> fn fun -> %__MODULE__{ev | stack: fun.(x, y, tail)} end
      _ -> fn _fun -> raise StackUnderflow end
    end
  end

  defp div!(_x, 0), do: raise(DivisionByZero)
  defp div!(x, y), do: div(x, y)
end
|
exercism/elixir/forth/lib/forth.ex
| 0.81283 | 0.607721 |
forth.ex
|
starcoder
|
defmodule Coxir.Gateway do
  @moduledoc """
  Supervises the components necessary to interact with Discord's gateway.
  ### Using the module
  The module can be used with `Kernel.use/2` to transform the calling module into a gateway.
  Check out the `__using__/1` macro to see what this does exactly.
  """
  import Supervisor, only: [start_child: 2]
  import Bitwise

  alias Coxir.{API, Sharder, Token}
  alias Coxir.Gateway.Payload.{GatewayInfo, RequestGuildMembers, UpdatePresence}
  alias Coxir.Gateway.{Producer, Dispatcher, Consumer, Handler}
  alias Coxir.Gateway.{Intents, Session}
  alias Coxir.Model.Snowflake
  alias Coxir.{Guild, Channel}

  # Lowest-precedence defaults; overridden by app config, then by the config
  # passed to `start_link/2` (see `generate_sharder_spec/2`).
  @default_config [
    sharder: Sharder.Default,
    intents: :non_privileged
  ]

  @typedoc """
  A gateway process.
  """
  @type gateway :: Supervisor.supervisor()

  @typedoc """
  The configuration that must be passed to `start_link/2`.
  If no `token` is provided, one is expected to be configured as `:token` under the `:coxir` app.
  If no `shard_count` is provided, the value suggested by Discord will be used.
  """
  @type config :: [
          token: Token.t() | none,
          intents: Intents.intents() | :non_privileged,
          shard_count: non_neg_integer | none,
          sharder: Sharder.t() | Sharder.Default,
          handler: Handler.handler()
        ]

  @doc """
  Defines functions in order to transform the calling module into a gateway.
  Defines `child_spec/1` and `start_link/0` so that the module can be added to a supervisor.
  Note that the `t:gateway/0` process that the second function starts is named after the module.
  Defines `get_user_id/0` which delegates to `get_user_id/1`.
  Defines `update_presence/1` which delegates to `update_presence/2`.
  Defines `update_presence/2` which delegates to `update_presence/3`.
  Requires the `Coxir.Gateway.Handler` behaviour to be implemented for handling events.
  Custom configuration can be given for this module by configuring it under the `:coxir` app.
  """
  defmacro __using__(_options) do
    quote location: :keep do
      @behaviour Coxir.Gateway.Handler

      alias Coxir.Gateway

      def child_spec(_runtime) do
        %{
          id: __MODULE__,
          start: {__MODULE__, :start_link, []},
          restart: :permanent
        }
      end

      def start_link do
        # Configuration is read from the :coxir app environment for the
        # using module, which is also registered as the event handler.
        :coxir
        |> Application.get_env(__MODULE__, [])
        |> Keyword.put(:handler, __MODULE__)
        |> Gateway.start_link(name: __MODULE__)
      end

      def get_user_id do
        Gateway.get_user_id(__MODULE__)
      end

      def update_presence(params) do
        Gateway.update_presence(__MODULE__, params)
      end

      def update_presence(where, params) do
        Gateway.update_presence(__MODULE__, where, params)
      end
    end
  end

  @doc """
  Calls `update_presence/3` on all the shards of a given gateway.
  """
  @spec update_presence(gateway, Enum.t()) :: :ok
  def update_presence(gateway, params) do
    shard_count = get_shard_count(gateway)

    # Shard indices are zero-based; assert each update succeeds.
    for index <- 1..shard_count do
      :ok = update_presence(gateway, index - 1, params)
    end

    :ok
  end

  @doc """
  Updates the presence on a given channel, guild or specific shard.
  The possible parameters are the fields of `t:Coxir.Gateway.Payload.UpdatePresence.t/0`.
  """
  @spec update_presence(gateway, Channel.t() | Guild.t() | non_neg_integer, Enum.t()) :: :ok
  def update_presence(gateway, where, params) do
    shard = get_shard(gateway, where)
    params = Map.new(params)
    payload = UpdatePresence.cast(params)
    Session.update_presence(shard, payload)
  end

  @doc """
  Requests members for a given guild.
  The possible parameters are the fields of `t:Coxir.Gateway.Payload.RequestGuildMembers.t/0`.
  """
  @spec request_guild_members(gateway, Guild.t(), Enum.t()) :: :ok
  def request_guild_members(gateway, %Guild{id: guild_id} = guild, params) do
    shard = get_shard(gateway, guild)

    # The guild id is always part of the payload, regardless of params.
    params =
      params
      |> Map.new()
      |> Map.put(:guild_id, guild_id)

    payload = RequestGuildMembers.cast(params)
    Session.request_guild_members(shard, payload)
  end

  @doc """
  Returns the session process for a given channel, guild or specific shard.
  """
  @spec get_shard(gateway, Channel.t() | Guild.t() | non_neg_integer) :: Session.session()
  def get_shard(gateway, %Channel{guild_id: nil}) do
    # Channels without a guild (e.g. direct messages) are served by shard 0.
    get_shard(gateway, 0)
  end

  def get_shard(gateway, %Channel{guild_id: guild_id}) do
    guild = %Guild{id: guild_id}
    get_shard(gateway, guild)
  end

  def get_shard(gateway, %Guild{id: id}) do
    shard_count = get_shard_count(gateway)
    # Shard assignment: (guild_id >>> 22) rem shard_count.
    index = rem(id >>> 22, shard_count)
    get_shard(gateway, index)
  end

  def get_shard(gateway, index) when is_integer(index) do
    {sharder, sharder_module} = get_sharder(gateway)
    sharder_module.get_shard(sharder, index)
  end

  @doc """
  Returns the id of the user the given gateway is running for.
  """
  @spec get_user_id(gateway) :: Snowflake.t()
  def get_user_id(gateway) do
    %Session{user_id: user_id} = get_session_options(gateway)
    user_id
  end

  @doc """
  Returns the token configured for the given gateway.
  """
  @spec get_token(gateway) :: Token.t()
  def get_token(gateway) do
    %Session{token: token} = get_session_options(gateway)
    token
  end

  @doc """
  Returns the `t:Coxir.Gateway.Producer.producer/0` process of a given gateway.
  """
  @spec get_producer(gateway) :: Producer.producer()
  def get_producer(gateway) do
    # Scan the supervisor's children for the one running the Producer module.
    children = Supervisor.which_children(gateway)

    Enum.find_value(
      children,
      fn {_id, pid, _type, [module]} ->
        if module == Producer, do: pid
      end
    )
  end

  @doc """
  Starts a gateway with the given configuration and options.
  """
  @spec start_link(config, list(Supervisor.option() | Supervisor.init_option())) ::
          Supervisor.on_start()
  def start_link(config, options \\ []) do
    handler = Keyword.fetch!(config, :handler)
    # :rest_for_one so a crashed child also restarts the children started
    # after it (producer -> dispatcher -> consumer -> sharder order matters).
    options = [{:strategy, :rest_for_one} | options]

    with {:ok, gateway} <- Supervisor.start_link([], options) do
      {:ok, producer} = start_child(gateway, Producer)
      {:ok, dispatcher} = start_child(gateway, {Dispatcher, producer})
      consumer_options = %Consumer{handler: handler, dispatcher: dispatcher}
      {:ok, _consumer} = start_child(gateway, {Consumer, consumer_options})
      sharder_spec = generate_sharder_spec(producer, config)
      {:ok, _sharder} = start_child(gateway, sharder_spec)
      {:ok, gateway}
    end
  end

  defp get_sharder(gateway) do
    # The sharder child is registered under the :sharder id (see
    # generate_sharder_spec/2).
    children = Supervisor.which_children(gateway)

    Enum.find_value(
      children,
      fn {id, pid, _type, [module]} ->
        if id == :sharder, do: {pid, module}
      end
    )
  end

  defp get_shard_count(gateway) do
    %Sharder{shard_count: shard_count} = get_sharder_options(gateway)
    shard_count
  end

  defp get_session_options(gateway) do
    %Sharder{session_options: session_options} = get_sharder_options(gateway)
    session_options
  end

  defp get_sharder_options(gateway) do
    # Reads the %Sharder{} options back out of the sharder's child spec
    # (first argument of its start MFA).
    {:ok, spec} = :supervisor.get_childspec(gateway, :sharder)
    %{start: {_module, _function, [sharder_options | _rest]}} = spec
    sharder_options
  end

  defp generate_sharder_spec(producer, config) do
    # Precedence: explicit config > :coxir app environment > @default_config.
    global = Application.get_all_env(:coxir)

    config =
      @default_config
      |> Keyword.merge(global)
      |> Keyword.merge(config)

    token = Token.from_options!(config)

    intents =
      config
      |> Keyword.fetch!(:intents)
      |> Intents.get_value()

    {gateway_host, shard_count, _start_limit} = request_gateway_info(token)

    session_options = %Session{
      user_id: Token.get_user_id(token),
      token: token,
      intents: intents,
      producer: producer,
      gateway_host: gateway_host
    }

    sharder_options = %Sharder{
      # An explicitly configured :shard_count overrides Discord's suggestion.
      shard_count: Keyword.get(config, :shard_count, shard_count),
      session_options: session_options
    }

    sharder_module = Keyword.fetch!(config, :sharder)
    spec = sharder_module.child_spec(sharder_options)
    %{spec | id: :sharder}
  end

  defp request_gateway_info(token) do
    {:ok, object} = API.get("gateway/bot", token: token)
    gateway_info = GatewayInfo.cast(object)

    %GatewayInfo{
      url: "wss://" <> gateway_host,
      shards: shard_count,
      session_start_limit: start_limit
    } = gateway_info

    # The host is converted to a charlist.
    gateway_host = :binary.bin_to_list(gateway_host)
    {gateway_host, shard_count, start_limit}
  end
end
|
lib/coxir/gateway.ex
| 0.89865 | 0.499268 |
gateway.ex
|
starcoder
|
defmodule Genex.Support.Genealogy do
  alias Graph.Serializers.DOT

  @moduledoc """
  Implementation of a genealogy tree.
  We use the Genealogy tree to model the history of the population from it's initialization to the end of the algorithm. The tree itself is a directed `Graph` implemented using [libgraph](https://www.github.com/x/libgraph). Genealogy is tracked in a population struct, so you can access the genealogy anytime through the current population. Any function in the libgraph library will work on a genealogy
  An edge emanates from parent and is incident on child. Mutants are considered new chromosomes, so their lineage starts with a single parent.
  To produce visualizations of the Genealogy of an evolution, you'll have to export the tree to a DOT file and use a third-party visualization tool.
  """

  @doc """
  Creates a new Genealogy Tree.
  Returns `Graph`.
  """
  def new, do: Graph.new(type: :directed)

  @doc """
  Updates a Genealogy Tree with several vertices or one vertex.
  Returns `Graph`.
  # Parameters
  - `genealogy`: Reference to the Genealogy Tree.
  - `chromosomes`: `List` of `%Chromosome{}` or `%Chromosome{}`.
  """
  def update(genealogy, chromosomes) when is_list(chromosomes) do
    Graph.add_vertices(genealogy, chromosomes)
  end

  def update(genealogy, chromosome) do
    Graph.add_vertex(genealogy, chromosome)
  end

  @doc """
  Updates a Genealogy Tree with a vertex and it's parent.
  Returns `Graph`.
  # Parameters
  - `genealogy`: Reference to a Genealogy Tree.
  - `child`: Child `%Chromosome{}`.
  - `parent`: Parent `%Chromosome{}`.
  """
  def update(genealogy, child, parent) do
    # Register the child first, then draw the parent -> child edge.
    with_child = Graph.add_vertex(genealogy, child)
    Graph.add_edge(with_child, parent, child)
  end

  @doc """
  Updates a Genealogy Tree with a vertex and both of it's parents.
  Returns `Graph`.
  # Parameters
  - `genealogy`: Reference to a Genealogy Tree.
  - `child`: Child `%Chromosome{}`.
  - `parent_a`: Parent A `%Chromosome{}`.
  - `parent_b`: Parent B `%Chromosome{}`.
  """
  def update(genealogy, child, parent_a, parent_b) do
    genealogy
    |> update(child, parent_a)
    |> Graph.add_edge(parent_b, child)
  end

  @doc """
  Exports the genealogy tree to DOT format.
  Returns `{:ok, String}`.
  # Parameters
  - `genealogy`: Reference to a Genealogy Tree.
  """
  def export(genealogy) do
    DOT.serialize(genealogy)
  end
end
|
lib/genex/support/genealogy.ex
| 0.870982 | 0.922132 |
genealogy.ex
|
starcoder
|
defmodule Struct.Elements.InsertedWoodenSidingWall do
  use Ecto.Schema
  import Ecto.Changeset

  # All castable fields. The exact same list is also required, so it is
  # declared once to keep cast/3 and validate_required/2 in sync (the
  # original duplicated the 23-atom list verbatim in both calls).
  @fields [
    :name,
    :single_shear_connecter_rigidity,
    :number_of_shear_connecters_on_line,
    :siding_width,
    :frame_inner_height,
    :frame_inner_width,
    :friction_coefficient_between_sidings,
    :shear_modulus_of_sidings,
    :siding_thickness,
    :fiber_direction_elasticity_of_sidings,
    :siding_elasticity_ratio,
    :fiber_orthogonal_direction_elasticity_of_columns,
    :fiber_orthogonal_direction_elasticity_of_beams,
    :column_depth,
    :beam_height,
    :column_width,
    :beam_width,
    :substitution_coefficient_of_columns,
    :substitution_coefficient_of_beams,
    :single_shear_connecter_yield_resistance,
    :fiber_direction_compressive_strength_of_sidings,
    :fiber_orthogonal_direction_compressive_strength_of_sidings,
    :yield_judgement_ratio
  ]

  schema "inserted_wooden_siding_walls" do
    field :beam_height, :float
    field :beam_width, :float
    field :column_depth, :float
    field :column_width, :float
    field :fiber_direction_compressive_strength_of_sidings, :float
    field :fiber_direction_elasticity_of_sidings, :float
    field :fiber_orthogonal_direction_compressive_strength_of_sidings, :float
    field :fiber_orthogonal_direction_elasticity_of_beams, :float
    field :fiber_orthogonal_direction_elasticity_of_columns, :float
    field :frame_inner_height, :float
    field :frame_inner_width, :float
    field :friction_coefficient_between_sidings, :float
    field :name, :string
    field :number_of_shear_connecters_on_line, :integer
    field :shear_modulus_of_sidings, :float
    field :siding_elasticity_ratio, :float
    field :siding_thickness, :float
    field :siding_width, :float
    field :single_shear_connecter_rigidity, :float
    field :single_shear_connecter_yield_resistance, :float
    field :substitution_coefficient_of_beams, :float
    field :substitution_coefficient_of_columns, :float
    field :yield_judgement_ratio, :float
    timestamps()
  end

  @doc false
  # Builds a changeset casting and requiring every schema field.
  def changeset(inserted_wooden_siding_wall, attrs) do
    inserted_wooden_siding_wall
    |> cast(attrs, @fields)
    |> validate_required(@fields)
  end
end
|
lib/struct/elements/inserted_wooden_siding_wall.ex
| 0.543348 | 0.523238 |
inserted_wooden_siding_wall.ex
|
starcoder
|
defmodule Arc.Ecto.Type do
  @moduledoc false
  require Logger

  # Underlying Ecto primitive type used for storage.
  def type, do: :string

  # Stored values look like "<file_name>?<gregorian_seconds>"; plain file
  # names carry no timestamp.
  @filename_with_timestamp ~r{^(.*)\?(\d+)$}

  def cast(definition, %{file_name: file, updated_at: updated_at}) do
    cast(definition, %{"file_name" => file, "updated_at" => updated_at})
  end

  def cast(_definition, %{"file_name" => file, "updated_at" => updated_at}) do
    {:ok, %{file_name: file, updated_at: updated_at}}
  end

  def cast(definition, args) do
    case definition.store(args) do
      {:ok, file} ->
        timestamp = NaiveDateTime.truncate(NaiveDateTime.utc_now(), :second)
        {:ok, %{file_name: file, updated_at: timestamp}}

      error ->
        Logger.error(inspect(error))
        :error
    end
  end

  def load(_definition, value) do
    case Regex.run(@filename_with_timestamp, value) do
      [_whole, file_name, gsec] ->
        {:ok, %{file_name: file_name, updated_at: gsec_to_naive(gsec)}}

      nil ->
        # No "?<timestamp>" suffix: the whole value is the file name.
        {:ok, %{file_name: value, updated_at: nil}}
    end
  end

  def dump(_definition, %{file_name: file_name, updated_at: nil}) do
    {:ok, file_name}
  end

  def dump(_definition, %{file_name: file_name, updated_at: updated_at}) do
    gsec = :calendar.datetime_to_gregorian_seconds(NaiveDateTime.to_erl(updated_at))
    {:ok, "#{file_name}?#{gsec}"}
  end

  def dump(definition, %{"file_name" => file_name, "updated_at" => updated_at}) do
    dump(definition, %{file_name: file_name, updated_at: updated_at})
  end

  def embed_as(definition, format) do
    if function_exported?(definition, :embed_as, 1), do: definition.embed_as(format), else: :self
  end

  def equal?(definition, term1, term2) do
    if function_exported?(definition, :equal?, 2) do
      definition.equal?(term1, term2)
    else
      equal?(term1, term2)
    end
  end

  # Converts a gregorian-seconds string back into a NaiveDateTime.
  defp gsec_to_naive(gsec) do
    gsec
    |> String.to_integer()
    |> :calendar.gregorian_seconds_to_datetime()
    |> NaiveDateTime.from_erl!()
  end

  # Default equality: same file name and same timestamp, accepting both
  # atom-keyed and string-keyed maps.
  defp equal?(term1, term2) do
    eq?(file_name(term1), file_name(term2)) and eq?(updated_at(term1), updated_at(term2))
  end

  defp file_name(%{file_name: name}), do: name
  defp file_name(%{"file_name" => name}), do: name
  defp file_name(_), do: nil

  defp updated_at(%{updated_at: at}), do: at
  defp updated_at(%{"updated_at" => at}), do: at
  defp updated_at(_), do: nil

  defp eq?(%NaiveDateTime{} = dt1, %NaiveDateTime{} = dt2), do: NaiveDateTime.compare(dt1, dt2) == :eq
  defp eq?(term, term), do: true
  defp eq?(_, _), do: false
end
|
lib/arc_ecto/type.ex
| 0.581422 | 0.464355 |
type.ex
|
starcoder
|
defmodule Aoc2020Day24 do
  import Enum

  # A tile's value in the world map is its flip count; parity decides colour.
  @black 1
  @white 0

  @doc """
  Each point can be identified by treat e/w as +-2, ne/nw/se/sw as +-1
  nwwswee -> flips the reference tile itself {0, 0}
  """
  def coordinate(path) do
    path
    |> reduce({0, 0}, fn i, {x, y} ->
      case i do
        "nw" -> {x - 1, y - 1}
        "se" -> {x + 1, y + 1}
        "ne" -> {x + 1, y - 1}
        "sw" -> {x - 1, y + 1}
        "e" -> {x + 2, y}
        "w" -> {x - 2, y}
      end
    end)
  end

  # The six hex neighbours of a tile in this doubled-x coordinate system.
  def neighbors({x, y}) do
    [{x - 1, y - 1}, {x + 1, y + 1}, {x + 1, y - 1}, {x - 1, y + 1}, {x + 2, y}, {x - 2, y}]
  end

  def count_black_neighbors(world, tile) do
    neighbors(tile)
    |> map(fn k -> Map.get(world, k, 0) end)
    |> count(&is_black?(&1))
  end

  # A tile flipped an odd number of times shows its black side.
  def is_black?(v) do
    rem(v, 2) == @black
  end

  # Computes a tile's next colour from its own colour and its black-neighbour
  # count (Game-of-Life-style rules).
  def next(world, tile) do
    # Default to 0 (white) so tiles absent from `world` are handled safely;
    # callers in solve2 always pre-seed keys, so behavior is unchanged there.
    v = Map.get(world, tile, 0)
    case is_black?(v) do
      true ->
        c = count_black_neighbors(world, tile)
        cond do
          c == 0 -> @white
          c > 2 -> @white
          true -> @black
        end
      false ->
        c = count_black_neighbors(world, tile)
        case c do
          2 -> @black
          _ -> @white
        end
    end
  end

  defp parse_line(s) do
    parse_line(s, [])
  end

  # Two-character directions must be matched before "e"/"w".
  def parse_line("se" <> t, result) do
    parse_line(t, ["se" | result])
  end

  def parse_line("sw" <> t, result) do
    parse_line(t, ["sw" | result])
  end

  def parse_line("nw" <> t, result) do
    parse_line(t, ["nw" | result])
  end

  def parse_line("ne" <> t, result) do
    parse_line(t, ["ne" | result])
  end

  def parse_line("e" <> t, result) do
    parse_line(t, ["e" | result])
  end

  def parse_line("w" <> t, result) do
    parse_line(t, ["w" | result])
  end

  def parse_line("", result) do
    result
    |> reverse
  end

  @doc """
  Read steps and turns to a dict of point -> number of visit
  An odd visited points would be flipped odd times -> black
  Otherwise it is white
  """
  def read_input(input) do
    input
    |> String.trim()
    |> String.split("\n", trim: true)
    |> map(&parse_line/1)
    |> map(fn p -> coordinate(p) end)
    |> frequencies
  end

  def solve1(input) do
    input
    |> read_input
    |> Map.values()
    # BUGFIX: a tile visited any odd number of times ends black, so count by
    # parity (is_black?/1) instead of only tiles visited exactly once
    # (`v == @black` missed tiles flipped 3, 5, ... times).
    |> count(&is_black?/1)
  end

  def solve2(input) do
    world =
      input
      |> read_input

    1..100
    |> reduce(world, fn _i, acc ->
      # Seed every neighbour of a known tile with 0 (white) so the next-state
      # rules can turn white tiles black; existing counts are kept.
      added_neighbors =
        acc
        |> Map.keys()
        |> map(fn k -> neighbors(k) end)
        |> concat
        |> uniq
        |> reduce(acc, fn k, acc -> Map.put_new(acc, k, 0) end)

      added_neighbors
      |> map(fn {k, _v} ->
        {k, next(added_neighbors, k)}
      end)
      |> Map.new()
    end)
    |> count(fn {_k, v} -> is_black?(v) end)
  end
end
|
lib/2020/aoc2020_day24.ex
| 0.71123 | 0.670129 |
aoc2020_day24.ex
|
starcoder
|
defmodule NebulexMemcachedAdapter do
  @moduledoc """
  Nebulex adapter for Memcached.

  This adapter is implemented by means of `Memcachex`, a Memcached driver for
  Elixir.

  This adapter supports multiple connection pools against different memcached
  nodes in a cluster. This feature enables resiliency, be able to survive in
  case any node(s) gets unreachable.

  ## Adapter Options

  In addition to `Nebulex.Cache` shared options, this adapters supports the
  following options:

    * `:pools` - The list of connection pools for Memcached. Each element (pool)
      holds the same options as `Memcachex` (including connection options), and
      the `:pool_size` (number of connections to keep in the pool).

  ## Memcachex Options (for each pool)

  Since this adapter is implemented by means of `Memcachex`, it inherits the same
  options (including connection options). These are some of the main ones:

    * `:hostname` - (string) hostname of the memcached server. Defaults to "localhost".
    * `:port` - (integer) port on which the memcached server is listening. Defaults to
      11211.
    * `:auth` - (tuple) only plain authentication method is supported. It is specified
      using the following format {:plain, "username", "password"}. Defaults to nil.
    * `:ttl` - (integer) a default expiration time in seconds. This value will be used
      if the :ttl value is not specified for an operation. Defaults to 0 (means forever).
    * `:namespace` - (string) prepend each key with the given value.
    * `:backoff_initial` - (integer) initial backoff (in milliseconds) to be used in
      case of connection failure. Defaults to 500.
    * `:backoff_max` - (integer) maximum allowed interval between two connection
      attempts. Defaults to 30_000.

  For more information about the options (Memcache and connection options), please
  checkout `Memcachex` docs.

  In addition to `Memcachex` options, it supports:

    * `:pool_size` - The number of connections to keep in the pool
      (default: `System.schedulers_online()`).

  ## Example

  We can define our cache to use Memcached adapter as follows:

      defmodule MyApp.MemcachedCache do
        use Nebulex.Cache,
          otp_app: :nebulex,
          adapter: NebulexMemcachedAdapter
      end

  The configuration for the cache must be in your application environment,
  usually defined in your `config/config.exs`:

      config :my_app, MyApp.MemcachedCache,
        pools: [
          primary: [
            hostname: "127.0.0.1",
            port: 11211
          ],
          secondary: [
            hostname: "127.0.0.1",
            port: 11211,
            pool_size: 2
          ]
        ]

  For more information about the usage, check out `Nebulex.Cache` as well.
  """

  # Inherit default transaction implementation
  use Nebulex.Adapter.Transaction

  # Provide Cache Implementation
  @behaviour Nebulex.Adapter

  alias Nebulex.Object
  alias NebulexMemcachedAdapter.Client

  @default_pool_size System.schedulers_online()

  ## Adapter

  # Injects `__pool_size__/0` (total connection count across all pools) into
  # the cache module at compile time.
  @impl true
  defmacro __before_compile__(%{module: module}) do
    otp_app = Module.get_attribute(module, :otp_app)
    config = Module.get_attribute(module, :config)

    pool_size =
      config
      |> Keyword.get(:pools)
      |> pool_size(module, otp_app)

    quote do
      def __pool_size__, do: unquote(pool_size)
    end
  end

  # Sums the :pool_size of every configured pool; raises if :pools is missing.
  defp pool_size(nil, module, otp_app) do
    raise ArgumentError,
          "missing :pools configuration in " <> "config #{inspect(otp_app)}, #{inspect(module)}"
  end

  defp pool_size([], _module, _otp_app), do: 0

  defp pool_size([{_, pool} | other_pools], module, otp_app) do
    pool_size(pool) + pool_size(other_pools, module, otp_app)
  end

  defp pool_size(pool), do: Keyword.get(pool, :pool_size, @default_pool_size)

  # Builds the child specs (one Memcache connection per pool slot) that the
  # cache supervisor starts.
  @impl true
  def init(opts) do
    cache = Keyword.fetch!(opts, :cache)

    children =
      opts
      |> Keyword.fetch!(:pools)
      |> children(cache)

    {:ok, children}
  end

  # `offset` keeps the connection index globally unique across pools.
  defp children(pools, cache, offset \\ 0)

  defp children([], _cache, _offset), do: []

  defp children([{_, pool} | other_pools], cache, offset) do
    pool_size = pool_size(pool)
    next_offset = offset + pool_size

    for index <- offset..(offset + pool_size - 1) do
      pool
      |> Keyword.delete(:pool_size)
      |> child_spec(index, cache)
    end ++ children(other_pools, cache, next_offset)
  end

  defp child_spec(opts, index, cache) do
    Supervisor.child_spec(
      {Memcache, [opts, [name: :"#{cache}_memcache_#{index}"]]},
      id: {Memcache, index}
    )
  end

  @impl true
  def get(cache, key, opts) do
    opts
    |> Keyword.get(:return)
    |> do_get(cache, key)
  end

  # Returns a map of key -> object for the keys that were found; missing keys
  # are dropped (each key is fetched with an individual round trip).
  @impl true
  def get_many(cache, keys, _opts) do
    key_values =
      Enum.map(keys, fn key ->
        {key, get(cache, key, [])}
      end)

    key_values
    |> Enum.reject(fn {_k, v} -> is_nil(v) end)
    |> Map.new()
  end

  # Stores `object`; `:action` selects set/add/replace semantics. Returns a
  # boolean indicating success.
  @impl true
  def set(cache, %Object{key: key} = object, opts) do
    action = Keyword.get(opts, :action, :set)
    ttl = Keyword.get(opts, :ttl, 0)
    do_set(action, cache, encode(key), encode(object), ttl)
  end

  @impl true
  def set_many(cache, objects, opts) do
    ttl = opts |> Keyword.get(:ttl, 0)

    key_values =
      objects
      |> Enum.map(fn %Object{key: key} = object ->
        {encode(key), encode(object)}
      end)

    case Client.multi_set(cache, key_values, ttl: ttl) do
      {:ok, _} -> :ok
      _ -> :error
    end
  end

  # Reads and deletes the object under `key` in one logical step, using the
  # CAS token so only the value that was read is removed.
  @impl true
  def take(cache, key, _opts) do
    with {:ok, value, cas} <- Client.get(cache, encoded_key = encode(key), cas: true) do
      _ = Client.delete_cas(cache, encoded_key, cas)

      value
      |> decode()
      |> object(key, -1)
    else
      _ -> nil
    end
  end

  defp do_set(:set, cache, key, value, ttl) do
    case Client.set(cache, key, value, ttl: ttl) do
      {:ok} -> true
      _ -> false
    end
  end

  defp do_set(:add, cache, key, value, ttl) do
    case Client.add(cache, key, value, ttl: ttl) do
      {:ok} -> true
      _ -> false
    end
  end

  defp do_set(:replace, cache, key, value, ttl) do
    case Client.replace(cache, key, value, ttl: ttl) do
      {:ok} -> true
      _ -> false
    end
  end

  @impl true
  def expire(cache, key, :infinity) do
    # Pass the raw key through: expire/3 encodes it itself. (The previous
    # version encoded here as well; encode/1 is idempotent on binaries so the
    # behavior is unchanged, but the double pass was redundant.)
    expire(cache, key, nil)
  end

  def expire(cache, key, ttl) do
    with {:ok, value, cas} <- Client.get(cache, encode(key), cas: true),
         {:ok} <- set_cas(cache, key, decode(value), cas, ttl) do
      Object.expire_at(ttl) || :infinity
    else
      _ -> nil
    end
  end

  # For a decoded Object, refresh its expire_at before re-encoding and writing
  # it back under the CAS token taken at read time.
  defp set_cas(cache, key, %Object{} = object, cas, ttl) do
    value = object(object, key, ttl)
    set_cas(cache, key, encode(value), cas, ttl)
  end

  defp set_cas(cache, key, value, cas, ttl) do
    Client.set_cas(
      cache,
      encode(key),
      value,
      cas,
      # nil ttl (from the :infinity clause) maps to memcached's 0 = no expiry
      ttl: ttl || 0
    )
  end

  @impl true
  def update_counter(cache, key, incrby, _opts) when is_integer(incrby) do
    case Client.incr(cache, encode(key), incrby) do
      {:ok, value} -> value
      _ -> nil
    end
  end

  @impl true
  def delete(cache, key, _opts) do
    _ = Client.delete(cache, encode(key))
    :ok
  end

  @impl true
  def has_key?(cache, key) do
    case get(cache, key, []) do
      nil -> false
      _ -> true
    end
  end

  @impl true
  def object_info(cache, key, :ttl) do
    case Client.get(cache, encode(key)) do
      {:ok, value} ->
        %Object{expire_at: expire_at} =
          value
          |> decode()
          |> object(key, -1)

        Object.remaining_ttl(expire_at)

      {:error, _} ->
        nil
    end
  end

  def object_info(cache, key, :version) do
    case get(cache, key, []) do
      nil -> nil
      object -> object.version
    end
  end

  @impl true
  def size(cache) do
    Client.size(cache)
  end

  @impl true
  def flush(cache) do
    _ = Client.flush(cache)
    :ok
  end

  # Fetches and decodes the object stored under `key`. The `:return` option is
  # accepted but ignored here: both clauses of the original implementation
  # were byte-identical, so they were merged into this single catch-all.
  defp do_get(_return, cache, key) do
    case Client.get(cache, encode(key)) do
      {:ok, value} ->
        value
        |> decode()
        |> object(key, -1)

      {:error, _} ->
        nil
    end
  end

  # Keys/values are stored as binaries: string-convertible terms via
  # to_string/1, everything else via the external term format. Idempotent for
  # binaries (to_string/1 returns a binary unchanged).
  defp encode(data) do
    to_string(data)
  rescue
    _e ->
      :erlang.term_to_binary(data)
  end

  defp decode(nil), do: nil

  defp decode(data) do
    # NOTE(review): binary_to_term/1 without [:safe] — acceptable only because
    # the data comes from this adapter's own writes; revisit if the memcached
    # instance is shared with untrusted writers.
    if String.printable?(data) do
      data
    else
      :erlang.binary_to_term(data)
    end
  end

  # Normalizes a decoded value into an Object; ttl of -1 means "leave
  # expire_at untouched".
  defp object(nil, _key, _ttl), do: nil
  defp object(%Object{} = object, _key, -1), do: object

  defp object(%Object{} = object, _key, ttl) do
    %{object | expire_at: Object.expire_at(ttl)}
  end

  defp object(value, key, -1) do
    %Object{key: key, value: value}
  end
end
|
lib/nebulex_memcached_adapter.ex
| 0.912216 | 0.539165 |
nebulex_memcached_adapter.ex
|
starcoder
|
defmodule Content.Audio.FollowingTrain do
  @moduledoc """
  The following train to [destination] arrives in [n] minutes.
  """

  @enforce_keys [:destination, :route_id, :verb, :minutes]
  defstruct @enforce_keys

  @type verb :: :arrives | :departs

  @type t :: %__MODULE__{
          destination: PaEss.destination(),
          route_id: String.t(),
          verb: verb(),
          minutes: integer()
        }

  require Logger

  @spec from_predictions_message({
          Signs.Utilities.SourceConfig.source(),
          Content.Message.Predictions.t()
        }) :: Content.Audio.FollowingTrain.t() | nil
  def from_predictions_message({
        %{
          terminal?: terminal?
        },
        %Content.Message.Predictions{
          minutes: minutes,
          destination: destination,
          route_id: route_id
        }
      })
      when is_integer(minutes) do
    # Only numeric predictions become audio; "ARR"/"BRD" style messages fall
    # through to the nil clause below.
    %__MODULE__{
      destination: destination,
      route_id: route_id,
      minutes: minutes,
      verb: arrives_or_departs(terminal?)
    }
  end

  def from_predictions_message({_source, _message}), do: nil

  @spec arrives_or_departs(boolean) :: :arrives | :departs
  defp arrives_or_departs(true), do: :departs
  defp arrives_or_departs(false), do: :arrives

  defimpl Content.Audio do
    alias PaEss.Utilities

    # Canned-message variable IDs used by the take_message payload.
    @the_following "667"
    @train_to "507"
    @in_ "504"
    @minutes "505"
    @minute "532"

    # Builds the audio payload: a canned message when the destination has a
    # known variable, otherwise an ad-hoc text message (or nil when the trip
    # cannot be described at all).
    def to_params(audio) do
      case Utilities.destination_var(audio.destination) do
        {:ok, destination_var} -> canned_params(audio, destination_var)
        {:error, :unknown} -> ad_hoc_params(audio)
      end
    end

    # Canned path: Green Line branches get the spliced take-message form;
    # otherwise message "159" (1 minute) or "160" (n minutes).
    defp canned_params(audio, destination_var) do
      branch =
        Content.Utilities.route_and_destination_branch_letter(
          audio.route_id,
          audio.destination
        )

      cond do
        !is_nil(branch) ->
          green_line_with_branch_params(audio, branch, destination_var)

        audio.minutes == 1 ->
          {:canned, {"159", [destination_var, verb_var(audio)], :audio}}

        true ->
          {:canned, {"160", [destination_var, verb_var(audio), minutes_var(audio)], :audio}}
      end
    end

    # Ad-hoc fallback for destinations without a canned variable.
    defp ad_hoc_params(audio) do
      case Utilities.ad_hoc_trip_description(audio.destination, audio.route_id) do
        {:ok, trip_description} ->
          min_or_mins = if audio.minutes == 1, do: "minute", else: "minutes"

          text =
            "The following #{trip_description} #{audio.verb} in #{audio.minutes} #{min_or_mins}"

          {:ad_hoc, {text, :audio}}

        {:error, :unknown} ->
          Logger.error(
            "FollowingTrain.to_params unknown destination: #{inspect(audio.destination)}"
          )

          nil
      end
    end

    @spec green_line_with_branch_params(
            Content.Audio.FollowingTrain.t(),
            Content.Utilities.green_line_branch(),
            String.t()
          ) :: Content.Audio.canned_message()
    defp green_line_with_branch_params(audio, green_line_branch, destination_var) do
      # "The following <branch> train to <destination> <arrives/departs> in
      # <n> minute(s)", assembled from canned variables.
      [
        @the_following,
        Utilities.green_line_branch_var(green_line_branch),
        @train_to,
        destination_var,
        verb_var(audio),
        @in_,
        minutes_var(audio),
        minute_or_minutes(audio)
      ]
      |> Utilities.take_message(:audio)
    end

    defp verb_var(%{verb: :arrives}), do: "503"
    defp verb_var(%{verb: :departs}), do: "502"

    defp minutes_var(%{minutes: minutes}), do: Utilities.countdown_minutes_var(minutes)

    @spec minute_or_minutes(Content.Audio.FollowingTrain.t()) :: String.t()
    defp minute_or_minutes(%Content.Audio.FollowingTrain{minutes: 1}), do: @minute
    defp minute_or_minutes(%Content.Audio.FollowingTrain{}), do: @minutes
  end
end
|
lib/content/audio/following_train.ex
| 0.76921 | 0.4474 |
following_train.ex
|
starcoder
|
defmodule ExTorch.Native.Tensor.Creation do
@moduledoc false
use ExTorch.Native.BindingDeclaration
@doc_section :creation
defbindings(:tensor_creation) do
@doc """
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the tuple argument `size`.
## Arguments
- `size`: a tuple/list of integers defining the shape of the output tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.empty({2, 3})
#Tensor<-6.2093e+29 4.5611e-41 0.0000e+00
0.0000e+00 1.1673e-42 0.0000e+00
[ CPUFloatType{2,3} ]>
iex> ExTorch.empty({2, 3}, dtype: :int64, device: :cpu)
#Tensor< 1.4023e+14 0.0000e+00 0.0000e+00
1.0000e+00 7.0000e+00 1.4023e+14
[ CPULongType{2,3} ]>
"""
@spec empty(
tuple() | [integer()],
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
empty(
size,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument `size`.
## Arguments
- `size`: a tuple/list of integers defining the shape of the output tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.zeros({2, 3})
#Tensor< 0 0 0
0 0 0
[ CPUFloatType{2,3} ]>
iex> ExTorch.zeros({2, 3}, dtype: :uint8, device: :cpu)
#Tensor< 0 0 0
0 0 0
[ CPUByteType{2,3} ]>
iex> ExTorch.zeros({5})
#Tensor< 0
0
0
0
0
[ CPUFloatType{5} ]>
"""
@spec zeros(
tuple() | [integer()],
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
zeros(
size,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument `size`.
## Arguments
- `size`: a tuple/list of integers defining the shape of the output tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.ones({2, 3})
#Tensor< 1 1 1
1 1 1
[ CPUFloatType{2,3} ]>
iex> ExTorch.ones({2, 3}, dtype: :uint8, device: :cpu)
#Tensor< 1 1 1
1 1 1
[ CPUByteType{2,3} ]>
iex> ExTorch.ones({5})
#Tensor< 1
1
1
1
1
[ CPUFloatType{5} ]>
"""
@spec ones(
tuple() | [integer()],
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
ones(
size,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Returns a tensor filled with random numbers from a uniform distribution
on the interval $[0, 1)$
The shape of the tensor is defined by the variable argument `size`.
## Arguments
- `size`: a tuple/list of integers defining the shape of the output tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.rand({3, 3, 3})
#Tensor<
(1,.,.) =
0.5997 0.3569 0.7639
0.1939 0.0923 0.0942
0.3355 0.3534 0.6490
(2,.,.) =
0.7250 0.5877 0.9215
0.1583 0.7270 0.3289
0.7083 0.1259 0.0050
(3,.,.) =
0.1731 0.9534 0.6758
0.8523 0.0659 0.3623
0.0747 0.6079 0.7227
[ CPUFloatType{3,3,3} ]
>
iex> ExTorch.rand({2, 3}, dtype: :float64)
#Tensor<
0.6012 0.6164 0.2413
0.9720 0.7804 0.4863
[ CPUDoubleType{2,3} ]
>
"""
@spec rand(
tuple() | [integer()],
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
rand(
size,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc ~S"""
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
$$\text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)$$
The shape of the tensor is defined by the variable argument :attr:`size`.
## Arguments
- `size`: a tuple/list of integers defining the shape of the output tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.randn({3, 3, 5})
#Tensor<
(1,.,.) =
0.0784 -0.3355 -0.0159 -0.0606 -1.2691
-0.6146 0.2346 0.8563 0.8795 0.0645
-1.9992 0.6692 0.2269 1.9263 0.1033
(2,.,.) =
0.2647 0.7078 0.0270 -1.1330 -0.4143
1.2061 -1.1191 0.7465 0.2140 0.7406
0.3587 -0.6102 0.3359 -0.4517 -0.5276
(3,.,.) =
1.7122 0.3814 -0.6218 0.8047 -0.6067
0.1693 0.4957 -0.6139 0.7341 1.4272
0.1630 -0.1142 0.8823 0.8026 1.3355
[ CPUFloatType{3,3,5} ]
iex> ExTorch.randn({3, 3, 5}, device: :cpu)
#Tensor<
(1,.,.) =
-0.8990 -0.3449 -1.2916 -0.0318 0.7116
0.9068 -0.3159 -0.6416 -1.8414 -0.1421
-0.9251 -0.8209 0.0830 -2.5484 0.3731
(2,.,.) =
0.5975 0.0690 -0.2972 -0.0328 -0.2672
1.3053 0.7803 -0.1992 -2.1078 -0.7520
1.3048 0.6391 0.1137 2.0412 0.2380
(3,.,.) =
-1.1820 -1.9329 -0.3965 -0.0618 -1.1190
0.7926 -1.8551 1.1356 -0.7451 -0.6003
1.0266 0.5791 0.2724 0.6952 -3.1296
[ CPUFloatType{3,3,5} ]
>
"""
@spec randn(
tuple() | [integer()],
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
randn(
size,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Returns a tensor filled with random integers generated uniformly
between `low` (inclusive) and `high` (exclusive).
The shape of the tensor is defined by the variable argument `size`.
## Arguments
- `low`: Lowest integer to be drawn from the distribution. Default: `0`.
- `high`: One above the highest integer to be drawn from the distribution.
- `size`: a tuple/list of integers defining the shape of the output tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
# Sample numbers between 0 and 3
iex> ExTorch.randint(3, {3, 3, 4})
#Tensor<
(1,.,.) =
2 2 0 2
0 0 1 0
1 2 1 0
(2,.,.) =
1 1 2 0
0 2 1 2
2 0 0 1
(3,.,.) =
0 2 0 2
0 1 1 1
2 1 1 1
[ CPUFloatType{3,3,4} ]
>
# Sample numbers between 0 and 3 of type int64
iex> ExTorch.randint(3, {3, 3, 4}, dtype: :int64)
#Tensor<
(1,.,.) =
2 2 1 0
0 1 0 1
2 2 2 2
(2,.,.) =
1 1 1 1
1 1 0 1
2 1 0 2
(3,.,.) =
1 2 1 0
1 1 2 1
1 1 0 1
[ CPULongType{3,3,4} ]
>
# Sample numbers between -2 and 4
iex> ExTorch.randint(-2, 3, {2, 2, 4})
#Tensor<
(1,.,.) =
-2 2 0 -2
0 2 1 2
(2,.,.) =
-2 -1 -1 1
0 -1 0 0
[ CPUFloatType{2,2,4} ]
>
# Sample numbers between -2 and 4 on cpu
iex> ExTorch.randint(-2, 3, {2, 2, 4}, device: :cpu)
#Tensor<
(1,.,.) =
-2 0 0 -2
2 1 2 -2
(2,.,.) =
2 -1 -1 1
1 2 1 -2
[ CPUFloatType{2,2,4} ]
>
"""
@spec randint(
integer(),
integer(),
tuple() | [integer()],
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
randint(
low \\ 0,
high,
size,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc ~S"""
Returns a 1-D tensor of size $\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil$
with values from the interval ``[start, end)`` taken with common difference
`step` beginning from `start`.
Note that non-integer `step` is subject to floating point rounding errors when
comparing against `end`; to avoid inconsistency, we advise adding a small epsilon
to `end` in such cases.
$$out_{i + 1} = out_i + step$$
## Arguments
- `start`: the starting value for the set of points. Default: ``0``.
- `end`: the ending value for the set of points.
- `step`: the gap between each pair of adjacent points. Default: ``1``.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
# Single argument, end only
iex> ExTorch.arange(5)
#Tensor<
0
1
2
3
4
[ CPUFloatType{5} ]
>
# End only with options
iex> ExTorch.arange(5, dtype: :uint8)
#Tensor<
0
1
2
3
4
[ CPUByteType{5} ]
# Start to end
iex> ExTorch.arange(1, 7)
#Tensor<
1
2
3
4
5
6
[ CPUFloatType{6} ]
>
# Start to end with options
iex> ExTorch.arange(1, 7, device: :cpu, dtype: :float64)
#Tensor<
1
2
3
4
5
6
[ CPUDoubleType{6} ]
>
# Start to end with step
iex> ExTorch.arange(-1.3, 2.4, 0.5)
#Tensor<
-1.3000
-0.8000
-0.3000
0.2000
0.7000
1.2000
1.7000
2.2000
[ CPUFloatType{8} ]
>
# Start to end with step and options
iex> ExTorch.arange(-1.3, 2.4, 0.5, dtype: :float64)
#Tensor<
-1.3000
-0.8000
-0.3000
0.2000
0.7000
1.2000
1.7000
2.2000
[ CPUDoubleType{8} ]
>
"""
@spec arange(
number(),
number(),
number(),
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
arange(
start \\ 0,
end_bound,
step \\ 1,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
## Arguments
- `n`: the number of rows
- `m`: the number of columns
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.eye(3, 3)
#Tensor<
1 0 0
0 1 0
0 0 1
[ CPUFloatType{3,3} ]
>
iex> ExTorch.eye(4, 6, dtype: :uint8, device: :cpu)
#Tensor<
1 0 0 0 0 0
0 1 0 0 0 0
0 0 1 0 0 0
0 0 0 1 0 0
[ CPUByteType{4,6} ]
>
"""
@spec eye(
integer(),
integer(),
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
eye(
n,
m \\ n,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Returns a tensor filled with the scalar value `scalar`, with the shape defined
by the variable argument `size`.
## Arguments
- `size`: a tuple/list of integers defining the shape of the output tensor.
- `scalar`: the value to fill the output tensor with.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.full({2, 3}, 2)
#Tensor< 2 2 2
2 2 2
[ CPUFloatType{2,3} ]>
iex> ExTorch.full({2, 3}, 23, dtype: :uint8, device: :cpu)
#Tensor< 23 23 23
23 23 23
[ CPUByteType{2,3} ]>
iex> ExTorch.full({2, 3}, 3.1416)
#Tensor< 3.1416 3.1416 3.1416
3.1416 3.1416 3.1416
[ CPUFloatType{5} ]>
"""
@spec full(
tuple() | [integer()],
number(),
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
full(
size,
scalar,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc ~S"""
Creates a one-dimensional tensor of size `steps` whose values are evenly
spaced from `start` to `end`, inclusive. That is, the value are:
$$(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})$$
## Arguments
- `start`: the starting value for the set of points.
- `end`: the ending value for the set of points.
- `steps`: size of the constructed tensor.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
# Returns a tensor with 10 evenly-spaced values between -2 and 10
iex> ExTorch.linspace(-2, 10, 10)
#Tensor<
-2.0000
-0.6667
0.6667
2.0000
3.3333
4.6667
6.0000
7.3333
8.6667
10.0000
[ CPUFloatType{10} ]
>
# Returns a tensor with 10 evenly-spaced int32 values between -2 and 10
iex> ExTorch.linspace(-2, 10, 10, dtype: :int32)
#Tensor<
-2
0
0
1
3
4
6
7
8
10
[ CPUIntType{10} ]
>
"""
@spec linspace(
number(),
number(),
integer(),
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
linspace(
start,
end_bound,
steps,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc ~S"""
Creates a one-dimensional tensor of size `steps` whose values are evenly
spaced from ${{\text{{base}}}}^{{\text{{start}}}}$ to
${{\text{{base}}}}^{{\text{{end}}}}$, inclusive, on a logarithmic scale
with base `base`. That is, the values are:
$$(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})$$
## Arguments
- `start`: the starting value for the set of points.
- `end`: the ending value for the set of points.
- `steps`: size of the constructed tensor.
- `base`: base of the logarithm function. Default: ``10.0``.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.logspace(-10, 10, 5)
#Tensor<
1.0000e-10
1.0000e-05
1.0000e+00
1.0000e+05
1.0000e+10
[ CPUFloatType{5} ]
>
iex> ExTorch.logspace(0.1, 1.0, 5)
#Tensor<
1.2589
2.1135
3.5481
5.9566
10.0000
[ CPUFloatType{5} ]
>
iex> ExTorch.logspace(0.1, 1.0, 3, base: 2)
#Tensor<
1.0718
1.4641
2.0000
[ CPUFloatType{3} ]
>
iex> ExTorch.logspace(0.1, 1.0, 3, base: 2, dtype: :float64)
#Tensor<
1.0718
1.4641
2.0000
[ CPUDoubleType{3} ]
>
"""
# Binding for torch::logspace. Mirrors `linspace/9` but adds `base`
# (logarithm base, default 10) before the keyword-style optionals.
@spec logspace(
number(),
number(),
integer(),
number(),
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
# NOTE(review): `defbinding` is a project macro generating the actual
# function — confirm defaults propagate as documented in the @doc above.
defbinding(
logspace(
start,
end_bound,
steps,
base \\ 10,
dtype \\ :float,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
)
)
@doc """
Constructs a tensor with data.
## Arguments
- `list`: Initial data for the tensor. Can be a list, tuple or number.
## Keyword args
- dtype (`ExTorch.DType`, optional): the desired data type of returned tensor.
**Default**: if `nil`, uses a global default (see `ExTorch.set_default_tensor_type`).
- layout (`ExTorch.Layout`, optional): the desired layout of returned Tensor.
**Default**: `:strided`.
- device (`ExTorch.Device`, optional): the desired device of returned tensor.
Default: if `nil`, uses the current device for the default tensor type
(see `ExTorch.set_default_tensor_type`). `device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (`boolean()`, optional): If autograd should record operations on the
returned tensor. **Default**: `false`.
- pin_memory (`bool`, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: `false`.
- memory_format (`ExTorch.MemoryFormat`, optional): the desired memory format of
returned Tensor. **Default**: `:contiguous`
## Examples
iex> ExTorch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
#Tensor<
0.1000 1.2000
2.2000 3.1000
4.9000 5.2000
[ CPUFloatType{3,2} ]
>
# Type inference
iex> ExTorch.tensor([0, 1])
#Tensor<
0
1
[ CPUByteType{2} ]
>
iex> ExTorch.tensor([[0.11111, 0.222222, 0.3333333]], dtype: :float64)
#Tensor<
0.1111 0.2222 0.3333
[ CPUDoubleType{1,3} ]
>
"""
# Binding for torch::tensor construction from Elixir terms.
@spec tensor(
list() | tuple() | number(),
ExTorch.DType.dtype(),
ExTorch.Layout.layout(),
ExTorch.Device.device(),
boolean(),
boolean(),
ExTorch.MemoryFormat.memory_format()
) :: ExTorch.Tensor.t()
defbinding(
tensor(
list,
dtype \\ nil,
layout \\ :strided,
device \\ :cpu,
requires_grad \\ false,
pin_memory \\ false,
memory_format \\ :contiguous
),
# Pre-transform: normalize tuples/numbers into the wrapper struct the NIF expects.
list: ExTorch.Utils.to_list_wrapper(list),
# When no dtype is given, fall back to the dtype inferred by the wrapper
# (type inference, see the `[0, 1]` example above).
dtype:
case dtype do
nil -> list.dtype
_ -> dtype
end
)
end
end
|
lib/extorch/native/tensor/creation.ex
| 0.958372 | 0.825976 |
creation.ex
|
starcoder
|
defmodule LogicalFile do
alias LogicalFile.{Macro, Section}
@moduledoc """
## LogicalFile
### One file from many
LogicalFile is a way of creating a logical representation of a unit of lines
of text (e.g. a source code file) supplied by one or more backing files,
presumably separate files on disk. It also provides for a system of macros
that can transform the logical text.
A typical use case for LogicalFile would be to implement a compiler that has
`#include` style functionality. The compiler works on the whole text but
can translate logical line numbers back to specific files and local line
numbers (for example when an error occurs it can pinpoint the specific file
and line the error arose in).
"""
defstruct base_path: nil,
sections: %{}
@type t :: %__MODULE__{
base_path: nil | binary,
sections: map
}
# --- Interface ---
@doc """
`read/3` returns a new `LogicalFile` containing `Section`s that
represent the contents of the file specified by `source_path` relative to
`base_path` and as modified by the macros it is initialised with.
Macros should implement the `LogicalFile.Macro` behaviours and should be
specified as a list of tuples of the form `{module, [options keyword list]}`.
See `LogicalFile.Macro` for further details.
## Examples
iex> file = LogicalFile.read("test/support", "main.source")
iex> assert 11 = LogicalFile.size(file)
"""
def read(base_path, source_path, macros \\ [])
    when is_binary(source_path) and is_list(macros) do
  # Build a single Section covering the whole backing file, then let the
  # configured macros rewrite the logical file.
  expanded_base = Path.expand(base_path)

  section =
    expanded_base
    |> Path.join(source_path)
    |> Section.new()

  file = %LogicalFile{base_path: expanded_base, sections: %{section.range => section}}
  Macro.apply_macros(file, macros)
end
@doc """
`assemble/2` returns a `LogicalFile` composed of the `Section`s specified in
the second argument. This is mainly intended for internal use when modifying
a `LogicalFile` during macro processing.
"""
# Builds a LogicalFile from an ordered list of `Section`s, raising if their
# logical ranges are not contiguous.
def assemble(base_path, sections) when is_list(sections) do
  # check_contiguous/1 raises on a gap or overlap; its return value is unused.
  sections
  |> Enum.map(fn %Section{range: range} -> range end)
  |> check_contiguous()

  # Consistency fix: reuse sections_to_map/1 instead of duplicating the
  # range => section map construction inline.
  %LogicalFile{base_path: base_path, sections: sections_to_map(sections)}
end
# Raises unless each consecutive pair of ranges joins exactly (next range
# starts one past the previous range's last line). Lists of length 0 or 1
# are trivially contiguous.
defp check_contiguous(range_list) do
  range_list
  |> Enum.chunk_every(2, 1, :discard)
  |> Enum.each(fn
    [_..hi, lo.._] when lo == hi + 1 ->
      nil

    [r1, r2] ->
      raise "Non-contiguous ranges #{inspect(r1)} & #{inspect(r2)}!"
  end)
end
@doc """
`line/2` returns the specified logical line number from the `LogicalFile`
at `lno`.
## Example
iex> file = LogicalFile.read("test/support", "main.source")
iex> assert "%(include.source)" = LogicalFile.line(file, 6)
"""
def line(%LogicalFile{} = file, lno) do
  # Returns nil when no section covers the requested logical line,
  # matching the original `with`-based fall-through.
  case section_including_line(file, lno) do
    nil -> nil
    section -> Section.line(section, lno)
  end
end
@doc """
`lines/2` takes a `LogicalFile` and a range of logical line numbers and
returns the contents of each corresponding line (as returned by `line/2`),
in range order.
"""
def lines(%LogicalFile{} = file, logical_line_range)
    when is_struct(logical_line_range, Range) do
  # Idiom fix: the original reduce-then-reverse is exactly Enum.map/2.
  # Doc fix: the old @doc claimed `{file, line}` tuples, but line/2 returns
  # the line content itself.
  Enum.map(logical_line_range, &line(file, &1))
end
@doc """
`insert/3` inserts a new `Section` into the `LogicalFile` at the specified
logical line number `at_line` and containing the contents of the `source_path`.
It guarantees that all sections and the logical file remains contiguous.
## Examples
"""
# Convenience head: load the file at `source_path` (relative to the logical
# file's base path) into a Section, then delegate to insert/3 below.
def insert(%LogicalFile{base_path: base_path} = file, source_path, at_line)
when is_binary(source_path) do
insert(file, Section.new(Path.join(base_path, source_path)), at_line)
end
# Inserts `insert_section` at logical line `at_line`, shifting every later
# section down by the inserted section's size so ranges stay contiguous.
# Raises if `at_line` falls outside every existing section.
def insert(
%LogicalFile{base_path: base_path, sections: sections},
%Section{} = insert_section,
at_line
) do
sections = Map.values(sections)
# `target` is the section containing at_line; `before`/`rest` are the
# sections strictly before/after it in logical order.
{before, target, rest} = partition_sections(sections, at_line)
if is_nil(target) do
raise("Unable to partition: line:#{at_line} is not in any source section.")
else
sections =
case split_strategy(target, at_line) do
# at_line is target's first line: the new section goes before target.
:prepend ->
insert_section = Section.shift(insert_section, Section.total_size(before))
rest = [target | rest]
rest =
Enum.map(rest, fn section ->
Section.shift(section, Section.size(insert_section))
end)
before ++ [insert_section] ++ rest
# at_line is target's last line (or target is a single line): the new
# section goes after target.
:append ->
before = before ++ [target]
insert_section = Section.shift(insert_section, Section.total_size(before))
rest =
Enum.map(rest, fn section ->
Section.shift(section, Section.size(insert_section))
end)
before ++ [insert_section] ++ rest
# at_line is interior to target: split target and slot the new section
# between the two halves.
:insert ->
{pre, post} = Section.split(target, at_line)
before = before ++ [pre]
insert_section = Section.shift(insert_section, Section.total_size(before))
rest = [post | rest]
rest =
Enum.map(rest, fn section ->
Section.shift(section, Section.size(insert_section))
end)
before ++ [insert_section] ++ rest
end
# Reassemble validates contiguity of the shifted ranges.
LogicalFile.assemble(base_path, sections)
end
end
# Decides how insert/3 places a new section relative to the section that
# currently contains the insertion line. Clause order matters:
#   1. single-line section (range lo..lo) -> always :append after it
#   2. at_line equals the section's first line -> :prepend before it
#   3. at_line equals the section's last line -> :append after it
#   4. anywhere in the interior -> :insert (split the section in two)
defp split_strategy(%Section{range: lo..lo}, _), do: :append
defp split_strategy(%Section{range: lo.._}, lo), do: :prepend
defp split_strategy(%Section{range: _..hi}, hi), do: :append
defp split_strategy(%Section{}, _), do: :insert
@doc """
`contains_source?/2` returns true if at least one section from the given
`LogicalFile` originates from the specified `source_path`.
## Examples
iex> file = LogicalFile.read("test/support", "main.source")
iex> invalid_path = Path.expand("test/support/player.source")
iex> assert not LogicalFile.contains_source?(file, invalid_path)
iex> valid_path = Path.expand("test/support/main.source")
iex> assert LogicalFile.contains_source?(file, valid_path)
"""
def contains_source?(%LogicalFile{sections: sections}, source_path) do
  # Sections are stored as a range => section map; only the values matter here.
  sections
  |> Map.values()
  |> Enum.any?(fn section -> section.source_path == source_path end)
end
@doc """
`lines/1` returns a list of all lines in the `LogicalFile` in line number
order.
"""
def lines(%LogicalFile{} = file) do
  # Perf/idiom fix: the original reduced with `lines ++ section.lines`,
  # which is O(n^2) in the total line count; flat_map is linear.
  file
  |> sections_in_order()
  |> Enum.flat_map(& &1.lines)
end
@doc """
`size/1` returns the number of lines in the `LogicalFile`.
## Examples
iex> file = LogicalFile.read("test/support", "main.source")
iex> 11 = LogicalFile.size(file)
"""
def size(%LogicalFile{sections: sections}) do
  # Total line count is the sum of the per-section sizes.
  sections
  |> Map.values()
  |> Enum.map(&Section.size/1)
  |> Enum.sum()
end
@doc """
`last_line_number/1` returns the line number of the last line in the
specified `LogicalFile`.
## Examples
iex> alias LogicalFile.Section
iex> file = LogicalFile.read("test/support", "main.source")
iex> assert 11 = LogicalFile.last_line_number(file)
"""
def last_line_number(%LogicalFile{} = file) do
  # The last section in logical order ends on the file's final line.
  %Section{range: _first..last} =
    file
    |> sections_in_order()
    |> List.last()

  last
end
@doc """
`update_line/3` replaces the content of line `lno` in the specified
`LogicalFile` by passing the current contents of the line to the specified
transformation function. This function is expected to return the new
contents of the line.
## Examples
iex> assert " " =
...> LogicalFile.read("test/support", "main.source")
...> |> LogicalFile.update_line(6, fn line -> String.duplicate(" ", String.length(line)) end)
...> |> LogicalFile.line(6)
"""
# Replaces line `lno` by passing its current content through `fun` and
# re-inserting the rewritten section under its (unchanged) range key.
# NOTE(review): raises if `lno` is outside every section, because
# Section.update_line/3 would then receive nil — presumably intended.
def update_line(%LogicalFile{sections: sections} = file, lno, fun) do
updated_section =
file
|> section_including_line(lno)
|> Section.update_line(lno, fun)
%{file | sections: Map.put(sections, updated_section.range, updated_section)}
end
@doc """
`section_including_line/2` returns the `Section` that contains the logical
line `lno`.
## Examples
iex> alias LogicalFile.Section
iex> section1 = Section.new("test/support/main.source")
iex> section2 = Section.new("test/support/include.source") |> Section.shift(Section.size(section1))
iex> map = LogicalFile.assemble("test/support", [section1, section2])
iex> assert ^section1 = LogicalFile.section_including_line(map, section1.range.first)
iex> assert ^section2 = LogicalFile.section_including_line(map, section2.range.first)
"""
def section_including_line(%LogicalFile{} = file, lno) do
  # Linear scan in logical order; returns nil when no section covers `lno`.
  Enum.find(sections_in_order(file), fn %{range: range} -> lno in range end)
end
@doc """
`resolve_line/2` takes a logical line number `logical_lno` and returns a
tuple `{file, local_line_no}` representing the file and file line number
that logical line represents.
## Examples
iex> alias LogicalFile.Macros.Include
iex> file = LogicalFile.read("test/support", "main.source")
iex> path = Path.expand("test/support/main.source")
iex> assert {^path, 1} = LogicalFile.resolve_line(file, 1)
"""
def resolve_line(%LogicalFile{} = file, logical_lno) do
  # Delegate the logical->local mapping to the covering section.
  section = section_including_line(file, logical_lno)
  Section.resolve_line(section, logical_lno)
end
# --- Utility functions ---
@doc """
`sections_to_map/1` takes a list of `Section`s and returns a `Map` whose
keys are the logical line number ranges of the sections, mapped to the
corresponding sections.
"""
def sections_to_map(sections) do
  # Idiom fix: Map.new/2 expresses the key-value construction directly
  # instead of a hand-rolled reduce over Map.put/3.
  Map.new(sections, fn section -> {section.range, section} end)
end
@doc """
`sections_in_order/1` takes the `Section`s backing a `LogicalFile` and
returns them as a list, ordered by the range of logical line numbers they
represent.
"""
def sections_in_order(%LogicalFile{sections: sections}) do
  # Sort sections by their logical line ranges (ranges compare term-wise).
  sections
  |> Map.values()
  |> Enum.sort_by(& &1.range)
end
@doc """
`partition_sections/2` accepts a list of `Section`s and a logical
line number `at_line` representing an insertion point. It returns a tuple
`{sections_before, insert_section, sections_after}` by finding the `Section`
containing `at_line` and partitioning the remaining `Section`s around it.
"""
def partition_sections(sections, at_line) when is_list(sections) do
  sorted = Enum.sort_by(sections, & &1.range)

  # split_while stops at the first section whose range covers at_line;
  # if none does, return the ORIGINAL list with a nil target (as before).
  case Enum.split_while(sorted, fn section -> at_line not in section.range end) do
    {_before, []} -> {sections, nil, []}
    {before, [target | rest]} -> {before, target, rest}
  end
end
end
defimpl String.Chars, for: LogicalFile do
  # Render the logical file as its lines joined with newlines
  # (no trailing newline).
  def to_string(%LogicalFile{} = file) do
    Enum.join(LogicalFile.lines(file), "\n")
  end
end
|
lib/logical_file.ex
| 0.797636 | 0.516961 |
logical_file.ex
|
starcoder
|
defmodule Day11 do
@moduledoc """
Documentation for `Day11`.
"""
# Runs part 1 (100 steps) and part 2 on the hard-coded puzzle input.
def run() do
process("""
7612648217
7617237672
2853871836
7214367135
1533365614
6258172862
5377675583
5613268278
8381134465
3445428733
""", 100)
end
# Runs both worked examples from the problem statement.
def example(), do: (example1(); example2();)
# Larger 10x10 example, 100 steps.
def example2() do
process("""
5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
""", 100)
end
# Small 5x5 example, a single step.
def example1() do
process("""
11111
19991
19191
19991
11111
""", 1)
end
# Solves both parts for the given puzzle `text`: part 1 counts flashes over
# `n` steps; part 2 finds the first step where every octopus flashes.
def process(text, n) do
  puzzle = make_puzzle(text)
  {_puzzle, count} = process_further(puzzle, 0, n, 1)
  # Bug fix: the summary previously hard-coded "100 steps" even though the
  # step count is the `n` parameter (e.g. example1 runs a single step).
  IO.puts("Total number of flashes after #{n} steps = #{count}")
  _count = process_part2(make_puzzle(text), 1)
end
# Advances the simulation `n` steps, accumulating the flash count.
def process_further(puzzle, sum_of_flashes, 0, _step), do: {puzzle, sum_of_flashes}

def process_further(puzzle, sum_of_flashes, n, step) do
  evolved = next_generation(puzzle)
  flashes = count_zeroes(evolved)
  IO.puts("#{flashes} simultaneous flashes at step #{step}-----------------------------------")
  process_further(evolved, flashes + sum_of_flashes, n - 1, step + 1)
end
# Part 2: step until every octopus flashes simultaneously, returning that
# step number.
# NOTE(review): `< 100` assumes a 10x10 grid (100 octopi); a different board
# size would need row_size * col_size here — confirm inputs are always 10x10.
def process_part2(puzzle, step) do
next_puzzle = next_generation(puzzle)
flash_count = count_zeroes(next_puzzle)
IO.puts("#{flash_count} simultaneous flashes at step #{step}-----------------------------------")
if flash_count < 100 do
process_part2(next_puzzle, step + 1)
else
step
end
end
# Counts cells whose energy is 0, i.e. octopi that flashed this step
# (raise_floor/1 resets flashed cells to 0).
def count_zeroes(%{core: core} = pz) do
  # Idiom fix: Enum.count/2 avoids materializing the intermediate list
  # built by length(Enum.filter(...)).
  Enum.count(get_coords(pz), fn coord -> get_core(core, coord) == 0 end)
end
# Flash propagation: repeatedly finds cells with energy >= 10, flashes them,
# and energizes their neighbors until no new flashes occur.
def anneal(%{core: core} = pz) do
# IO.inspect(core, label: "anneal")
# find what octopi are to be lighted up
enlightened = Enum.filter(get_coords(pz), fn coord -> get_core(core, coord) >= 10 end)
maybe_anneal_again(pz, enlightened)
end
# Base case: no cells left to flash; the cascade has died out.
def maybe_anneal_again(%{core: _core} = pz, [] = _light_list), do: pz
def maybe_anneal_again(%{core: core} = pz, coords) do
# IO.inspect(core, label: "anneal again")
# make them so they only get lighted up once this round
# (-9999 is a sentinel large enough that neighbor boosts cannot push the
# cell back to >= 10 this step; raise_floor/1 later resets negatives to 0)
new_core = Enum.reduce(coords, core, fn coord, kore -> set_core(kore, coord, -9999) end)
# boost neighbors' strength: first find them all (duplicates expected)
# then energize them. (Energize means add energy)
all_neighbors = Enum.map(coords, fn coord -> get_neighbor_coords(pz, coord) end)
|> List.flatten()
energize(%{pz| core: new_core}, all_neighbors)
|> anneal()
end
# Resets every negative-energy (flashed) cell to 0 after a step, so flashed
# octopi start the next step at zero energy.
def raise_floor(%{core: core} = pz) do
  flashed = Enum.filter(get_coords(pz), fn coord -> get_core(core, coord) < 0 end)
  cleared = Enum.reduce(flashed, core, fn coord, acc -> set_core(acc, coord, 0) end)
  %{pz | core: cleared}
end
# energize/1 bumps every cell by one; energize/2 bumps only the given coords
# (duplicates in `coords` bump the same cell multiple times, by design).
def energize(%{core: _core} = puzzle), do: energize(puzzle, get_coords(puzzle))

def energize(%{core: core} = puzzle, coords) do
  bumped =
    Enum.reduce(coords, core, fn coord, acc ->
      set_core(acc, coord, get_core(acc, coord) + 1)
    end)

  %{puzzle | core: bumped}
end
# One full simulation step: energize all, cascade flashes, then normalize.
def next_generation(puzzle) do
puzzle
|> energize() # add one to each octopus's energy
|> anneal() # flash until flashes die out
|> raise_floor() # correct any octopus with negative energy
# negative energy is a flag so no octopus flashes
# twice in the same step
end
# Reads the value at row `r`, column `c` of the tuple-of-tuples grid.
def get_core(core, {r, c}) do
  core |> elem(r) |> elem(c)
end

# Returns a new grid with position {r, c} set to `v`.
def set_core(core, {r, c}, v) do
  updated_row = core |> elem(r) |> put_elem(c, v)
  put_elem(core, r, updated_row)
end
# All {row, col} coordinates of the grid, row-major order.
# Fix: the original comprehension reused `r` and `c` as generator variables,
# shadowing the row_size/col_size parameters of the same names.
def get_coords(%{core: _core, row_size: rows, col_size: cols}),
  do: for(r <- 0..(rows - 1), c <- 0..(cols - 1), do: {r, c})
# The (up to 8) in-bounds neighbors of {r0, c0}, excluding the cell itself.
# Fix: generator variables no longer shadow the row_size/col_size parameters.
def get_neighbor_coords(%{core: _core, row_size: rows, col_size: cols}, {r0, c0}) do
  neighborhood =
    for r <- max(0, r0 - 1)..min(r0 + 1, rows - 1),
        c <- max(0, c0 - 1)..min(c0 + 1, cols - 1),
        do: {r, c}

  neighborhood -- [{r0, c0}]
end
# Parses the digit-grid text into %{core: tuple-of-tuples, row_size, col_size}.
def make_puzzle(text) do
  core =
    text
    |> String.split("\n", trim: true)
    |> Enum.map(fn line ->
      line
      |> String.split("", trim: true)
      |> Enum.map(&String.to_integer/1)
      |> List.to_tuple()
    end)
    |> List.to_tuple()

  %{core: core, row_size: tuple_size(core), col_size: tuple_size(elem(core, 0))}
end
# Pretty-prints the grid, blanking zeros (freshly-flashed cells) for contrast.
# NOTE(review): the "\n\r" line terminator looks like it was meant to be
# "\r\n" — kept as-is to preserve output.
def p2s(%{core: core} = _pz) do
  core
  |> Tuple.to_list()
  |> Enum.map(&Tuple.to_list/1)
  |> Enum.map_join("", fn row -> Enum.map_join(row, "", &"#{&1}") <> "\n\r" end)
  # Fix: `all: true` is not a valid String.replace/4 option (replacement is
  # global by default; the real option is `global: false`).
  |> String.replace("0", " ")
end
end
|
apps/day11/lib/day11.ex
| 0.683631 | 0.412146 |
day11.ex
|
starcoder
|
defmodule FilePreviews do
@type status :: :ok | :error
@type response :: {status, map}
defmodule Config do
@type t :: %Config{api_key: binary, api_secret: binary}
defstruct [:api_key, :api_secret]
end
@moduledoc """
Provides an interface to the [FilePreviews][filepreviews] API.
For the API's documentation please check [http://filepreviews.io/docs/][docs].
## Usage
```
{:ok, filepreviews} = FilePreviews.new("API_KEY", "API_SECRET")
```
### Generate
```
{status, response} = FilePreviews.generate("http://example.com/file.pdf")
```
Note:
- `status` is either :ok or :error.
- `response` is a Map converted from the JSON response from FilePreviews.
#### Options
```
params = %{metadata: ["exif"], pages: "1"}
{status, response} = FilePreviews.generate("http://example.com/file.pdf", params)
```
### Retrieve
```
{status, response} = FilePreviews.generate("42764e04-9094-467c-96b3-49d31ff4423d")
```
[filepreviews]: http://filepreviews.io
[docs]: http://filepreviews.io/docs/
"""
@doc """
Starts FilePreviews process with the given api_key and api_secret.
"""
@spec new(binary, binary) :: {FilePreviews.status(), pid}
def new(api_key, api_secret) do
  # Wrap the credentials in a Config struct and delegate to new/1.
  new(%Config{api_key: api_key, api_secret: api_secret})
end
@doc """
Starts FilePreviews process with the config.
"""
@spec new(FilePreviews.Config.t()) :: {FilePreviews.status(), pid}
# Holds the config in a named Agent registered under this module.
# NOTE(review): because the Agent name is fixed, a second call while one is
# running returns {:error, {:already_started, pid}} — confirm that is intended.
def new(config) do
Agent.start_link(fn -> config end, name: __MODULE__)
end
@doc """
Returns the API Key.
"""
@spec api_key() :: binary
# Reads the API key from the config Agent started by new/1.
def api_key() do
config().api_key
end
@doc """
Returns the API Secret.
"""
@spec api_secret() :: binary
# Reads the API secret from the config Agent started by new/1.
def api_secret() do
config().api_secret
end
@doc """
Generates a preview for a given URL and params.
"""
@spec generate(binary, map) :: FilePreviews.response()
def generate(url, params \\ %{}) do
  # Fixes:
  # * `Dict` was deprecated and removed from Elixir (1.7+) — use `Map`.
  # * The original rebound `geometry` and `params` inside `if` blocks; such
  #   rebindings are scoped to the block and silently discarded, so the
  #   "size" option was never actually applied to the request.
  params = Map.merge(%{url: url}, params)

  params =
    case Map.get(params, "size") do
      size when is_map(size) -> Map.put(params, "sizes", [geometry_for(size)])
      _ -> params
    end

  FilePreviews.Client.post("/previews/", params)
end

# Builds an ImageMagick-style geometry string ("W", "xH" or "WxH") from a
# size map with optional "width"/"height" keys; "" when both are absent.
defp geometry_for(size) do
  case {Map.get(size, "width"), Map.get(size, "height")} do
    {nil, nil} -> ""
    {w, nil} -> "#{w}"
    {nil, h} -> "x#{h}"
    {w, h} -> "#{w}x#{h}"
  end
end
@doc """
Retrieves a preview with a given ID.
"""
@spec retrieve(binary) :: FilePreviews.response()
def retrieve(id) do
  # GET the preview resource by its ID.
  FilePreviews.Client.get("/previews/" <> to_string(id) <> "/")
end
# Returns the library version from the Mix project config.
# NOTE(review): Mix is not available at runtime in releases — confirm this
# is only called in dev/test contexts.
def version() do
Mix.Project.config()[:version]
end
# Fetches the stored Config struct from the named Agent.
defp config() do
Agent.get(__MODULE__, fn state -> state end)
end
end
|
lib/filepreviews.ex
| 0.841972 | 0.686206 |
filepreviews.ex
|
starcoder
|
defmodule Ockam.Channel do
@moduledoc """
An implementation of secure channels via the Noise protocol
See an overview of the Noise handshake [here](https://noiseprotocol.org/noise.html#overview-of-handshake-state-machine)
"""
require Logger
alias Ockam.Transport
alias Ockam.Channel.Handshake
alias Ockam.Channel.Protocol
alias Ockam.Channel.CipherState
alias Ockam.Router.Protocol.Encoding
alias Ockam.Router.Protocol.Message.Envelope
alias Ockam.Router.Protocol.Message.Payload
alias Ockam.Vault
defstruct [:vault, :rx, :tx, :hash, :state]
@type t :: %__MODULE__{
vault: Vault.t(),
rx: CipherState.t(),
tx: CipherState.t(),
hash: binary(),
state: Ockam.Noise.Handshake.t()
}
@type role :: :initiator | :responder
@type reason :: term()
@type step_data :: {:send, payload :: binary()} | {:received, encrypted :: binary()}
@roles [:initiator, :responder]
@doc """
Encrypt a message to be sent over the given channel
"""
def encrypt(%__MODULE__{vault: vault, tx: tx} = chan, payload) do
  # Encrypt with the transmit cipher state (empty associated data) and
  # carry the advanced state forward; raises on cipher failure by design.
  {:ok, advanced_tx, ciphertext} = CipherState.encrypt(tx, vault, "", payload)
  {:ok, %{chan | tx: advanced_tx}, ciphertext}
end
@doc """
Decrypt a message received over the given channel
"""
def decrypt(%__MODULE__{vault: vault, rx: rx} = chan, payload) do
  # Decrypt with the receive cipher state; any error tuple from the cipher
  # is passed through unchanged (same as the original `with` fall-through).
  case CipherState.decrypt(rx, vault, "", payload) do
    {:ok, advanced_rx, plaintext} -> {:ok, %{chan | rx: advanced_rx}, plaintext}
    error -> error
  end
end
@doc """
Start a handshake
"""
@spec handshake(Vault.t(), role(), map()) ::
        {:ok, Handshake.t()} | {:error, {module(), reason()}}
def handshake(vault, role, options)

def handshake(%Vault{} = vault, role, options) when role in @roles and is_map(options) do
  # Anti-pattern fix: the original used throw/catch to bail out when the
  # protocol name failed to parse; `with` expresses the same fall-through
  # (any non-{:ok, _} from resolve_protocol/1 is returned as-is).
  prologue = Map.get(options, :prologue, "")

  with {:ok, protocol} <- resolve_protocol(Map.get(options, :protocol)) do
    s = Map.get(options, :s)
    e = Map.get(options, :e)
    rs = Map.get(options, :rs)
    re = Map.get(options, :re)
    Handshake.init(vault, protocol, role, prologue, {s, e, rs, re})
  end
end

def handshake(%Vault{}, role, _options) when role not in @roles,
  do: {:error, {__MODULE__, {:invalid_role, role}}}

def handshake(%Vault{}, _role, _options),
  do: {:error, {__MODULE__, {:invalid_options, :expected_map}}}

# Normalizes the :protocol option: accepts an already-built %Protocol{} or a
# textual protocol name. Any other value raises (the original raised
# CaseClauseError; this raises FunctionClauseError — both are crashes on
# programmer error).
defp resolve_protocol(%Protocol{} = protocol), do: {:ok, protocol}
defp resolve_protocol(name) when is_binary(name), do: Protocol.from_name(name)
@doc """
Step the handshake state machine forward one step
"""
@spec step_handshake(Handshake.t(), step_data()) ::
{:ok, :send, binary(), Handshake.t()}
| {:ok, :received, binary(), Handshake.t()}
| {:ok, :done, t()}
| {:error, {__MODULE__, reason()}}
def step_handshake(handshake, data)
# Dispatch on what the handshake state machine expects next (:in/:out/:done)
# versus what the caller supplied.
def step_handshake(%Handshake{} = handshake, data) do
next = Handshake.next_message(handshake)
step_handshake(next, data, handshake)
end
# Expecting an inbound message: decrypt/consume it.
defp step_handshake(:in, {:received, encrypted}, handshake) do
with {:ok, hs, msg} <- Handshake.read_message(handshake, encrypted) do
{:ok, :received, msg, hs}
end
end
# Expecting to send: produce the next outbound handshake message.
defp step_handshake(:out, {:send, payload}, handshake) do
with {:ok, hs, msg} <- Handshake.write_message(handshake, payload) do
{:ok, :send, msg, hs}
end
end
# Handshake complete: derive the secure channel.
defp step_handshake(:done, :done, handshake) do
with {:ok, chan} <- Handshake.finalize(handshake) do
{:ok, :done, chan}
end
end
# Caller supplied data inconsistent with the expected direction.
defp step_handshake(next, data, _handshake) do
{:error, {__MODULE__, {:invalid_step, {:expected, next}, {:got, data}}}}
end
@doc """
Perform a Noise handshake to secure a channel, using the provided transport
"""
@spec negotiate_secure_channel(Handshake.t(), Transport.t(), map()) ::
{:ok, t(), Transport.t()} | {:error, {__MODULE__, term()}}
@spec negotiate_secure_channel(Vault.t(), role(), Transport.t(), map()) ::
{:ok, t(), Transport.t()} | {:error, {__MODULE__, term()}}
def negotiate_secure_channel(vault, role, transport, options)
# 4-arity: build a fresh handshake from vault/role/options, then drive it.
# `:timeout` (default :infinity) bounds each transport receive.
def negotiate_secure_channel(%Vault{} = vault, role, transport, options) when role in @roles do
with {:ok, handshake} <- handshake(vault, role, options) do
timeout = Map.get(options, :timeout, :infinity)
do_negotiate_secure_channel(handshake, transport, timeout)
end
end
# 3-arity: drive an already-initialized handshake to completion.
def negotiate_secure_channel(%Handshake{} = handshake, transport, options)
when is_map(options) do
timeout = Map.get(options, :timeout, :infinity)
do_negotiate_secure_channel(handshake, transport, timeout)
end
# Drives the handshake over the transport: ask the state machine what comes
# next (:in, :out or :done) and recurse until the channel is established.
defp do_negotiate_secure_channel(%Handshake{} = handshake, transport, timeout) do
next = Handshake.next_message(handshake)
Logger.debug("[#{inspect(handshake.role)}] Transitioning handshake to #{inspect(next)}")
do_negotiate_secure_channel(next, handshake, transport, timeout)
end
# :in — wait for the peer's next handshake message, decode the Payload
# envelope, and feed it to the state machine.
defp do_negotiate_secure_channel(:in, handshake, transport, timeout) do
Logger.debug("[#{inspect(handshake.role)}] Awaiting handshake message")
with {:ok, data, transport} <- Transport.recv(transport, timeout: timeout),
{:ok, %Envelope{body: %Payload{data: data}}, _rest} <- Encoding.decode(data),
{:ok, hs, _msg} <- Handshake.read_message(handshake, data) do
do_negotiate_secure_channel(hs, transport, timeout)
else
# Decoded something other than a Payload envelope.
{:ok, message, _} ->
{:error, {:unexpected_message, message}}
{:error, _} = err ->
err
end
end
# :out — emit the next handshake message (empty application payload),
# wrapped in a Payload envelope.
defp do_negotiate_secure_channel(:out, handshake, transport, timeout) do
Logger.debug("[#{inspect(handshake.role)}] Sending handshake message")
with {:ok, hs, msg} <- Handshake.write_message(handshake, ""),
{:ok, encoded} <- Encoding.encode(%Payload{data: msg}),
{:ok, transport} <- Transport.send(transport, encoded) do
do_negotiate_secure_channel(hs, transport, timeout)
end
end
# :done — derive the secure channel from the completed handshake.
defp do_negotiate_secure_channel(:done, handshake, transport, _timeout) do
Logger.debug("[#{inspect(handshake.role)}] Finalizing handshake")
with {:ok, chan} <- Handshake.finalize(handshake) do
{:ok, chan, transport}
end
end
end
|
implementations/elixir/lib/channel.ex
| 0.955561 | 0.439447 |
channel.ex
|
starcoder
|
defmodule StatetraceElixir.Annotations do
@moduledoc """
Schema for annotating database transactions for Statetrace.
Statetrace treats values written to statetrace_annotations in a special way,
allowing you to annotate the row-level transaction information. This should not be used
directly, instead you should use `StatetraceElixir.Annotations`
For information about integration see `process_conn/2`
"""
defmodule CurrentUser do
defstruct [:id, :full_name, :avatar]
end
import Ecto.Query
import Phoenix.Controller
import Plug.Conn
alias StatetraceElixir.Annotations.Annotation
@doc """
Generate the numerical portion of the frame's ID.
"""
# Derives a positive 32-bit-safe integer ID from the VM's monotonic clock
# (modulo max signed int32).
# NOTE(review): monotonic time restarts with the VM and two calls in the
# same nanosecond could collide — confirm uniqueness requirements.
def new_id do
rem(abs(System.monotonic_time(:nanosecond)), 2_147_483_647)
end
@doc """
Annotate session information into the current database transaction.
"""
def log_session!(repo, session_actor_id, session_actor_full_name, session_actor_avatar) do
  # Session marker row ("_st.app.sess"); the actor id is stringified since
  # the column is textual. Raises on insert failure (bang insert).
  annotation = %Annotation{
    id: new_id(),
    kind: "_st.app.sess",
    timestamp: DateTime.utc_now(),
    session_actor_id: "#{session_actor_id}",
    session_actor_full_name: session_actor_full_name,
    session_actor_avatar: session_actor_avatar
  }

  repo.insert!(annotation)
end
@doc """
Annotate action information into the current database transaction.
"""
def log_action!(repo, parent_timestamp, parent_id, action_url) do
  # Action marker row ("_st.app.act") linked to its session annotation via
  # the parent id/timestamp pair. Raises on insert failure (bang insert).
  annotation = %Annotation{
    id: new_id(),
    kind: "_st.app.act",
    timestamp: DateTime.utc_now(),
    parent_id: parent_id,
    parent_timestamp: parent_timestamp,
    action_url: action_url
  }

  repo.insert!(annotation)
end
@doc """
Processes a `Plug.Conn` to annotate the current transaction with request details.
This should be called inside of a transaction for example:
```
defmodule MyAppWeb.SomeController do
use MyAppWeb, :controller
alias StatetraceElixir.Annotations
def action(conn, _) do
args = [conn, conn.params]
with {_, response} <-
MyApp.Repo.transaction(fn ->
Annotations.process_conn(conn,
get_actor: fn conn -> conn.assigns.current_actor end,
repo: MyApp.Repo
)
apply(__MODULE__, action_name(conn), args)
end) do
response
end
end
end
```
"""
def process_conn(conn, options) do
  # :repo is mandatory; actor and action-URL extractors are overridable.
  repo = Keyword.fetch!(options, :repo)

  conn
  |> process_session!(repo, Keyword.get(options, :get_actor, &get_nil/1))
  |> process_action!(repo, Keyword.get(options, :get_action_url, &get_current_url/1))
end
@doc """
Annotates session information as part of `process_conn/2`
This function is exposed to give finer grained control over those who need it. In general it is recommended to use `process_conn/2`
"""
# Ensures the Plug session carries a Statetrace session marker: on first
# sight, logs a session annotation (with actor details when available) and
# stores its [timestamp, id] pair in the session; afterwards it is a no-op.
def process_session!(
      conn,
      repo,
      get_actor \\ &get_nil/1
    ) do
  case get_session(conn, :statetrace_session) do
    nil ->
      annotation =
        case get_actor.(conn) do
          nil ->
            log_session!(repo, nil, nil, nil)

          %{id: id, full_name: full_name, avatar: avatar} ->
            log_session!(repo, id, full_name, avatar)
        end

      put_session(
        conn,
        :statetrace_session,
        Jason.encode!([annotation.timestamp, annotation.id])
      )

    # Fix: the original bound `session` without using it, producing an
    # "unused variable" compiler warning.
    _session ->
      conn
  end
end
@doc """
Annotates action information as part of `process_conn/2`
This function is exposed to give finer grained control over those who need it. In general it is recommended to use `process_conn/2`
"""
# Logs an action annotation tied to the session marker stored by
# process_session!/3.
# NOTE(review): raises if the session marker is missing (Jason.decode!(nil))
# — assumes process_session! always runs first; confirm ordering.
# The `0` in from_iso8601 asserts a UTC offset, which holds for timestamps
# produced by DateTime.utc_now/0 above.
def process_action!(
conn,
repo,
get_action_url \\ &get_current_url/1
) do
[parent_timestamp_str, parent_id] = Jason.decode!(get_session(conn, :statetrace_session))
{:ok, parent_timestamp, 0} = DateTime.from_iso8601(parent_timestamp_str)
url = get_action_url.(conn)
log_action!(repo, parent_timestamp, parent_id, url)
conn
end
# Default actor extractor: no actor information.
defp get_nil(_conn), do: nil
# Default action-URL extractor: the request's current URL.
defp get_current_url(conn), do: current_url(conn)
end
|
lib/statetrace_elixir/annotations.ex
| 0.858719 | 0.58883 |
annotations.ex
|
starcoder
|
defmodule Sneex.Ops.MoveBits do
@moduledoc "
This represents the op codes for moving the bits of a value
This can either be a shift (where 0's fill in the bit, and the
moved bit goes to the carry flag) or a rotation (where the
carry flag is filled in and the moved bit goes to the carry flag).
"
defstruct [:cycle_mods, :address_mode, :operation, :disasm]
use Bitwise
alias Sneex.Address.{Absolute, CycleCalculator, DirectPage, Indexed, Mode, Register}
alias Sneex.{Cpu, CpuHelper}
@type t :: %__MODULE__{
cycle_mods: list(CycleCalculator.t()),
address_mode: any(),
operation: function(),
disasm: String.t()
}
@spec new(Cpu.t() | byte()) :: nil | __MODULE__.t()
# Accepts either a Cpu (reads the opcode at the PC) or a raw opcode byte;
# returns nil for opcodes that are not shift/rotate instructions.
def new(%Cpu{} = cpu) do
  new(Cpu.read_opcode(cpu))
end

def new(opcode) do
  opcode
  |> determine_base_data()
  |> set_function_and_disasm(opcode)
end
defp determine_base_data(op) when 0x1E == band(op, 0x1E) do
%__MODULE__{
cycle_mods: [CycleCalculator.constant(7), CycleCalculator.acc_is_16_bit(2)],
address_mode: true |> Absolute.new() |> Indexed.new(:x)
}
end
defp determine_base_data(op) when 0x0E == band(op, 0x0E) do
%__MODULE__{
cycle_mods: [CycleCalculator.constant(6), CycleCalculator.acc_is_16_bit(2)],
address_mode: true |> Absolute.new()
}
end
defp determine_base_data(op) when 0x0A == band(op, 0x0A) do
%__MODULE__{
cycle_mods: [CycleCalculator.constant(2)],
address_mode: Register.new(:acc)
}
end
defp determine_base_data(op) when 0x16 == band(op, 0x16) do
%__MODULE__{
cycle_mods: [
CycleCalculator.constant(6),
CycleCalculator.acc_is_16_bit(1),
CycleCalculator.low_direct_page_is_not_zero(1)
],
address_mode: DirectPage.new() |> Indexed.new(:x)
}
end
defp determine_base_data(op) when 0x06 == band(op, 0x06) do
%__MODULE__{
cycle_mods: [
CycleCalculator.constant(5),
CycleCalculator.acc_is_16_bit(1),
CycleCalculator.low_direct_page_is_not_zero(1)
],
address_mode: DirectPage.new()
}
end
defp determine_base_data(_op), do: nil
defp set_function_and_disasm(data = %__MODULE__{}, op)
when 0x00 == band(op, 0xF0) or 0x10 == band(op, 0xF0) do
%__MODULE__{data | disasm: "ASL", operation: build_shift_function(:left)}
end
defp set_function_and_disasm(data = %__MODULE__{}, op)
when 0x40 == band(op, 0xF0) or 0x50 == band(op, 0xF0) do
%__MODULE__{data | disasm: "LSR", operation: build_shift_function(:right)}
end
defp set_function_and_disasm(data = %__MODULE__{}, op)
when 0x20 == band(op, 0xF0) or 0x30 == band(op, 0xF0) do
%__MODULE__{data | disasm: "ROL", operation: build_rotate_function(:left)}
end
defp set_function_and_disasm(data = %__MODULE__{}, op)
when 0x60 == band(op, 0xF0) or 0x70 == band(op, 0xF0) do
%__MODULE__{data | disasm: "ROR", operation: build_rotate_function(:right)}
end
defp set_function_and_disasm(_data, _op), do: nil
defp build_shift_function(direction) do
fn value, bitness, _carry_flag ->
CpuHelper.rotate(value, bitness, direction)
end
end
defp build_rotate_function(direction) do
fn value, bitness, carry_flag ->
mask = adjust_rotation_mask(bitness, direction, carry_flag)
{new_value, new_carry_flag} = CpuHelper.rotate(value, bitness, direction)
adjusted_value = bor(new_value, mask)
{adjusted_value, new_carry_flag}
end
end
defp adjust_rotation_mask(_bitness, :left, true), do: 0x0001
defp adjust_rotation_mask(:bit8, :right, true), do: 0x80
defp adjust_rotation_mask(:bit16, :right, true), do: 0x8000
defp adjust_rotation_mask(_bitness, _direction, false), do: 0x0000
defimpl Sneex.Ops.Opcode do
def byte_size(%{address_mode: mode}, cpu), do: Mode.byte_size(mode, cpu) + 1
def total_cycles(%{cycle_mods: mods}, cpu) do
CycleCalculator.calc_cycles(cpu, mods)
end
def execute(%{address_mode: mode, operation: op}, cpu) do
acc_size = cpu |> Cpu.acc_size()
carry_flag = cpu |> Cpu.carry_flag()
{result, new_carry_flag} = mode |> Mode.fetch(cpu) |> op.(acc_size, carry_flag)
%{negative: nf, zero: zf} = result |> CpuHelper.check_flags_for_value(acc_size)
cpu = mode |> Mode.store(cpu, result)
cpu |> Cpu.negative_flag(nf) |> Cpu.zero_flag(zf) |> Cpu.carry_flag(new_carry_flag)
end
def disasm(%{address_mode: mode, disasm: disasm}, cpu),
do: "#{disasm} #{Mode.disasm(mode, cpu)}"
end
end
|
lib/sneex/ops/move_bits.ex
| 0.799521 | 0.642439 |
move_bits.ex
|
starcoder
|
defmodule Saucexages.DataTypeInfo do
  @moduledoc false

  # Metadata describing a single SAUCE data type:
  #   * :data_type_id - human-readable identifier (e.g. :character)
  #   * :data_type    - numeric value as stored in a SAUCE record
  #   * :name         - display name (e.g. "Character")
  @enforce_keys [:data_type_id, :data_type, :name]

  @type t :: %__MODULE__{
          data_type_id: atom(),
          data_type: non_neg_integer(),
          name: String.t()
        }

  defstruct [:data_type_id, :data_type, :name]
end
defmodule Saucexages.DataType do
  @moduledoc """
  Functions for working with SAUCE Data Types. Each data type in combination with a file type determines how SAUCE type dependent fields should be interpreted. The `data type` and `file type` together form *named file types* such as ANSI, ASCII, RIP Script, HTML, and S3M among many others.

  Each data type is represented by a human-readable `data_type_id` and has *one* or *more* associated file types. The `data_type` itself is stored in a field in a SAUCE record as an unsigned integer.

  You should work with `data_type_id` internally in your system and `data_type` only when working *externally* or dealing *directly* with SAUCE binary values.

  The list of data types itself is fixed and is a part of the SAUCE specification. In the unlikely event you need to work with an unsupported data type, you should use a data_type of `:none` or otherwise reconsider.

  ## Data Types Overview

  The following `data_type_id` values are valid and correspond to those defined by the SAUCE spec:

  * `:none` - Anything not set in a SAUCE record or not covered by the SAUCE spec.
  * `:character` - Character-based files such as `ascii`, `ansi graphics`, and other text files.
  * `:bitmap` - Bitmap graphic and animation files such as `gif`, `png`, `jpeg`, etc.
  * `:vector` - Vector graphics file such as `dxf`, `dwg`, etc.
  * `:audio` - Audio files such as mod, s3m, wav, etc.
  * `:binary_text` - Raw memory copy of text mode screen, used for art .BIN files.
  * `:xbin` - Extended BIN files
  * `:archive` - Archive files such as `zip`, `arc`, `lzh`, etc.
  * `:executable` - Executable scripts such as `.exe`, `.bat`, `.dll`, etc.

  ## Notes

  In the case of `:binary_text`, its file type is variable and thus can be any non-negative file type.

  Be aware that some media types might intuitively match some of these types such, but you should not assume any typing other than what is defined by the SAUCE spec. For instance, `rip` files are vectors, but considered to be characters.

  It is critical that any media type not covered by this spec should be assumed to have a data type of `:none` unless you are able to update the official SAUCE spec.
  """

  alias Saucexages.DataTypeInfo

  @typedoc "Numeric data type value as stored in a SAUCE record."
  @type data_type :: non_neg_integer()

  @typedoc "Human-readable data type identifier."
  @type data_type_id :: :none | :character | :bitmap | :vector | :audio | :binary_text | :xbin | :archive | :executable

  # Canonical mapping per the SAUCE spec; unrolled into function clauses below.
  @data_type_mapping [
    %DataTypeInfo{data_type_id: :none, data_type: 0, name: "None"},
    %DataTypeInfo{data_type_id: :character, data_type: 1, name: "Character"},
    %DataTypeInfo{data_type_id: :bitmap, data_type: 2, name: "Bitmap"},
    %DataTypeInfo{data_type_id: :vector, data_type: 3, name: "Vector"},
    %DataTypeInfo{data_type_id: :audio, data_type: 4, name: "Audio"},
    %DataTypeInfo{data_type_id: :binary_text, data_type: 5, name: "Binary Text"},
    %DataTypeInfo{data_type_id: :xbin, data_type: 6, name: "XBIN"},
    %DataTypeInfo{data_type_id: :archive, data_type: 7, name: "Archive"},
    %DataTypeInfo{data_type_id: :executable, data_type: 8, name: "Executable"},
  ]

  @doc """
  Returns a full list of data type info available for SAUCE.
  """
  # NOTE: previously a defmacro, which conflicted with `data_type_meta/1`
  # below - Elixir does not allow a def and a defmacro to share a name in
  # one module - and made the @spec point at an undefined function. A plain
  # function returns the same literal list.
  @spec data_type_meta() :: [DataTypeInfo.t()]
  def data_type_meta() do
    @data_type_mapping
  end

  @doc """
  Returns the data type meta information for the given data type.

  ## Examples

      iex> Saucexages.DataType.data_type_meta(:character)
      %Saucexages.DataTypeInfo{
        data_type: 1,
        data_type_id: :character,
        name: "Character"
      }

  """
  # Spec widened with `nil`: the catch-all clause returns nil for unknown values.
  @spec data_type_meta(data_type() | data_type_id()) :: DataTypeInfo.t() | nil
  def data_type_meta(data_type)

  # Generate one clause per id and one per numeric value at compile time.
  for %{data_type_id: data_type_id, data_type: data_type} = data_type_info <- @data_type_mapping do
    def data_type_meta(unquote(data_type_id)) do
      unquote(
        data_type_info
        |> Macro.escape()
      )
    end

    def data_type_meta(unquote(data_type)) do
      unquote(
        data_type_info
        |> Macro.escape()
      )
    end
  end

  def data_type_meta(data_type) when is_integer(data_type) or is_atom(data_type) do
    nil
  end

  @doc """
  Returns a list of data type ids available for SAUCE.

  ## Examples

      iex> Saucexages.DataType.data_type_ids()
      [:none, :character, :bitmap, :vector, :audio, :binary_text, :xbin, :archive,
      :executable]

  """
  # NOTE: also previously a defmacro; converted to a function for the same
  # reason as `data_type_meta/0` (the @spec referenced an undefined function).
  @spec data_type_ids() :: [data_type_id()]
  def data_type_ids() do
    Enum.map(@data_type_mapping, fn (%{data_type_id: data_type_id}) -> data_type_id end)
  end

  @doc """
  Returns a data type identifier for a given data type value.

  ## Examples

      iex> Saucexages.DataType.data_type_id(1)
      :character

      iex> Saucexages.DataType.data_type_id(2)
      :bitmap

      iex> Saucexages.DataType.data_type_id(44)
      :none

  """
  @spec data_type_id(data_type()) :: data_type_id()
  def data_type_id(data_type)

  for %{data_type_id: data_type_id, data_type: data_type} <- @data_type_mapping do
    def data_type_id(unquote(data_type)) do
      unquote(data_type_id)
    end
  end

  # Unknown numeric values are :none per the SAUCE spec.
  def data_type_id(data_type) when is_integer(data_type) do
    :none
  end

  @doc """
  Returns a data type value for a given data type identifier.

  ## Examples

      iex> Saucexages.DataType.data_type(:none)
      0

      iex> Saucexages.DataType.data_type(:character)
      1

      iex> Saucexages.DataType.data_type(:bitmap)
      2

  """
  @spec data_type(data_type_id()) :: data_type()
  def data_type(data_type_id)

  for %{data_type_id: data_type_id, data_type: data_type} <- @data_type_mapping do
    def data_type(unquote(data_type_id)) do
      unquote(data_type)
    end
  end

  # Unknown identifiers map to 0 (:none).
  def data_type(data_type_id) when is_atom(data_type_id) do
    0
  end
end
|
lib/saucexages/data_type.ex
| 0.856032 | 0.691862 |
data_type.ex
|
starcoder
|
defmodule Exhort.SAT.SolverResponse do
  @moduledoc """
  A response from solving a model.

  Provides functions for retrieving variable values from the response. The set
  of valid variables are those defined in the model that was solved.
  """

  @type t :: %__MODULE__{}
  defstruct [:res, :model, :status, :int_status, :objective, :walltime, :usertime]

  alias __MODULE__
  alias Exhort.NIF.Nif
  alias Exhort.SAT.BoolVar
  alias Exhort.SAT.IntVar
  alias Exhort.SAT.Model
  alias Exhort.SAT.SolverResponse
  alias Exhort.SAT.Vars

  # Solver statuses under which no solution values are available.
  @no_solution_statuses [:unknown, :model_invalid, :infeasible]

  @spec build(map(), Model.t()) :: SolverResponse.t()
  def build(
        %{
          "res" => res,
          "status" => int_status,
          "objective" => objective,
          "walltime" => walltime,
          "usertime" => usertime
        },
        model
      ) do
    %SolverResponse{
      res: res,
      model: model,
      status: decode_status(int_status),
      int_status: int_status,
      objective: objective,
      walltime: walltime,
      usertime: usertime
    }
  end

  @doc """
  A map of the response metadata, `:status`, `:objective`, `:walltime`,
  `:usertime`.
  """
  @spec stats(SolverResponse.t()) :: map()
  def stats(response), do: Map.take(response, [:status, :objective, :walltime, :usertime])

  @doc """
  Get a variable value from the response.
  """
  @spec value(SolverResponse.t(), var :: BoolVar.t() | IntVar.t()) :: boolean() | integer()
  def value(response, %BoolVar{} = var), do: bool_val(response, var)
  def value(response, %IntVar{} = var), do: int_val(response, var)

  @doc """
  Get the corresponding value of the integer variable.
  """
  @spec int_val(SolverResponse.t(), var :: String.t() | atom() | IntVar.t()) :: integer()
  def int_val(response, var), do: lookup_int(response, var)

  @doc """
  Get the corresponding value of the boolean variable.
  """
  @spec bool_val(SolverResponse.t(), literal :: String.t() | atom() | BoolVar.t()) :: boolean()
  def bool_val(response, var), do: lookup_bool(response, var)

  # Translate the solver's integer status code to a descriptive atom.
  defp decode_status(code) do
    case code do
      0 -> :unknown
      1 -> :model_invalid
      2 -> :feasible
      3 -> :infeasible
      4 -> :optimal
      _ -> nil
    end
  end

  @spec lookup_int(SolverResponse.t(), atom() | String.t() | IntVar.t()) ::
          nil | integer()
  defp lookup_int(%SolverResponse{status: status}, _var)
       when status in @no_solution_statuses,
       do: nil

  # Variable carries no native resource: resolve it by name first.
  defp lookup_int(%SolverResponse{res: res, model: %{vars: vars}}, %IntVar{res: nil, name: name}) do
    %IntVar{res: var_res} = Vars.get(vars, name)
    Nif.solution_integer_value_nif(res, var_res)
  end

  defp lookup_int(%SolverResponse{res: res}, %IntVar{res: var_res}),
    do: Nif.solution_integer_value_nif(res, var_res)

  # Bare name (string or atom): look the variable up in the model.
  defp lookup_int(%SolverResponse{res: res, model: %{vars: vars}}, name) do
    %IntVar{res: var_res} = Vars.get(vars, name)
    Nif.solution_integer_value_nif(res, var_res)
  end

  @spec lookup_bool(SolverResponse.t(), atom() | String.t() | BoolVar.t()) ::
          nil | boolean()
  defp lookup_bool(%SolverResponse{status: status}, _var)
       when status in @no_solution_statuses,
       do: nil

  defp lookup_bool(%SolverResponse{res: res, model: %{vars: vars}}, %BoolVar{res: nil, name: name}) do
    %BoolVar{res: var_res} = Vars.get(vars, name)
    Nif.solution_bool_value_nif(res, var_res) == 1
  end

  defp lookup_bool(%SolverResponse{res: res}, %BoolVar{res: var_res}),
    do: Nif.solution_bool_value_nif(res, var_res) == 1

  defp lookup_bool(%SolverResponse{res: res, model: %{vars: vars}}, name) do
    %BoolVar{res: var_res} = Vars.get(vars, name)
    Nif.solution_bool_value_nif(res, var_res) == 1
  end
end
|
lib/exhort/sat/solver_response.ex
| 0.792986 | 0.401541 |
solver_response.ex
|
starcoder
|
defmodule Credo.Code.Scope do
  @moduledoc """
  This module provides helper functions to determine the scope name at a certain
  point in the analysed code.
  """

  # Calls that introduce a new function-level scope.
  @def_ops [:def, :defp, :defmacro]

  @doc """
  Returns the module part of a scope.

      iex> Credo.Code.Scope.mod_name("Credo.Code")
      "Credo.Code"

      iex> Credo.Code.Scope.mod_name("Credo.Code.ast")
      "Credo.Code"
  """
  def mod_name(nil), do: nil

  def mod_name(scope_name) do
    names = String.split(scope_name, ".")
    base_name = List.last(names)

    # A lowercase last segment is a function name, not a module segment,
    # so it is dropped from the returned module name.
    if String.match?(base_name, ~r/^[a-z]/) do
      names
      |> Enum.slice(0..(length(names) - 2))
      |> Enum.join(".")
    else
      scope_name
    end
  end

  @doc """
  Returns the scope for the given line as a tuple consisting of the call to
  define the scope (`:defmodule`, `:def`, `:defp` or `:defmacro`) and the
  name of the scope.

  Examples:

      {:defmodule, "Foo.Bar"}
      {:def, "Foo.Bar.baz"}

  """
  def name(_ast, line: 0), do: nil

  def name(ast, line: line) do
    ast
    |> scope_info_list()
    |> name_from_scope_info_list(line)
  end

  @doc false
  # Scope infos are ordered by line number, so the first entry whose line is
  # at or before `line` is the scope covering that line.
  def name_from_scope_info_list(scope_info_list, line) do
    result =
      Enum.find(scope_info_list, fn
        {line_no, _op, _arguments} when line_no <= line -> true
        _ -> false
      end)

    case result do
      {_line_no, op, arguments} ->
        name = Credo.Code.Name.full(arguments)
        {op, name}

      _ ->
        {nil, ""}
    end
  end

  @doc false
  # Walks the AST collecting {line, op, scope_name} tuples for every module
  # and def-like construct; reversed so entries are in source order.
  def scope_info_list(ast) do
    {_, scope_info_list} = Macro.prewalk(ast, [], &traverse_modules(&1, &2, nil, nil))
    Enum.reverse(scope_info_list)
  end

  # A defmodule opens a nested scope: record it, then walk its body with the
  # extended scope name. Returning nil as the node stops prewalk from
  # descending again (the nested walk already handled the body).
  defp traverse_modules({:defmodule, meta, arguments} = ast, acc, current_scope, _current_op)
       when is_list(arguments) do
    new_scope_part = Credo.Code.Module.name(ast)

    scope_name =
      [current_scope, new_scope_part]
      |> Enum.reject(&is_nil/1)
      |> Credo.Code.Name.full()

    defmodule_scope_info = {meta[:line], :defmodule, scope_name}

    {_, def_scope_infos} =
      Macro.prewalk(arguments, [], &traverse_defs(&1, &2, scope_name, :defmodule))

    new_acc = (acc ++ [defmodule_scope_info]) ++ def_scope_infos

    {nil, new_acc}
  end

  # Any other call inherits the enclosing scope.
  defp traverse_modules({_op, meta, _arguments} = ast, acc, current_scope, current_op) do
    scope_info = {meta[:line], current_op, current_scope}
    {ast, acc ++ [scope_info]}
  end

  defp traverse_modules(ast, acc, _current_scope, _current_op) do
    {ast, acc}
  end

  # A module nested inside another module: delegate back to the module walker.
  defp traverse_defs({:defmodule, _meta, arguments} = ast, acc, current_scope, _current_op)
       when is_list(arguments) do
    {_, scopes} = Macro.prewalk(ast, [], &traverse_modules(&1, &2, current_scope, :defmodule))
    {nil, acc ++ scopes}
  end

  # One clause per def-like op, generated at compile time from @def_ops.
  for op <- @def_ops do
    defp traverse_defs({unquote(op), meta, arguments} = ast, acc, current_scope, _current_op)
         when is_list(arguments) do
      new_scope_part = Credo.Code.Module.def_name(ast)

      scope_name =
        [current_scope, new_scope_part]
        |> Enum.reject(&is_nil/1)
        |> Credo.Code.Name.full()

      scope_info = {meta[:line], unquote(op), scope_name}

      new_acc = acc ++ [scope_info]

      {nil, new_acc}
    end
  end

  defp traverse_defs({_op, meta, _arguments} = ast, acc, current_scope, current_op) do
    scope_info = {meta[:line], current_op, current_scope}
    {ast, acc ++ [scope_info]}
  end

  defp traverse_defs(ast, acc, _current_scope, _current_op) do
    {ast, acc}
  end
end
|
lib/credo/code/scope.ex
| 0.737064 | 0.445771 |
scope.ex
|
starcoder
|
defmodule Swarm.Distribution.StaticQuorumRing do
  @moduledoc """
  A quorum is the minimum number of nodes that a distributed cluster has to obtain in order to be
  allowed to perform an operation. This can be used to enforce consistent operation in a distributed system.

  ## Quorum size

  You must configure the distribution strategy and its quorum size using the `:static_quorum_size` setting:

      config :swarm,
        distribution_strategy: Swarm.Distribution.StaticQuorumRing,
        static_quorum_size: 5

  It defines the minimum number of nodes that must be connected in the cluster to allow process
  registration and distribution.

  If there are fewer nodes currently available than the quorum size, any calls to
  `Swarm.register_name/5` will block until enough nodes have started.

  You can configure the `:kernel` application to wait for cluster formation before starting your
  application during node start up. The `sync_nodes_optional` configuration specifies which nodes
  to attempt to connect to within the `sync_nodes_timeout` window, defined in milliseconds, before
  continuing with startup. There is also a `sync_nodes_mandatory` setting which can be used to
  enforce all nodes are connected within the timeout window or else the node terminates.

      config :kernel,
        sync_nodes_optional: [:"[email protected]", :"[email protected]"],
        sync_nodes_timeout: 60_000

  The `sync_nodes_timeout` can be configured as `:infinity` to wait indefinitely for all nodes to
  connect. All involved nodes must have the same value for `sync_nodes_timeout`.

  ### Example

  In a 9 node cluster you would configure the `:static_quorum_size` as 5. If there is a network split
  of 4 and 5 nodes, processes on the side with 5 nodes will continue running but processes on the
  other 4 nodes will be stopped.

  Be aware that in the running 5 node cluster, no more failures can be handled because the
  remaining cluster size would be less than 5. In the case of another failure in that 5 node
  cluster all running processes will be stopped.
  """

  use Swarm.Distribution.Strategy

  alias Swarm.Distribution.StaticQuorumRing

  defstruct [:static_quorum_size, :ring]

  # Builds a new quorum ring; the quorum size defaults to 2 when unconfigured.
  def create do
    %StaticQuorumRing{
      static_quorum_size: Application.get_env(:swarm, :static_quorum_size, 2),
      ring: HashRing.new()
    }
  end

  def add_node(%StaticQuorumRing{ring: ring} = quorum, node),
    do: %StaticQuorumRing{quorum | ring: HashRing.add_node(ring, node)}

  def add_node(%StaticQuorumRing{ring: ring} = quorum, node, weight),
    do: %StaticQuorumRing{quorum | ring: HashRing.add_node(ring, node, weight)}

  def add_nodes(%StaticQuorumRing{ring: ring} = quorum, nodes),
    do: %StaticQuorumRing{quorum | ring: HashRing.add_nodes(ring, nodes)}

  def remove_node(%StaticQuorumRing{ring: ring} = quorum, node),
    do: %StaticQuorumRing{quorum | ring: HashRing.remove_node(ring, node)}

  @doc """
  Maps a key to a specific node via the current distribution strategy.

  If the available nodes in the cluster are fewer than the minimum node count it returns `:undefined`.
  """
  def key_to_node(%StaticQuorumRing{static_quorum_size: quorum_size, ring: ring}, key) do
    if length(ring.nodes) < quorum_size do
      :undefined
    else
      HashRing.key_to_node(ring, key)
    end
  end
end
|
lib/swarm/distribution/static_quorum_ring.ex
| 0.903083 | 0.706773 |
static_quorum_ring.ex
|
starcoder
|
defmodule Solid.Filter do
@moduledoc """
Standard filters
"""
import Kernel, except: [abs: 1, ceil: 1, round: 1, floor: 1, apply: 2]
  @doc """
  Apply `filter` if it exists. Otherwise return the first input.

      iex> Solid.Filter.apply("upcase", ["ac"])
      "AC"
      iex> Solid.Filter.apply("no_filter_here", [1, 2, 3])
      1
  """
  def apply(filter, args) do
    # Custom filters can be supplied via the :custom_filters application env;
    # they take precedence over the built-in filters in this module.
    custom_module = Application.get_env(:solid, :custom_filters, __MODULE__)

    cond do
      filter_exists?({custom_module, filter, Enum.count(args)}) ->
        apply_filter({custom_module, filter, args})

      filter_exists?({__MODULE__, filter, Enum.count(args)}) ->
        apply_filter({__MODULE__, filter, args})

      # Unknown filter: pass the first input value through unchanged.
      true ->
        List.first(args)
    end
  end

  defp apply_filter({m, f, a}) do
    Kernel.apply(m, String.to_existing_atom(f), a)
  end

  # Uses String.to_existing_atom/1 so template-supplied filter names cannot
  # create new atoms; unknown names raise ArgumentError, handled as "missing".
  defp filter_exists?({module, function, arity}) do
    try do
      function = String.to_existing_atom(function)
      function_exported?(module, function, arity)
    rescue
      ArgumentError -> false
    end
  end
@doc """
Returns the absolute value of a number.
iex> Solid.Filter.abs(-17)
17
iex> Solid.Filter.abs(17)
17
iex> Solid.Filter.abs("-17.5")
17.5
"""
@spec abs(number | String.t()) :: number
def abs(input) when is_binary(input) do
{float, _} = Float.parse(input)
abs(float)
end
def abs(input), do: Kernel.abs(input)
@doc """
Concatenates two strings and returns the concatenated value.
iex> Solid.Filter.append("www.example.com", "/index.html")
"www.example.com/index.html"
"""
@spec append(any, any) :: String.t()
def append(input, string), do: "#{input}#{string}"
@doc """
Limits a number to a minimum value.
iex> Solid.Filter.at_least(5, 3)
5
iex> Solid.Filter.at_least(2, 4)
4
"""
@spec at_least(number, number) :: number
def at_least(input, minimum), do: max(input, minimum)
@doc """
Limits a number to a maximum value.
iex> Solid.Filter.at_most(5, 3)
3
iex> Solid.Filter.at_most(2, 4)
2
"""
@spec at_most(number, number) :: number
def at_most(input, maximum), do: min(input, maximum)
@doc """
Makes the first character of a string capitalized.
iex> Solid.Filter.capitalize("my great title")
"My great title"
iex> Solid.Filter.capitalize(1)
"1"
"""
@spec capitalize(any) :: String.t()
def capitalize(input), do: to_string(input) |> String.capitalize()
@doc """
Rounds the input up to the nearest whole number. Liquid tries to convert the input to a number before the filter is applied.
"""
@spec ceil(number | String.t()) :: number
def ceil(input) when is_binary(input) do
{float, _} = Float.parse(input)
ceil(float)
end
def ceil(input) when is_integer(input), do: input
def ceil(input), do: Float.ceil(input) |> trunc
  @doc """
  Converts a `DateTime`/`NaiveDateTime` struct into another date format.
  The input may also be a Unix timestamp or an ISO 8601 date string.
  The format for this syntax is the same as `Calendar.strftime/2`.
  To get the current time, pass the special word `"now"` (or `"today"`) to `date`.
  """
  @spec date(DateTime.t() | NaiveDateTime.t() | integer() | String.t(), String.t()) :: String.t()
  def date(date, format) when is_map(date) and is_binary(format) do
    # Bad format directives or date-like maps missing calendar fields render
    # as an empty string instead of raising inside a template.
    try do
      Calendar.strftime(date, format)
    rescue
      KeyError -> ""
      ArgumentError -> ""
    end
  end

  # Unix timestamp in seconds; out-of-range values render as "".
  def date(date, format) when is_integer(date) do
    case DateTime.from_unix(date) do
      {:ok, datetime} -> date(datetime, format)
      _ -> ""
    end
  end

  # NOTE(review): uses the host's local wall-clock time, not UTC - confirm
  # this matches the intended Liquid "now"/"today" behavior.
  def date(date, format) when date in ["now", "today"] do
    date(NaiveDateTime.local_now(), format)
  end

  # ISO 8601 string; anything unparseable is returned unchanged.
  def date(date, format) when is_binary(date) do
    case DateTime.from_iso8601(date) do
      {:ok, datetime, _} -> date(datetime, format)
      _ -> date
    end
  end

  def date(_, _), do: ""
@doc """
Allows you to specify a fallback in case a value doesn’t exist.
`default` will show its value if the left side is nil, false, or empty
iex> Solid.Filter.default(123, 456)
123
iex> Solid.Filter.default(nil, 456)
456
iex> Solid.Filter.default(false, 456)
456
iex> Solid.Filter.default([], 456)
456
"""
@spec default(any, any) :: any
def default(nil, value), do: value
def default(false, value), do: value
def default([], value), do: value
def default(input, _), do: input
@doc """
Divides a number by the specified number.
The result is rounded down to the nearest integer (that is, the floor) if the divisor is an integer.
{{ 16 | divided_by: 4 }}
iex> Solid.Filter.divided_by(16, 4)
4
iex> Solid.Filter.divided_by(5, 3)
1
iex> Solid.Filter.divided_by(20, 7)
2
"""
@spec divided_by(number, number) :: number
def divided_by(input, operand) when is_integer(operand) do
(input / operand) |> Float.floor() |> trunc
end
def divided_by(input, operand) when is_float(operand) do
input / operand
end
@doc """
Makes each character in a string uppercase.
It has no effect on strings which are already all uppercase.
iex> Solid.Filter.upcase("aBc")
"ABC"
iex> Solid.Filter.upcase(456)
"456"
iex> Solid.Filter.upcase(nil)
""
"""
@spec upcase(any) :: String.t()
def upcase(input), do: input |> to_string |> String.upcase()
@doc """
Makes each character in a string lowercase.
It has no effect on strings which are already all lowercase.
iex> Solid.Filter.downcase("aBc")
"abc"
iex> Solid.Filter.downcase(456)
"456"
iex> Solid.Filter.downcase(nil)
""
"""
@spec downcase(any) :: String.t()
def downcase(input), do: input |> to_string |> String.downcase()
@doc """
Returns the first item of an array.
iex> Solid.Filter.first([1, 2, 3])
1
iex> Solid.Filter.first([])
nil
"""
@spec first(list) :: any
def first(input) when is_list(input), do: List.first(input)
def first(_), do: nil
@doc """
Rounds a number down to the nearest whole number.
Solid tries to convert the input to a number before the filter is applied.
iex> Solid.Filter.floor(1.2)
1
iex> Solid.Filter.floor(2.0)
2
iex> Solid.Filter.floor("3.5")
3
"""
@spec floor(number | String.t()) :: integer
def floor(input) when is_binary(input) do
{float, _} = Float.parse(input)
floor(float)
end
def floor(input), do: Float.floor(input) |> trunc
@doc """
Removes all occurrences of nil from a list
iex> Solid.Filter.compact([1, nil, 2, nil, 3])
[1, 2, 3]
"""
@spec compact(list) :: list
def compact(input) when is_list(input), do: Enum.reject(input, &(&1 == nil))
def compact(input, property) when is_list(input), do: Enum.reject(input, &(&1[property] == nil))
@doc """
Concatenates (joins together) multiple arrays.
The resulting array contains all the items from the input arrays.
iex> Solid.Filter.concat([1, 2], [3, 4])
[1, 2, 3, 4]
"""
@spec concat(list, list) :: list
def concat(input, list) when is_list(input) and is_list(list) do
input ++ list
end
@doc """
Join a list of strings returning one String glued by `glue`
iex> Solid.Filter.join(["a", "b", "c"])
"a b c"
iex> Solid.Filter.join(["a", "b", "c"], "-")
"a-b-c"
"""
@spec join(list, String.t()) :: String.t()
def join(input, glue \\ " ") when is_list(input), do: Enum.join(input, glue)
@doc """
Returns the last item of an array.
iex> Solid.Filter.last([1, 2, 3])
3
iex> Solid.Filter.last([])
nil
"""
@spec last(list) :: any
def last(input) when is_list(input), do: List.last(input)
def last(_), do: nil
@doc """
Removes all whitespaces (tabs, spaces, and newlines) from the beginning of a string.
The filter does not affect spaces between words.
iex> Solid.Filter.lstrip(" So much room for activities! ")
"So much room for activities! "
"""
@spec lstrip(String.t()) :: String.t()
def lstrip(input), do: String.trim_leading(input)
@doc """
Split input string into an array of substrings separated by given pattern.
iex> Solid.Filter.split("a b c", " ")
~w(a b c)
iex> Solid.Filter.split("", " ")
[""]
"""
@spec split(any, String.t()) :: List.t()
def split(input, pattern), do: to_string(input) |> String.split(pattern)
@doc """
Map through a list of hashes accessing `property`
iex> Solid.Filter.map([%{"a" => "A"}, %{"a" => 1}], "a")
["A", 1]
"""
def map(input, property) when is_list(input) do
Enum.map(input, & &1[property])
end
@doc """
Subtracts a number from another number.
iex> Solid.Filter.minus(4, 2)
2
iex> Solid.Filter.minus(16, 4)
12
iex> Solid.Filter.minus(183.357, 12)
171.357
"""
@spec minus(number, number) :: number
def minus(input, number), do: input - number
  @doc """
  Returns the remainder of dividing one number by another.

      iex> Solid.Filter.modulo(3, 2)
      1
      iex> Solid.Filter.modulo(24, 7)
      3
      iex> Solid.Filter.modulo(183.357, 12)
      3.357
  """
  @spec modulo(number, number) :: number
  def modulo(dividend, divisor)
      when is_integer(dividend) and is_integer(divisor),
      do: Integer.mod(dividend, divisor)

  # OTP 20+
  # Float remainder via :math.fmod/2, rounded back to the dividend's number
  # of decimal places to hide float noise (183.357 -> 3.357, not 3.3569...).
  def modulo(dividend, divisor) do
    dividend
    |> :math.fmod(divisor)
    |> Float.round(decimal_places(dividend))
  end

  # Number of digits after the decimal point in the float's canonical
  # string representation.
  defp decimal_places(float) do
    string = float |> Float.to_string()
    {start, _} = :binary.match(string, ".")
    byte_size(string) - start - 1
  end
@doc """
Adds a number to another number.
iex> Solid.Filter.plus(4, 2)
6
iex> Solid.Filter.plus(16, 4)
20
iex> Solid.Filter.plus("16", 4)
20
iex> Solid.Filter.plus(183.357, 12)
195.357
iex> Solid.Filter.plus("183.357", 12)
195.357
iex> Solid.Filter.plus("183.ABC357", 12)
nil
"""
@spec plus(number, number) :: number
def plus(input, number) when is_number(input), do: input + number
def plus(input, number) when is_binary(input) do
try do
plus(String.to_integer(input), number)
rescue
ArgumentError ->
plus(String.to_float(input), number)
end
rescue
ArgumentError -> nil
end
def plus(_input, number), do: number
@doc """
Adds the specified string to the beginning of another string.
iex> Solid.Filter.prepend("/index.html", "www.example.com")
"www.example.com/index.html"
"""
@spec prepend(any, any) :: String.t()
def prepend(input, string), do: "#{string}#{input}"
@doc """
Removes every occurrence of the specified substring from a string.
iex> Solid.Filter.remove("I strained to see the train through the rain", "rain")
"I sted to see the t through the "
"""
@spec remove(String.t(), String.t()) :: String.t()
def remove(input, string) do
String.replace(input, string, "")
end
@doc """
Removes only the first occurrence of the specified substring from a string.
iex> Solid.Filter.remove_first("I strained to see the train through the rain", "rain")
"I sted to see the train through the rain"
"""
@spec remove_first(String.t(), String.t()) :: String.t()
def remove_first(input, string) do
String.replace(input, string, "", global: false)
end
@doc """
Replaces every occurrence of an argument in a string with the second argument.
iex> Solid.Filter.replace("Take my protein pills and put my helmet on", "my", "your")
"Take your protein pills and put your helmet on"
"""
@spec replace(String.t(), String.t(), String.t()) :: String.t()
def replace(input, string, replacement \\ "") do
input |> to_string |> String.replace(string, replacement)
end
@doc """
Replaces only the first occurrence of the first argument in a string with the second argument.
iex> Solid.Filter.replace_first("Take my protein pills and put my helmet on", "my", "your")
"Take your protein pills and put my helmet on"
"""
@spec replace_first(String.t(), String.t(), String.t()) :: String.t()
def replace_first(input, string, replacement \\ "") do
input |> to_string |> String.replace(string, replacement, global: false)
end
@doc """
Reverses the order of the items in an array. reverse cannot reverse a string.
iex> Solid.Filter.reverse(["a", "b", "c"])
["c", "b", "a"]
"""
@spec reverse(list) :: List.t()
def reverse(input), do: Enum.reverse(input)
@doc """
Rounds an input number to the nearest integer or,
if a number is specified as an argument, to that number of decimal places.
iex> Solid.Filter.round(1.2)
1
iex> Solid.Filter.round(2.7)
3
iex> Solid.Filter.round(183.357, 2)
183.36
"""
@spec round(number) :: integer
def round(input, precision \\ nil)
def round(input, nil), do: Kernel.round(input)
def round(input, precision) do
p = :math.pow(10, precision)
Kernel.round(input * p) / p
end
@doc """
Removes all whitespace (tabs, spaces, and newlines) from the right side of a string.
iex> Solid.Filter.rstrip(" So much room for activities! ")
" So much room for activities!"
"""
@spec rstrip(String.t()) :: String.t()
def rstrip(input), do: String.trim_trailing(input)
@doc """
Returns the number of characters in a string or the number of items in an array.
iex> Solid.Filter.size("Ground control to Major Tom.")
28
iex> Solid.Filter.size(~w(ground control to Major Tom.))
5
"""
@spec size(String.t() | list) :: non_neg_integer
def size(input) when is_list(input), do: Enum.count(input)
def size(input), do: String.length(input)
@doc """
Returns a substring of 1 character beginning at the index specified by the argument passed in.
An optional second argument specifies the length of the substring to be returned.
String indices are numbered starting from 0.
iex> Solid.Filter.slice("Liquid", 0)
"L"
iex> Solid.Filter.slice("Liquid", 2)
"q"
iex> Solid.Filter.slice("Liquid", 2, 5)
"quid"
iex> Solid.Filter.slice("Liquid", -3, 2)
"ui"
"""
@spec slice(String.t(), integer, non_neg_integer | nil) :: String.t()
def slice(input, offset, length \\ nil)
def slice(input, offset, nil), do: String.at(input, offset)
def slice(input, offset, length), do: String.slice(input, offset, length)
@doc """
Sorts items in an array by a property of an item in the array. The order of the sorted array is case-sensitive.
iex> Solid.Filter.sort(~w(zebra octopus giraffe SallySnake))
~w(SallySnake giraffe octopus zebra)
"""
@spec sort(List.t()) :: List.t()
def sort(input), do: Enum.sort(input)
@doc """
Sorts items in an array by a property of an item in the array. The order of the sorted array is case-sensitive.
iex> Solid.Filter.sort_natural(~w(zebra octopus giraffe SallySnake))
~w(giraffe octopus SallySnake zebra)
"""
@spec sort_natural(List.t()) :: List.t()
def sort_natural(input) do
Enum.sort(input, &(String.downcase(&1) <= String.downcase(&2)))
end
@doc """
Removes all whitespace (tabs, spaces, and newlines) from both the left and right side of a string.
It does not affect spaces between words.
iex> Solid.Filter.strip(" So much room for activities! ")
"So much room for activities!"
"""
@spec strip(String.t()) :: String.t()
def strip(input), do: String.trim(input)
@doc """
Multiplies a number by another number.
iex> Solid.Filter.times(3, 2)
6
iex> Solid.Filter.times(24, 7)
168
iex> Solid.Filter.times(183.357, 12)
2200.284
"""
@spec times(number, number) :: number
def times(input, operand), do: input * operand
@doc """
truncate shortens a string down to the number of characters passed as a parameter.
If the number of characters specified is less than the length of the string, an ellipsis (…) is appended to the string
and is included in the character count.
iex> Solid.Filter.truncate("Ground control to Major Tom.", 20)
"Ground control to..."
# Custom ellipsis
truncate takes an optional second parameter that specifies the sequence of characters to be appended to the truncated string.
By default this is an ellipsis (…), but you can specify a different sequence.
The length of the second parameter counts against the number of characters specified by the first parameter.
For example, if you want to truncate a string to exactly 10 characters, and use a 3-character ellipsis,
use 13 for the first parameter of truncate, since the ellipsis counts as 3 characters.
iex> Solid.Filter.truncate("Ground control to Major Tom.", 25, ", and so on")
"Ground control, and so on"
# No ellipsis
You can truncate to the exact number of characters specified by the first parameter
and show no trailing characters by passing a blank string as the second parameter:
iex> Solid.Filter.truncate("Ground control to Major Tom.", 20, "")
"Ground control to Ma"
"""
@spec truncate(String.t(), non_neg_integer, String.t()) :: String.t()
def truncate(input, length, ellipsis \\ "...") do
if String.length(input) > length do
length = max(0, length - String.length(ellipsis))
slice(input, 0, length) <> ellipsis
else
input
end
end
@doc """
Shortens a string down to the number of words passed as the argument.
If the specified number of words is less than the number of words in the string, an ellipsis (…) is appended to the string.
iex> Solid.Filter.truncatewords("Ground control to Major Tom.", 3)
"Ground control to..."
# Custom ellipsis
`truncatewords` takes an optional second parameter that specifies the sequence of characters to be appended to the truncated string.
By default this is an ellipsis (…), but you can specify a different sequence.
iex> Solid.Filter.truncatewords("Ground control to Major Tom.", 3, "--")
"Ground control to--"
# No ellipsis
You can avoid showing trailing characters by passing a blank string as the second parameter:
iex> Solid.Filter.truncatewords("Ground control to Major Tom.", 3, "")
"Ground control to"
"""
@spec truncatewords(nil | String.t(), non_neg_integer, String.t()) :: String.t()
def truncatewords(input, max_words, ellipsis \\ "...")
def truncatewords(nil, _max_words, _ellipsis), do: ""
def truncatewords(input, max_words, ellipsis) do
words = String.split(input, " ")
if length(words) > max_words do
Enum.take(words, max_words)
|> Enum.intersperse(" ")
|> to_string
|> Kernel.<>(ellipsis)
end
end
@doc """
Removes any duplicate elements in an array.
Output
iex> Solid.Filter.uniq(~w(ants bugs bees bugs ants))
~w(ants bugs bees)
"""
@spec uniq(list) :: list
def uniq(input), do: Enum.uniq(input)
@doc """
Removes any newline characters (line breaks) from a string.
Output
iex> Solid.Filter.strip_newlines("Test \\ntext\\r\\n with line breaks.")
"Test text with line breaks."
iex> Solid.Filter.strip_newlines([[["Test \\ntext\\r\\n with "] | "line breaks."]])
"Test text with line breaks."
"""
@spec strip_newlines(iodata()) :: String.t()
def strip_newlines(iodata) do
binary = IO.iodata_to_binary(iodata)
pattern = :binary.compile_pattern(["\r\n", "\n"])
String.replace(binary, pattern, "")
end
@doc """
Replaces every newline in a string with an HTML line break (<br />).
Output
iex> Solid.Filter.newline_to_br("Test \\ntext\\r\\n with line breaks.")
"Test <br />\\ntext<br />\\r\\n with line breaks."
iex> Solid.Filter.newline_to_br([[["Test \\ntext\\r\\n with "] | "line breaks."]])
"Test <br />\\ntext<br />\\r\\n with line breaks."
"""
@spec newline_to_br(iodata()) :: String.t()
def newline_to_br(iodata) do
binary = IO.iodata_to_binary(iodata)
pattern = :binary.compile_pattern(["\r\n", "\n"])
String.replace(binary, pattern, fn x -> "<br />#{x}" end)
end
@doc """
Creates an array including only the objects with a given property value,
or any truthy value by default.
Output
iex> input = [
...> %{"id" => 1, "type" => "kitchen"},
...> %{"id" => 2, "type" => "bath"},
...> %{"id" => 3, "type" => "kitchen"}
...> ]
iex> Solid.Filter.where(input, "type", "kitchen")
[%{"id" => 1, "type" => "kitchen"}, %{"id" => 3, "type" => "kitchen"}]
iex> input = [
...> %{"id" => 1, "available" => true},
...> %{"id" => 2, "type" => false},
...> %{"id" => 3, "available" => true}
...> ]
iex> Solid.Filter.where(input, "available")
[%{"id" => 1, "available" => true}, %{"id" => 3, "available" => true}]
"""
@spec where(list, String.t(), String.t()) :: list
def where(input, key, value) do
for %{} = map <- input, map[key] == value, do: map
end
@spec where(list, String.t()) :: list
def where(input, key) do
for %{} = map <- input, Map.has_key?(map, key), do: map
end
@doc """
Removes any HTML tags from a string.
This mimics the regex based approach of the ruby library.
Output
iex> Solid.Filter.strip_html("Have <em>you</em> read <strong>Ulysses</strong>?")
"Have you read Ulysses?"
"""
@html_blocks ~r{(<script.*?</script>)|(<!--.*?-->)|(<style.*?</style>)}m
@html_tags ~r|<.*?>|m
@spec strip_html(iodata()) :: String.t()
def strip_html(iodata) do
iodata
|> IO.iodata_to_binary()
|> String.replace(@html_blocks, "")
|> String.replace(@html_tags, "")
end
@doc """
URL encodes the string.
Output
iex> Solid.Filter.url_encode("<EMAIL>")
"john%40liquid.com"
iex> Solid.Filter.url_encode("Tetsuro Takara")
"Tetsuro+Takara"
"""
def url_encode(iodata) do
iodata
|> IO.iodata_to_binary()
|> URI.encode_www_form()
end
@doc """
URL decodes the string.
Output
iex> Solid.Filter.url_decode("%27Stop%21%27+said+Fred")
"'Stop!' said Fred"
"""
def url_decode(iodata) do
iodata
|> IO.iodata_to_binary()
|> URI.decode_www_form()
end
@doc """
HTML encodes the string.
Output
iex> Solid.Filter.escape("Have you read 'James & the Giant Peach'?")
"Have you read 'James & the Giant Peach'?"
"""
@spec escape(iodata()) :: String.t()
def escape(iodata) do
iodata
|> IO.iodata_to_binary()
|> Solid.HTML.html_escape()
end
@doc """
HTML encodes the string without encoding already encoded characters again.
This mimics the regex based approach of the ruby library.
Output
"1 < 2 & 3"
iex> Solid.Filter.escape_once("1 < 2 & 3")
"1 < 2 & 3"
"""
@escape_once_regex ~r{["><']|&(?!([a-zA-Z]+|(#\d+));)}
@spec escape_once(iodata()) :: String.t()
def escape_once(iodata) do
iodata
|> IO.iodata_to_binary()
|> String.replace(@escape_once_regex, &Solid.HTML.replacements/1)
end
end
|
lib/solid/filter.ex
| 0.888036 | 0.592107 |
filter.ex
|
starcoder
|
defmodule Every do
  @moduledoc """
  Every gives you ability to use `Process.send_after/3` with
  intervals which can be rounded to every:

  1. Minute,
  2. N minutes,
  3. Hour,
  4. N Hours,
  5. Day.

  Every function accepts an optional `relative_to` parameter, which can be used
  to fake the current moment in time. If it is not provided, the current time
  will be used.

  **Note:** All functions return the difference in milliseconds!
  """

  @doc """
  Calculates how many milliseconds are left until the next minute starts.

  ## Examples

      iex> now = Timex.parse!("2018-10-14T16:48:12.000Z", "{ISO:Extended}")
      iex> Every.minute(now)
      48_000
  """
  def minute(relative_to \\ Timex.now()) do
    # Whole seconds remaining in the current minute, converted to ms.
    # NOTE(review): sub-second precision of `relative_to` is ignored here.
    (60 - relative_to.second) * 1000
  end

  @doc """
  Calculates how many milliseconds are left until the next interval
  (multiple of `interval` minutes past the hour) will be reached.

  ## Examples

      iex> now = Timex.parse!("2018-10-14T16:48:12.000Z", "{ISO:Extended}")
      iex> Every.minutes(5, now) # 16:50:00 - 16:48:12
      108_000
  """
  def minutes(interval, relative_to \\ Timex.now())

  def minutes(interval, relative_to) do
    # Minutes to add to reach the next multiple of `interval`.
    minutes_until_next_interval = next_interval(relative_to.minute, interval)
    {microseconds, _precision} = relative_to.microsecond
    # Zero out seconds and microseconds, then jump forward to the target
    # minute and measure the gap from `relative_to` in milliseconds.
    relative_to
    |> Timex.shift(seconds: -relative_to.second)
    |> Timex.shift(microseconds: -microseconds)
    |> Timex.shift(minutes: minutes_until_next_interval)
    |> Timex.diff(relative_to, :milliseconds)
  end

  @doc """
  Calculates how many milliseconds are left until the next hour starts.

  ## Examples

      iex> now = Timex.parse!("2018-10-14T16:48:12.000Z", "{ISO:Extended}")
      iex> Every.hour(now)
      708_000
  """
  def hour(relative_to \\ Timex.now())

  def hour(relative_to) do
    {microseconds, _precision} = relative_to.microsecond
    # Rewind to the top of the current hour, add one hour, then diff.
    relative_to
    |> Timex.shift(seconds: -relative_to.second)
    |> Timex.shift(microseconds: -microseconds)
    |> Timex.shift(minutes: -relative_to.minute)
    |> Timex.shift(hours: 1)
    |> Timex.diff(relative_to, :milliseconds)
  end

  @doc """
  Calculates how many milliseconds are left until the next interval
  (multiple of `interval` hours from midnight) will be reached.

  ## Examples

      iex> now = Timex.parse!("2018-10-14T16:48:12.000Z", "{ISO:Extended}")
      iex> Every.hours(2, now)
      4_308_000
  """
  def hours(interval, relative_to \\ Timex.now())

  def hours(interval, relative_to) do
    # Hours to add to reach the next multiple of `interval`.
    hours_until_next_interval = next_interval(relative_to.hour, interval)
    {microseconds, _precision} = relative_to.microsecond
    # Rewind to the top of the current hour, then jump to the target hour.
    relative_to
    |> Timex.shift(seconds: -relative_to.second)
    |> Timex.shift(microseconds: -microseconds)
    |> Timex.shift(minutes: -relative_to.minute)
    |> Timex.shift(hours: hours_until_next_interval)
    |> Timex.diff(relative_to, :milliseconds)
  end

  @doc """
  Calculates how many milliseconds are left until the next day starts.

  ## Examples

      iex> now = Timex.parse!("2018-10-14T16:48:12.000Z", "{ISO:Extended}")
      iex> Every.day(now) # Time remaining 7h 25m 48s
      25_908_000
  """
  def day(relative_to \\ Timex.now())

  def day(relative_to) do
    # Midnight of the following day, measured from `relative_to`.
    relative_to
    |> Timex.shift(days: 1)
    |> Timex.beginning_of_day()
    |> Timex.diff(relative_to, :milliseconds)
  end

  # Uses `rem` function to get remainder for value
  # then calculates next step value, for example
  #   value=48, round_value=15
  # then the result will look like
  #   15 - (48%15) = 12
  # Note: when `value` is already a multiple, this returns a full
  # `round_value` step (never 0).
  defp next_interval(value, round_value) do
    round_value - rem(value, round_value)
  end
end
|
lib/every.ex
| 0.905589 | 0.644442 |
every.ex
|
starcoder
|
defmodule AWS.S3Control do
@moduledoc """
AWS S3 Control provides access to Amazon S3 control plane operations.
"""
@doc """
Creates an access point and associates it with the specified bucket.
For more information, see [Managing Data Access with Amazon S3 Access Points](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) in
the *Amazon Simple Storage Service Developer Guide*.
## Using this action with Amazon S3 on Outposts
This action:
* Requires a virtual private cloud (VPC) configuration as S3 on
Outposts only supports VPC style access points.
* Does not support ACL on S3 on Outposts buckets.
* Does not support Public Access on S3 on Outposts buckets.
* Does not support object lock for S3 on Outposts buckets.
For more information, see [Using Amazon S3 on Outposts](AmazonS3/latest/dev/S3onOutposts.html) in the *Amazon Simple Storage
Service Developer Guide *.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateAccessPoint.html#API_control_CreateAccessPoint_Examples)
section below.
The following actions are related to `CreateAccessPoint`:
*
[GetAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) *
[DeleteAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPoint.html)
*
[ListAccessPoints](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_ListAccessPoints.html)
"""
def create_access_point(client, name, input, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
This API operation creates an Amazon S3 on Outposts bucket.
To create an S3 bucket, see [Create Bucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
in the *Amazon Simple Storage Service API*.
Creates a new Outposts bucket. By creating the bucket, you become the bucket
owner. To create an Outposts bucket, you must have S3 on Outposts. For more
information, see [Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
*Amazon Simple Storage Service Developer Guide*.
Not every string is an acceptable bucket name. For information on bucket naming
restrictions, see [Working with Amazon S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules).
S3 on Outposts buckets do not support
* ACLs. Instead, configure access point policies to manage access to
buckets.
* Public access.
* Object Lock
* Bucket Location constraint
For an example of the request syntax for Amazon S3 on Outposts that uses the S3
on Outposts endpoint hostname prefix and outpost-id in your API request, see the
[
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateBucket.html#API_control_CreateBucket_Examples)
section below.
The following actions are related to `CreateBucket` for Amazon S3 on Outposts:
*
[PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) *
[GetBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucket.html)
*
[DeleteBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html) *
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateAccessPoint.html)
*
[PutAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutAccessPointPolicy.html)
"""
def create_bucket(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}"
{headers, input} =
[
{"ACL", "x-amz-acl"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"GrantRead", "x-amz-grant-read"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantWrite", "x-amz-grant-write"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
{"ObjectLockEnabledForBucket", "x-amz-bucket-object-lock-enabled"},
{"OutpostId", "x-amz-outpost-id"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
S3 Batch Operations performs large-scale Batch Operations on Amazon S3 objects.
Batch Operations can run a single operation or action on lists of Amazon S3
objects that you specify. For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service Developer Guide*.
This operation creates a S3 Batch Operations job.
Related actions include:
*
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[UpdateJobPriority](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def create_job(client, input, options \\ []) do
path_ = "/v20180820/jobs"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the
ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPoint.html#API_control_DeleteAccessPoint_Examples)
section below.
The following actions are related to `DeleteAccessPoint`:
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) *
[GetAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html)
*
[ListAccessPoints](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html)
"""
def delete_access_point(client, name, input, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes the access point policy for the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPointPolicy.html#API_control_DeleteAccessPointPolicy_Examples)
section below.
The following actions are related to `DeleteAccessPointPolicy`:
*
[PutAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) *
[GetAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html)
"""
def delete_access_point_policy(client, name, input, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}/policy"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
This API operation deletes an Amazon S3 on Outposts bucket.
To delete an S3 bucket, see
[DeleteBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) in the *Amazon Simple Storage Service API*.
Deletes the Amazon S3 on Outposts bucket. All objects (including all object
versions and delete markers) in the bucket must be deleted before the bucket
itself can be deleted. For more information, see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
*Amazon Simple Storage Service Developer Guide*.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html#API_control_DeleteBucket_Examples)
section below.
## Related Resources
*
[CreateBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) *
[GetBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucket.html)
*
[DeleteObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
"""
def delete_bucket(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
This API action deletes an Amazon S3 on Outposts bucket's lifecycle
configuration.
To delete an S3 bucket's lifecycle configuration, see
[DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) in the *Amazon Simple Storage Service API*.
Deletes the lifecycle configuration from the specified Outposts bucket. Amazon
S3 on Outposts removes all the lifecycle configuration rules in the lifecycle
subresource associated with the bucket. Your objects never expire, and Amazon S3
on Outposts no longer automatically deletes any objects on the basis of rules
contained in the deleted lifecycle configuration. For more information, see
[Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
*Amazon Simple Storage Service Developer Guide*.
To use this operation, you must have permission to perform the
`s3outposts:DeleteLifecycleConfiguration` action. By default, the bucket owner
has this permission and the Outposts bucket owner can grant this permission to
others.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketLifecycleConfiguration.html#API_control_DeleteBucketLifecycleConfiguration_Examples)
section below.
For more information about object expiration, see [ Elements to Describe Lifecycle
Actions](https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
Related actions include:
*
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) *
[GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html)
"""
def delete_bucket_lifecycle_configuration(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/lifecycleconfiguration"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
This API operation deletes an Amazon S3 on Outposts bucket policy.
To delete an S3 bucket policy, see
[DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html) in the *Amazon Simple Storage Service API*.
This implementation of the DELETE operation uses the policy subresource to
delete the policy of a specified Amazon S3 on Outposts bucket. If you are using
an identity other than the root user of the AWS account that owns the bucket,
the calling identity must have the `s3outposts:DeleteBucketPolicy` permissions
on the specified Outposts bucket and belong to the bucket owner's account to use
this operation. For more information, see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
*Amazon Simple Storage Service Developer Guide*.
If you don't have `DeleteBucketPolicy` permissions, Amazon S3 returns a `403
Access Denied` error. If you have the correct permissions, but you're not using
an identity that belongs to the bucket owner's account, Amazon S3 returns a `405
Method Not Allowed` error.
As a security precaution, the root user of the AWS account that owns a bucket
can always use this operation, even if the policy explicitly denies the root
user the ability to perform this action.
For more information about bucket policies, see [Using Bucket Policies and User Policies](
https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketPolicy.html#API_control_DeleteBucketPolicy_Examples)
section below.
The following actions are related to `DeleteBucketPolicy`:
*
[GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html) *
[PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketPolicy.html)
"""
def delete_bucket_policy(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/policy"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
This API operation deletes an Amazon S3 on Outposts bucket's tags.
To delete an S3 bucket tags, see
[DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) in the *Amazon Simple Storage Service API*.
Deletes the tags from the Outposts bucket. For more information, see [Using
Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
*Amazon Simple Storage Service Developer Guide*.
To use this operation, you must have permission to perform the
`PutBucketTagging` action. By default, the bucket owner has this permission and
can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketTagging.html#API_control_DeleteBucketTagging_Examples)
section below.
The following actions are related to `DeleteBucketTagging`:
*
[GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html) *
[PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html)
"""
def delete_bucket_tagging(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/tagging"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Removes the entire tag set from the specified S3 Batch Operations job.
To use this operation, you must have permission to perform the
`s3:DeleteJobTagging` action. For more information, see [Controlling access and labeling jobs using
tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags)
in the *Amazon Simple Storage Service Developer Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[GetJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html)
*
[PutJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html)
"""
def delete_job_tagging(client, job_id, input, options \\ []) do
path_ = "/v20180820/jobs/#{URI.encode(job_id)}/tagging"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Removes the `PublicAccessBlock` configuration for an AWS account.
For more information, see [ Using Amazon S3 block public access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html).
Related actions include:
*
[GetPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) *
[PutPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html)
"""
def delete_public_access_block(client, input, options \\ []) do
path_ = "/v20180820/configuration/publicAccessBlock"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves the configuration parameters and status for a Batch Operations job.
For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service Developer Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[UpdateJobPriority](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def describe_job(client, job_id, account_id, options \\ []) do
path_ = "/v20180820/jobs/#{URI.encode(job_id)}"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns configuration information about the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetAccessPoint.html#API_control_GetAccessPoint_Examples)
section below.
The following actions are related to `GetAccessPoint`:
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) *
[DeleteAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html)
*
[ListAccessPoints](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html)
"""
def get_access_point(client, name, account_id, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the access point policy associated with the specified access point.
The following actions are related to `GetAccessPointPolicy`:
*
[PutAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) *
[DeleteAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html)
"""
def get_access_point_policy(client, name, account_id, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}/policy"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Indicates whether the specified access point currently has a policy that allows
public access.
For more information about public access through access points, see [Managing Data Access with Amazon S3 Access
Points](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) in
the *Amazon Simple Storage Service Developer Guide*.
"""
def get_access_point_policy_status(client, name, account_id, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}/policyStatus"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets an Amazon S3 on Outposts bucket.
For more information, see [ Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
the *Amazon Simple Storage Service Developer Guide*.
The following actions are related to `GetBucket` for Amazon S3 on Outposts:
*
[PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) *
[CreateBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateBucket.html)
*
[DeleteBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html)
"""
def get_bucket(client, bucket, account_id, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This API operation gets an Amazon S3 on Outposts bucket's lifecycle
configuration.
To get an S3 bucket's lifecycle configuration, see
[GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) in the *Amazon Simple Storage Service API*.
Returns the lifecycle configuration information set on the Outposts bucket. For
more information, see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) and
for information about lifecycle configuration, see [ Object Lifecycle Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
in *Amazon Simple Storage Service Developer Guide*.
To use this operation, you must have permission to perform the
`s3outposts:GetLifecycleConfiguration` action. The Outposts bucket owner has
this permission, by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketLifecycleConfiguration.html#API_control_GetBucketLifecycleConfiguration_Examples)
section below.
`GetBucketLifecycleConfiguration` has the following special error:
* Error code: `NoSuchLifecycleConfiguration`
* Description: The lifecycle configuration does not
exist.
* HTTP Status Code: 404 Not Found
* SOAP Fault Code Prefix: Client
The following actions are related to `GetBucketLifecycleConfiguration`:
*
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) *
[DeleteBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html)
"""
def get_bucket_lifecycle_configuration(client, bucket, account_id, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/lifecycleconfiguration"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This API action gets a bucket policy for an Amazon S3 on Outposts bucket.
To get a policy for an S3 bucket, see
[GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html) in the *Amazon Simple Storage Service API*.
Returns the policy of a specified Outposts bucket. For more information, see
[Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
the *Amazon Simple Storage Service Developer Guide*.
If you are using an identity other than the root user of the AWS account that
owns the bucket, the calling identity must have the `GetBucketPolicy`
permissions on the specified bucket and belong to the bucket owner's account in
order to use this operation.
If you don't have `s3outposts:GetBucketPolicy` permissions, Amazon S3 returns a
`403 Access Denied` error. If you have the correct permissions, but you're not
using an identity that belongs to the bucket owner's account, Amazon S3 returns
a `405 Method Not Allowed` error.
As a security precaution, the root user of the AWS account that owns a bucket
can always use this operation, even if the policy explicitly denies the root
user the ability to perform this action.
For more information about bucket policies, see [Using Bucket Policies and User Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketPolicy.html#API_control_GetBucketPolicy_Examples)
section below.
The following actions are related to `GetBucketPolicy`:
*
[GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) *
[PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html)
*
[DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html)
"""
def get_bucket_policy(client, bucket, account_id, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/policy"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This API operation gets an Amazon S3 on Outposts bucket's tags.
To get an S3 bucket tags, see
[GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) in the *Amazon Simple Storage Service API*.
Returns the tag set associated with the Outposts bucket. For more information,
see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
the *Amazon Simple Storage Service Developer Guide*.
To use this operation, you must have permission to perform the
`GetBucketTagging` action. By default, the bucket owner has this permission and
can grant this permission to others.
`GetBucketTagging` has the following special error:
* Error code: `NoSuchTagSetError`
* Description: There is no tag set associated with the
bucket.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketTagging.html#API_control_GetBucketTagging_Examples)
section below.
The following actions are related to `GetBucketTagging`:
*
[PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html) *
[DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html)
"""
def get_bucket_tagging(client, bucket, account_id, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/tagging"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the tags on an S3 Batch Operations job.
To use this operation, you must have permission to perform the
`s3:GetJobTagging` action. For more information, see [Controlling access and labeling jobs using
tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags)
in the *Amazon Simple Storage Service Developer Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[PutJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html)
*
[DeleteJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html)
"""
def get_job_tagging(client, job_id, account_id, options \\ []) do
path_ = "/v20180820/jobs/#{URI.encode(job_id)}/tagging"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves the `PublicAccessBlock` configuration for an AWS account.
For more information, see [ Using Amazon S3 block public access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html).
Related actions include:
*
[DeletePublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html) *
[PutPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html)
"""
def get_public_access_block(client, account_id, options \\ []) do
path_ = "/v20180820/configuration/publicAccessBlock"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns a list of the access points currently associated with the specified
bucket.
You can retrieve up to 1000 access points per call. If the specified bucket has
more than 1,000 access points (or the number specified in `maxResults`,
whichever is less), the response will include a continuation token that you can
use to list the additional access points.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetAccessPoint.html#API_control_GetAccessPoint_Examples)
section below.
The following actions are related to `ListAccessPoints`:
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) *
[DeleteAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html)
*
[GetAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html)
"""
def list_access_points(client, bucket \\ nil, max_results \\ nil, next_token \\ nil, account_id, options \\ []) do
path_ = "/v20180820/accesspoint"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(bucket) do
[{"bucket", bucket} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists current S3 Batch Operations jobs and jobs that have ended within the last
30 days for the AWS account making the request.
For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service Developer Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html)
*
[UpdateJobPriority](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def list_jobs(client, job_statuses \\ nil, max_results \\ nil, next_token \\ nil, account_id, options \\ []) do
path_ = "/v20180820/jobs"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(job_statuses) do
[{"jobStatuses", job_statuses} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns a list of all Outposts buckets in an Outposts that are owned by the
authenticated sender of the request.
For more information, see [Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
the *Amazon Simple Storage Service Developer Guide*.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3
on Outposts endpoint hostname prefix and outpost-id in your API request, see the
[
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_ListRegionalBuckets.html#API_control_ListRegionalBuckets_Examples)
section below.
"""
def list_regional_buckets(client, max_results \\ nil, next_token \\ nil, account_id, outpost_id \\ nil, options \\ []) do
path_ = "/v20180820/bucket"
headers = []
headers = if !is_nil(account_id) do
[{"x-amz-account-id", account_id} | headers]
else
headers
end
headers = if !is_nil(outpost_id) do
[{"x-amz-outpost-id", outpost_id} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Associates an access policy with the specified access point.
Each access point can have only one policy, so a request made to this API
replaces any existing policy associated with the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutAccessPointPolicy.html#API_control_PutAccessPointPolicy_Examples)
section below.
The following actions are related to `PutAccessPointPolicy`:
*
[GetAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html) *
[DeleteAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html)
"""
def put_access_point_policy(client, name, input, options \\ []) do
path_ = "/v20180820/accesspoint/#{URI.encode(name)}/policy"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
This API action puts a lifecycle configuration to an Amazon S3 on Outposts
bucket.
To put a lifecycle configuration to an S3 bucket, see
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) in the *Amazon Simple Storage Service API*.
Creates a new lifecycle configuration for the Outposts bucket or replaces an
existing lifecycle configuration. Outposts buckets can only support a lifecycle
that deletes objects after a certain period of time. For more information, see
[Managing Lifecycle Permissions for Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketLifecycleConfiguration.html#API_control_PutBucketLifecycleConfiguration_Examples)
section below.
The following actions are related to `PutBucketLifecycleConfiguration`:
*
[GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html) *
[DeleteBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html)
"""
def put_bucket_lifecycle_configuration(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/lifecycleconfiguration"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
This API action puts a bucket policy to an Amazon S3 on Outposts bucket.
To put a policy on an S3 bucket, see
[PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html) in the *Amazon Simple Storage Service API*.
Applies an Amazon S3 bucket policy to an Outposts bucket. For more information,
see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in
the *Amazon Simple Storage Service Developer Guide*.
If you are using an identity other than the root user of the AWS account that
owns the Outposts bucket, the calling identity must have the `PutBucketPolicy`
permissions on the specified Outposts bucket and belong to the bucket owner's
account in order to use this operation.
If you don't have `PutBucketPolicy` permissions, Amazon S3 returns a `403 Access
Denied` error. If you have the correct permissions, but you're not using an
identity that belongs to the bucket owner's account, Amazon S3 returns a `405
Method Not Allowed` error.
As a security precaution, the root user of the AWS account that owns a bucket
can always use this operation, even if the policy explicitly denies the root
user the ability to perform this action.
For more information about bucket policies, see [Using Bucket Policies and User Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketPolicy.html#API_control_PutBucketPolicy_Examples)
section below.
The following actions are related to `PutBucketPolicy`:
*
[GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html) *
[DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html)
"""
def put_bucket_policy(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/policy"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
{"ConfirmRemoveSelfBucketAccess", "x-amz-confirm-remove-self-bucket-access"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
This API action puts tags on an Amazon S3 on Outposts bucket.
To put tags on an S3 bucket, see
[PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) in the *Amazon Simple Storage Service API*.
Sets the tags for an Outposts bucket. For more information, see [Using Amazon S3
on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
in the *Amazon Simple Storage Service Developer Guide*.
Use tags to organize your AWS bill to reflect your own cost structure. To do
this, sign up to get your AWS account bill with tag key values included. Then,
to see the cost of combined resources, organize your billing information
according to resources with the same tag key values. For example, you can tag
several resources with a specific application name, and then organize your
billing information to see the total cost of that application across several
services. For more information, see [Cost Allocation and Tagging](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
Within a bucket, if you add a tag that has the same key as an existing tag, the
new value overwrites the old value. For more information, see [Using Cost Allocation in Amazon S3 Bucket
Tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html).
To use this operation, you must have permissions to perform the
`s3outposts:PutBucketTagging` action. The Outposts bucket owner has this
permission by default and can grant this permission to others. For more
information about permissions, see [ Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
`PutBucketTagging` has the following special errors:
* Error code: `InvalidTagError`
* Description: The tag provided was not a valid tag.
This error can occur if the tag did not pass input validation. For information
about tag restrictions, see [ User-Defined Tag Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
and [ AWS-Generated Cost Allocation Tag Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
* Error code: `MalformedXMLError`
* Description: The XML provided does not match the
schema.
* Error code: `OperationAbortedError `
* Description: A conflicting conditional operation is
currently in progress against this resource. Try again.
* Error code: `InternalError`
* Description: The service was unable to apply the
provided tag to the bucket.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of outpost-id to be passed with the request and an S3 on
Outposts endpoint hostname prefix instead of s3-control. For an example of the
request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint
hostname prefix and the outpost-id derived using the access point ARN, see the [
Example](https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketTagging.html#API_control_PutBucketTagging_Examples)
section below.
The following actions are related to `PutBucketTagging`:
*
[GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html) *
[DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html)
"""
def put_bucket_tagging(client, bucket, input, options \\ []) do
path_ = "/v20180820/bucket/#{URI.encode(bucket)}/tagging"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the supplied tag-set on an S3 Batch Operations job.
A tag is a key-value pair. You can associate S3 Batch Operations tags with any
job by sending a PUT request against the tagging subresource that is associated
with the job. To modify the existing tag set, you can either replace the
existing tag set entirely, or make changes within the existing tag set by
retrieving the existing tag set using
[GetJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html), modify that tag set, and use this API action to replace the tag set with the one
you modified. For more information, see [Controlling access and labeling jobs
using
tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags)
in the *Amazon Simple Storage Service Developer Guide*.
* If you send this request with an empty tag set, Amazon S3 deletes
the existing tag set on the Batch Operations job. If you use this method, you
are charged for a Tier 1 Request (PUT). For more information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing/).
* For deleting existing tags for your Batch Operations job, a
[DeleteJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) request is preferred because it achieves the same result without incurring
charges.
* A few things to consider about using tags:
* Amazon S3 limits the maximum number of tags to 50 tags
per job.
* You can associate up to 50 tags with a job as long as
they have unique tag keys.
* A tag key can be up to 128 Unicode characters in
length, and tag values can be up to 256 Unicode characters in length.
* The key and values are case sensitive.
* For tagging-related restrictions related to characters
and encodings, see [User-Defined Tag
Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
in the *AWS Billing and Cost Management User Guide*.
To use this operation, you must have permission to perform the
`s3:PutJobTagging` action.
Related actions include:
*
[CreatJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[GetJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html)
*
[DeleteJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html)
"""
def put_job_tagging(client, job_id, input, options \\ []) do
path_ = "/v20180820/jobs/#{URI.encode(job_id)}/tagging"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Creates or modifies the `PublicAccessBlock` configuration for an AWS account.
For more information, see [ Using Amazon S3 block public access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html).
Related actions include:
*
[GetPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) *
[DeletePublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html)
"""
def put_public_access_block(client, input, options \\ []) do
path_ = "/v20180820/configuration/publicAccessBlock"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Updates an existing S3 Batch Operations job's priority.
For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service Developer Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def update_job_priority(client, job_id, input, options \\ []) do
path_ = "/v20180820/jobs/#{URI.encode(job_id)}/priority"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"Priority", "priority"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the status for the specified job.
Use this operation to confirm that you want to run a job or to cancel an
existing job. For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service Developer Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def update_job_status(client, job_id, input, options \\ []) do
path_ = "/v20180820/jobs/#{URI.encode(job_id)}/status"
{headers, input} =
[
{"AccountId", "x-amz-account-id"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"RequestedJobStatus", "requestedJobStatus"},
{"StatusUpdateReason", "statusUpdateReason"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
  {:ok, map() | nil, map()}
  | {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
  # Sign against the "s3" service even though the host uses the s3-control
  # endpoint prefix.
  client = %{client | service: "s3"}

  # :proplists.get_value/2 yields :undefined when the header is missing;
  # build_host/3 raises in that case.
  account_id = :proplists.get_value("x-amz-account-id", headers)
  host = build_host(account_id, "s3-control", client)

  full_url =
    host
    |> build_url(path, client)
    |> add_query(query, client)

  headers = AWS.Request.add_headers([{"Host", host}, {"Content-Type", "text/xml"}], headers)
  payload = encode!(client, input)
  signed_headers = AWS.Request.sign_v4(client, method, full_url, headers, payload)

  perform_request(client, method, full_url, payload, signed_headers, options, success_status_code)
end
# Executes the signed HTTP request and normalizes the response.
#
# The two stacked `when` clauses on the first match are OR-ed (Erlang guard
# sequence semantics): the response is accepted either when no explicit
# success code was given and the status is one of 200/202/204, or when the
# status equals the expected `success_status_code`.
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
  case AWS.Client.request(client, method, url, payload, headers, options) do
    {:ok, %{status_code: status_code, body: body} = response}
    when is_nil(success_status_code) and status_code in [200, 202, 204]
    when status_code == success_status_code ->
      # Empty bodies decode to nil (if/1 without else returns nil).
      body = if(body != "", do: decode!(client, body))
      {:ok, body, response}
    {:ok, response} ->
      # Any other status is surfaced as an unexpected-response error.
      {:error, {:unexpected_response, response}}
    error = {:error, _reason} -> error
  end
end
# "local" region with an explicit endpoint short-circuits host construction.
defp build_host(_account_id, _endpoint_prefix, %{region: "local", endpoint: endpoint}) do
  endpoint
end

# "local" region without an endpoint falls back to localhost.
defp build_host(_account_id, _endpoint_prefix, %{region: "local"}) do
  "localhost"
end

# :proplists.get_value/2 returns :undefined when the header was absent.
defp build_host(:undefined, _endpoint_prefix, _client) do
  raise "missing account_id"
end

# Standard virtual-host form: <account>.<prefix>.<region>.<endpoint>
defp build_host(account_id, endpoint_prefix, %{region: region, endpoint: endpoint}) do
  Enum.join([account_id, endpoint_prefix, region, endpoint], ".")
end
# Assembles the scheme/host/port/path portion of the request URL from the
# client's configured protocol and port.
defp build_url(host, path, %{proto: scheme, port: port}) do
  "#{scheme}://#{host}:#{port}#{path}"
end
# Appends an encoded query string to the URL; an empty parameter list leaves
# the URL untouched.
defp add_query(url, [], _client), do: url

defp add_query(url, query, client) do
  "#{url}?#{encode!(client, query, :query)}"
end
# Serializes the payload; this S3 Control client defaults to XML bodies.
defp encode!(client, payload, format \\ :xml),
  do: AWS.Client.encode!(client, payload, format)
# Parses an XML response body into a map.
defp decode!(client, payload), do: AWS.Client.decode!(client, payload, :xml)
end
|
lib/aws/generated/s3_control.ex
| 0.88865 | 0.462473 |
s3_control.ex
|
starcoder
|
defmodule AWS.ServiceDiscovery do
  @moduledoc """
  Client for the AWS Cloud Map (Service Discovery) API.

  AWS Cloud Map lets you configure public DNS, private DNS, or HTTP namespaces
  that your microservice applications run in. When an instance of a service
  becomes available, register it with AWS Cloud Map; for public or private DNS
  namespaces, AWS Cloud Map automatically creates DNS records and an optional
  health check. Clients that submit public or private DNS queries, or HTTP
  requests, for the service receive an answer that contains up to eight healthy
  records.
  """

  alias AWS.Client
  alias AWS.Request

  # Static service descriptor consumed by `AWS.Request` for signing and
  # dispatching every call below.
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: "ServiceDiscovery",
      api_version: "2017-03-14",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "servicediscovery",
      global?: false,
      protocol: "json",
      service_id: "ServiceDiscovery",
      signature_version: "v4",
      signing_name: "servicediscovery",
      target_prefix: "Route53AutoNaming_v20170314"
    }
  end

  @doc """
  Creates an HTTP namespace.

  Instances registered in it are discoverable via `DiscoverInstances` but not
  via DNS.
  """
  def create_http_namespace(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "CreateHttpNamespace", input, options)

  @doc """
  Creates a private DNS namespace visible only inside a specified Amazon VPC.

  The namespace defines your service naming scheme, e.g. namespace
  `example.com` plus service `backend` yields `backend.example.com`.
  """
  def create_private_dns_namespace(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "CreatePrivateDnsNamespace", input, options)

  @doc """
  Creates a public DNS namespace visible on the internet.

  The namespace defines your service naming scheme, e.g. namespace
  `example.com` plus service `backend` yields `backend.example.com`.
  """
  def create_public_dns_namespace(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "CreatePublicDnsNamespace", input, options)

  @doc """
  Creates a service, defining DNS record combinations (`A`, `AAAA`, `SRV`,
  `CNAME`) and, optionally, a health check used by later `RegisterInstance`
  requests.
  """
  def create_service(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "CreateService", input, options)

  @doc """
  Deletes a namespace from the current account.

  Fails if the namespace still contains one or more services.
  """
  def delete_namespace(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "DeleteNamespace", input, options)

  @doc """
  Deletes a specified service.

  Fails if the service still contains one or more registered instances.
  """
  def delete_service(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "DeleteService", input, options)

  @doc """
  Deletes the Amazon Route 53 DNS records and health check, if any, that AWS
  Cloud Map created for the specified instance.
  """
  def deregister_instance(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "DeregisterInstance", input, options)

  @doc """
  Discovers registered instances for a specified namespace and service.

  Works for any namespace type; DNS queries are an alternative for public and
  private DNS namespaces.
  """
  def discover_instances(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "DiscoverInstances", input, options)

  @doc """
  Gets information about a specified instance.
  """
  def get_instance(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "GetInstance", input, options)

  @doc """
  Gets the current health status (`Healthy`, `Unhealthy`, or `Unknown`) of one
  or more instances associated with a specified service.
  """
  def get_instances_health_status(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "GetInstancesHealthStatus", input, options)

  @doc """
  Gets information about a namespace.
  """
  def get_namespace(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "GetNamespace", input, options)

  @doc """
  Gets information about any operation that returns an operation ID in its
  response, such as a `CreateService` request.
  """
  def get_operation(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "GetOperation", input, options)

  @doc """
  Gets the settings for a specified service.
  """
  def get_service(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "GetService", input, options)

  @doc """
  Lists summary information about the instances registered with a specified
  service.
  """
  def list_instances(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "ListInstances", input, options)

  @doc """
  Lists summary information about the namespaces created by the current AWS
  account.
  """
  def list_namespaces(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "ListNamespaces", input, options)

  @doc """
  Lists operations that match the criteria that you specify.
  """
  def list_operations(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "ListOperations", input, options)

  @doc """
  Lists summary information for all services associated with one or more
  specified namespaces.
  """
  def list_services(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "ListServices", input, options)

  @doc """
  Lists tags for the specified resource.
  """
  def list_tags_for_resource(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "ListTagsForResource", input, options)

  @doc """
  Creates or updates one or more records and, optionally, creates a health
  check based on the settings in a specified service.

  One `RegisterInstance` request must complete before another may be submitted
  for the same service ID and instance ID.
  """
  def register_instance(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "RegisterInstance", input, options)

  @doc """
  Adds one or more tags to the specified resource.
  """
  def tag_resource(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "TagResource", input, options)

  @doc """
  Removes one or more tags from the specified resource.
  """
  def untag_resource(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "UntagResource", input, options)

  @doc """
  Changes the health status of a custom health check to healthy or unhealthy.

  Only applies to custom health checks defined via `HealthCheckCustomConfig`;
  it cannot change Route 53 health checks defined via `HealthCheckConfig`.
  """
  def update_instance_custom_health_status(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "UpdateInstanceCustomHealthStatus", input, options)

  @doc """
  Updates TTL settings for existing `DnsRecords` configurations and adds,
  updates, or deletes `HealthCheckConfig` for a specified service.

  Omitting existing `DnsRecords`/`HealthCheckConfig` entries deletes them;
  `HealthCheckCustomConfig` cannot be changed through this call.
  """
  def update_service(%Client{} = client, input, options \\ []),
    do: Request.request_post(client, metadata(), "UpdateService", input, options)
end
|
lib/aws/generated/service_discovery.ex
| 0.925382 | 0.466785 |
service_discovery.ex
|
starcoder
|
defmodule Verk.SortedSet do
  @moduledoc """
  This module interacts with the jobs on a sorted set
  """
  import Verk.Dsl
  alias Verk.Job

  # SHA of the server-side Lua script used by requeue_job/3 (resolved at
  # compile time).
  @requeue_now_script Verk.Scripts.sha("requeue_job_now")

  @doc """
  Counts how many jobs are inside the sorted set
  """
  @spec count(String.t, GenServer.server) :: {:ok, integer} | {:error, Redix.Error.t}
  def count(key, redis) do
    Redix.command(redis, ["ZCARD", key])
  end

  @doc """
  Counts how many jobs are inside the sorted set, raising if there's an error
  """
  @spec count!(String.t, GenServer.server) :: integer
  def count!(key, redis) do
    bangify(count(key, redis))
  end

  @doc """
  Clears the sorted set

  It will return `{:ok, true}` if the sorted set was cleared and `{:ok, false}` otherwise

  An error tuple may be returned if Redis failed
  """
  @spec clear(String.t, GenServer.server) :: {:ok, boolean} | {:error, Redix.Error.t}
  def clear(key, redis) do
    # DEL returns the number of keys removed: 0 means the set didn't exist.
    case Redix.command(redis, ["DEL", key]) do
      {:ok, 0} -> {:ok, false}
      {:ok, 1} -> {:ok, true}
      {:error, error} -> {:error, error}
    end
  end

  @doc """
  Clears the sorted set, raising if there's an error

  It will return `true` if the sorted set was cleared and `false` otherwise
  """
  @spec clear!(String.t, GenServer.server) :: boolean
  def clear!(key, redis) do
    bangify(clear(key, redis))
  end

  @doc """
  Lists jobs from `start` to `stop`
  """
  @spec range(String.t, integer, integer, GenServer.server) :: {:ok, [Verk.Job.T]} | {:error, Redix.Error.t}
  def range(key, start \\ 0, stop \\ -1, redis) do
    case Redix.command(redis, ["ZRANGE", key, start, stop]) do
      {:ok, jobs} -> {:ok, Enum.map(jobs, &Job.decode!/1)}
      {:error, error} -> {:error, error}
    end
  end

  @doc """
  Lists jobs from `start` to `stop`, raising if there's an error
  """
  # Fixed spec: bangify/1 unwraps the {:ok, jobs} tuple, so this returns the
  # job list, not nil.
  @spec range!(String.t, integer, integer, GenServer.server) :: [Verk.Job.T]
  def range!(key, start \\ 0, stop \\ -1, redis) do
    bangify(range(key, start, stop, redis))
  end

  @doc """
  Lists jobs from `start` to `stop` along with the item scores
  """
  @spec range_with_score(String.t, integer, integer, GenServer.server)
    :: {:ok, [{Verk.Job.T, integer}]} | {:error, Redix.Error.t}
  def range_with_score(key, start \\ 0, stop \\ -1, redis) do
    case Redix.command(redis, ["ZRANGE", key, start, stop, "WITHSCORES"]) do
      {:ok, jobs} ->
        # The Redis returned list alternates, [<job>, <job score>, ...].
        # Enum.chunk_every/2 replaces the deprecated Enum.chunk/2; the reply
        # always has an even length, so pairing behavior is unchanged.
        jobs_with_scores =
          jobs
          |> Enum.chunk_every(2)
          |> Enum.map(fn [job, score] -> {Job.decode!(job), String.to_integer(score)} end)

        {:ok, jobs_with_scores}
      {:error, error} -> {:error, error}
    end
  end

  @doc """
  Lists jobs from `start` to `stop` along with the item scores, raising if there's an error
  """
  # Fixed spec: returns the list of {job, score} pairs, not nil.
  @spec range_with_score!(String.t, integer, integer, GenServer.server) :: [{Verk.Job.T, integer}]
  def range_with_score!(key, start \\ 0, stop \\ -1, redis) do
    bangify(range_with_score(key, start, stop, redis))
  end

  @doc """
  Deletes the job from the sorted set

  It returns `{:ok, true}` if the job was found and deleted
  Otherwise it returns `{:ok, false}`

  An error tuple may be returned if Redis failed
  """
  @spec delete_job(String.t, %Job{} | String.t, GenServer.server) :: {:ok, boolean} | {:error, Redix.Error.t}
  def delete_job(key, %Job{original_json: original_json}, redis) do
    delete_job(key, original_json, redis)
  end
  def delete_job(key, original_json, redis) do
    # ZREM returns how many members were removed (0 or 1 here).
    case Redix.command(redis, ["ZREM", key, original_json]) do
      {:ok, 0} -> {:ok, false}
      {:ok, 1} -> {:ok, true}
      {:error, error} -> {:error, error}
    end
  end

  @doc """
  Deletes the job from the sorted set, raising if there's an error

  It returns `true` if the job was found and delete
  Otherwise it returns `false`
  """
  @spec delete_job!(String.t, %Job{} | String.t, GenServer.server) :: boolean
  def delete_job!(key, %Job{original_json: original_json}, redis) do
    delete_job!(key, original_json, redis)
  end
  def delete_job!(key, original_json, redis) do
    bangify(delete_job(key, original_json, redis))
  end

  @doc """
  Moves the job from the sorted set back to its original queue

  It returns `{:ok, true}` if the job was found and requeued
  Otherwise it returns `{:ok, false}`

  An error tuple may be returned if Redis failed
  """
  @spec requeue_job(String.t, %Job{} | String.t, GenServer.server) :: {:ok, boolean} | {:error, Redix.Error.t}
  def requeue_job(key, %Job{original_json: original_json}, redis) do
    requeue_job(key, original_json, redis)
  end
  def requeue_job(key, original_json, redis) do
    # Runs the preloaded "requeue_job_now" script; nil means the job was not
    # present in the sorted set.
    case Redix.command(redis, ["EVALSHA", @requeue_now_script, 1, key, original_json]) do
      {:ok, nil} -> {:ok, false}
      {:ok, _job} -> {:ok, true}
      {:error, %Redix.Error{message: message}} ->
        # NOTE: unlike the other functions this unwraps the Redix error into
        # its message string; kept as-is for caller compatibility.
        {:error, message}
      error ->
        {:error, error}
    end
  end

  @doc """
  Moves the job from the sorted set back to its original queue, raising if there's an error

  It returns `true` if the job was found and requeued
  Otherwise it returns `false`
  """
  @spec requeue_job!(String.t, %Job{} | String.t, GenServer.server) :: boolean
  def requeue_job!(key, %Job{original_json: original_json}, redis) do
    requeue_job!(key, original_json, redis)
  end
  def requeue_job!(key, original_json, redis) do
    bangify(requeue_job(key, original_json, redis))
  end
end
|
lib/verk/sorted_set.ex
| 0.839471 | 0.476275 |
sorted_set.ex
|
starcoder
|
defmodule Chess.Square do
  @moduledoc """
  Square module
  """

  @x_lines ["a", "b", "c", "d", "e", "f", "g", "h"]
  @y_lines [1, 2, 7, 8]

  alias Chess.{Figure, Position}

  @doc """
  Creates 32 figures for new game and puts them to specific squares

  ## Examples

      iex> Chess.Square.prepare_for_new_game()
      [
        a1: %Chess.Figure{color: "w", type: "r"},
        b1: %Chess.Figure{color: "w", type: "n"},
        ...
      ]
  """
  def prepare_for_new_game() do
    # Squares are produced rank-by-rank (1, 2, 7, 8), files a..h within each
    # rank — the same ordering as the original reduce/flatten/reverse pipeline.
    for y <- @y_lines, x <- @x_lines, do: build_square(x, y)
  end

  # Builds a single {square_name, figure} pair for the starting position.
  defp build_square(x, y) do
    {:"#{x}#{y}", Figure.new(starting_color(y), starting_type(x, y))}
  end

  # White pieces occupy ranks 1-4, black pieces ranks 5-8.
  defp starting_color(rank) when rank <= 4, do: "w"
  defp starting_color(rank) when rank >= 5, do: "b"

  # Ranks 2 and 7 hold pawns; back ranks are derived from the file.
  defp starting_type(_file, rank) when rank in [2, 7], do: "p"
  defp starting_type(file, _rank) when file in ["a", "h"], do: "r"
  defp starting_type(file, _rank) when file in ["b", "g"], do: "n"
  defp starting_type(file, _rank) when file in ["c", "f"], do: "b"
  defp starting_type("d", _rank), do: "q"
  defp starting_type("e", _rank), do: "k"

  @doc """
  Creates figures for new game from existed position

  ## Examples

      iex> Chess.Square.prepare_from_position(position)
      [
        a1: %Chess.Figure{color: "w", type: "r"},
        b1: %Chess.Figure{color: "w", type: "n"},
        ...
      ]
  """
  def prepare_from_position(%Position{position: position}) do
    position
    |> String.split("/", trim: true)
    |> Stream.with_index()
    |> decode_ranks()
    |> List.flatten()
  end

  # Folds each "/"-separated rank into square/figure pairs, prepending so the
  # last rank string ends up first in the result (matching original order).
  defp decode_ranks(ranks) do
    Enum.reduce(ranks, [], fn {rank, index}, acc ->
      {squares, _final_file} = decode_rank(rank, index)
      [squares | acc]
    end)
  end

  defp decode_rank(rank, index) do
    rank
    |> String.codepoints()
    |> place_figures(index)
  end

  # Walks the rank's codepoints, tracking the current file index; digits skip
  # that many empty squares, letters become figures.
  defp place_figures(codepoints, index) do
    Enum.reduce(codepoints, {[], 0}, fn symbol, {squares, file_index} ->
      case Integer.parse(symbol) do
        :error -> {[place_figure(symbol, file_index, 8 - index) | squares], file_index + 1}
        {empty_count, _} -> {squares, file_index + empty_count}
      end
    end)
  end

  # Lowercase symbols denote black figures, uppercase white.
  defp place_figure(symbol, file_index, rank) do
    type = String.downcase(symbol)
    color = if type == symbol, do: "b", else: "w"
    {:"#{Enum.at(@x_lines, file_index)}#{rank}", Figure.new(color, type)}
  end
end
|
lib/chess/square.ex
| 0.830903 | 0.528047 |
square.ex
|
starcoder
|
defmodule ResxBase.Decoder do
  @moduledoc """
  Decode data resources from a RFC 4648 encoding.

  ### Decoding

  The type of decoding is specified by using the `:encoding` option.

      Resx.Resource.transform(resource, ResxBase.Decoder, encoding: :base64)

  The list of available decoding formats to choose from are:

  * `:base16` - By default this works the same as `Base.decode16/1`.
    Optionally the case can be specified using the `:case` option, this can
    be either `:lower` (for lowercase input) or `:upper` (for uppercase input)
    or `:mixed` (for case-insensitive input).
  * `:base32` - By default this works the same as `Base.encode32/1`.
  * `:base64` - By default this works the same as `Base.encode64/1`.
  * `:hex32` - By default this works the same as `Base.hex_encode32/1`.
  * `:url64` - By default this works the same as `Base.url_encode64/1`.

  All decodings also take the configuration options specified in `ResxBase`.

  ### Streams

  Streamed data is expected to be made up of individual complete encoding
  sequences. Where each encoding is decoded as-is in the stream.

  e.g. If you had the encoded data `"aGVsbG8=IA==d29ybGQ="` this would be
  decoded to: `"hello world"`. However if it was a stream consisting of
  `["aGVsbG8=", "IA==", "d29ybGQ="]`, it would be decoded as:
  `["hello", " ", "world"]`.
  """
  use Resx.Transformer

  alias Resx.Resource.Content

  defmodule DecodingError do
    defexception [:message, :resource, :data, :options]

    @impl Exception
    def exception({ resource, data, options }) do
      %DecodingError{
        message: "failed to decode the resource with a #{inspect options[:encoding]} decoder",
        resource: resource,
        data: data,
        options: options
      }
    end
  end

  @impl Resx.Transformer
  def transform(resource, opts) do
    case opts[:encoding] do
      :base16 ->
        # Input case defaults to uppercase; :mixed normalizes to uppercase
        # before decoding.
        decode_chunk =
          case opts[:case] || :upper do
            :lower -> &ResxBase.decode16_lower(&1, opts)
            :mixed -> fn chunk -> chunk |> String.upcase() |> ResxBase.decode16_upper(opts) end
            :upper -> &ResxBase.decode16_upper(&1, opts)
          end

        decode_content(resource, decode_chunk, opts)

      base32 when base32 in [:base32, :hex32] ->
        padded_opts = opts ++ [pad_chr: "="]

        decode_chunk =
          case base32 do
            :base32 -> &ResxBase.decode32(&1, padded_opts)
            :hex32 -> &ResxBase.hex_decode32(&1, padded_opts)
          end

        decode_content(resource, decode_chunk, padded_opts)

      base64 when base64 in [:base64, :url64] ->
        padded_opts = opts ++ [pad_chr: "="]

        decode_chunk =
          case base64 do
            :base64 -> &ResxBase.decode64(&1, padded_opts)
            :url64 -> &ResxBase.url_decode64(&1, padded_opts)
          end

        decode_content(resource, decode_chunk, padded_opts)

      encoding ->
        {:error, {:internal, "Unknown encoding format: #{inspect(encoding)}"}}
    end
  end

  # Lazily maps the decode function over the resource's content stream; any
  # chunk that fails to decode raises a DecodingError when the stream is run.
  defp decode_content(resource = %{content: content}, decode_chunk, opts) do
    content = Content.Stream.new(content)

    decoded =
      Stream.map(content, fn chunk ->
        case decode_chunk.(chunk) do
          {:ok, decoded} -> decoded
          :error -> raise DecodingError, {resource, chunk, opts}
        end
      end)

    {:ok, %{resource | content: %{content | data: decoded}}}
  end
end
|
lib/resx_base/decoder.ex
| 0.935391 | 0.557062 |
decoder.ex
|
starcoder
|
defmodule TableRex.Renderer.Text do
  @moduledoc """
  Renderer module which handles outputting ASCII-style tables for display.
  """
  alias TableRex.Cell
  alias TableRex.Table
  alias TableRex.Renderer.Text.Meta
  @behaviour TableRex.Renderer
  # Supported option values:
  # horizontal_styles: [:all, :header, :frame, :off]
  # vertical_styles: [:all, :frame, :off]
  # Which horizontal/vertical styles render a specific separator.
  @render_horizontal_frame_styles [:all, :frame, :header]
  @render_vertical_frame_styles [:all, :frame]
  # Column/row separators between cells are only drawn for the :all style.
  @render_column_separators_styles [:all]
  @render_row_separators_styles [:all]
def default_options do
%{
horizontal_style: :header,
vertical_style: :all,
horizontal_symbol: "-",
vertical_symbol: "|",
intersection_symbol: "+",
top_frame_symbol: "-",
title_separator_symbol: "-",
header_separator_symbol: "-",
bottom_frame_symbol: "-"
}
end
@doc """
Implementation of the TableRex.Renderer behaviour.

Available styling options.

`horizontal_styles` controls horizontal separators and can be one of:

* `:all`: display separators between and around every row.
* `:header`: display outer and header horizontal separators only.
* `:frame`: display outer horizontal separators only.
* `:off`: display no horizontal separators.

`vertical_styles` controls vertical separators and can be one of:

* `:all`: display between and around every column.
* `:frame`: display outer vertical separators only.
* `:off`: display no vertical separators.
"""
def render(table = %Table{}, opts) do
  {col_widths, row_heights} = max_dimensions(table)
  # Calculations that would otherwise be carried out multiple times are done once and their
  # results are stored in the %Meta{} struct which is then passed through the pipeline.
  render_horizontal_frame? = opts[:horizontal_style] in @render_horizontal_frame_styles
  render_vertical_frame? = opts[:vertical_style] in @render_vertical_frame_styles
  render_column_separators? = opts[:vertical_style] in @render_column_separators_styles
  render_row_separators? = opts[:horizontal_style] in @render_row_separators_styles
  table_width = table_width(col_widths, vertical_frame?: render_vertical_frame?)
  intersections = intersections(table_width, col_widths, vertical_style: opts[:vertical_style])
  meta = %Meta{
    col_widths: col_widths,
    row_heights: row_heights,
    table_width: table_width,
    intersections: intersections,
    render_horizontal_frame?: render_horizontal_frame?,
    render_vertical_frame?: render_vertical_frame?,
    render_column_separators?: render_column_separators?,
    render_row_separators?: render_row_separators?
  }
  # Each stage threads {table, meta, opts, rendered_lines}; stages PREPEND
  # lines to the accumulator, so render_to_string/1 (defined later in the
  # file) presumably reverses before joining — confirm against that function.
  rendered =
    {table, meta, opts, []}
    |> render_top_frame
    |> render_title
    |> render_title_separator
    |> render_header
    |> render_header_separator
    |> render_rows
    |> render_bottom_frame
    |> render_to_string
  {:ok, rendered}
end
# No horizontal frame requested: emit nothing.
defp render_top_frame({table, %Meta{render_horizontal_frame?: false} = meta, opts, rendered}) do
  {table, meta, opts, rendered}
end

# With a title present, only the two outer corners (when a vertical frame is
# rendered) get intersection symbols, so the title reads as one spanning cell.
defp render_top_frame({%Table{title: title} = table, meta, opts, rendered})
     when is_binary(title) do
  corners = if meta.render_vertical_frame?, do: [0, meta.table_width - 1], else: []

  frame =
    render_line(meta.table_width, corners, opts[:top_frame_symbol], opts[:intersection_symbol])

  {table, meta, opts, [frame | rendered]}
end

# No title: draw the frame with intersections at every column boundary.
defp render_top_frame({table, meta, opts, rendered}) do
  frame =
    render_line(
      meta.table_width,
      meta.intersections,
      opts[:top_frame_symbol],
      opts[:intersection_symbol]
    )

  {table, meta, opts, [frame | rendered]}
end
# No title configured: pass the pipeline state through untouched.
defp render_title({%Table{title: nil} = table, meta, opts, rendered}) do
  {table, meta, opts, rendered}
end

# Centers the title across the table's inner width, framing it vertically
# when a vertical frame is being rendered.
defp render_title({%Table{title: title} = table, meta, opts, rendered}) do
  row = do_render_cell(title, Meta.inner_width(meta))

  row =
    if meta.render_vertical_frame?,
      do: frame_with(row, opts[:vertical_symbol]),
      else: row

  {table, meta, opts, [row | rendered]}
end
# No title: no separator row is needed.
defp render_title_separator({%Table{title: nil} = table, meta, opts, rendered}) do
  {table, meta, opts, rendered}
end

# :all/:header horizontal styles draw a full separator line under the title.
defp render_title_separator(
       {table, meta, %{horizontal_style: horizontal_style} = opts, rendered}
     )
     when horizontal_style in [:all, :header] do
  line =
    render_line(
      meta.table_width,
      meta.intersections,
      opts[:title_separator_symbol],
      opts[:intersection_symbol]
    )

  {table, meta, opts, [line | rendered]}
end

# Other horizontal styles with a vertical frame: a blank row framed at the
# outer edges keeps the frame continuous.
defp render_title_separator({table, %Meta{render_vertical_frame?: true} = meta, opts, rendered}) do
  line = render_line(meta.table_width, [0, meta.table_width - 1], " ", opts[:vertical_symbol])
  {table, meta, opts, [line | rendered]}
end

# No frame at all: an empty line stands in for the separator.
defp render_title_separator(
       {table, %Meta{render_vertical_frame?: false} = meta, opts, rendered}
     ) do
  {table, meta, opts, ["" | rendered]}
end
# An empty header row renders nothing.
defp render_header({%Table{header_row: []} = table, meta, opts, rendered}) do
  {table, meta, opts, rendered}
end

# Renders the header cells, separated by the vertical symbol (or a space when
# column separators are off) and framed when a vertical frame is in effect.
defp render_header({%Table{header_row: header_row} = table, meta, opts, rendered}) do
  col_sep = if meta.render_column_separators?, do: opts[:vertical_symbol], else: " "
  row = render_cell_row(table, meta, header_row, col_sep)

  row =
    if meta.render_vertical_frame?,
      do: frame_with(row, opts[:vertical_symbol]),
      else: row

  {table, meta, opts, [row | rendered]}
end
# No header row: no separator.
defp render_header_separator({%Table{header_row: []} = table, meta, opts, rendered}) do
  {table, meta, opts, rendered}
end

# :all/:header horizontal styles draw a full separator under the header.
defp render_header_separator(
       {table, meta, %{horizontal_style: horizontal_style} = opts, rendered}
     )
     when horizontal_style in [:all, :header] do
  line =
    render_line(
      meta.table_width,
      meta.intersections,
      opts[:header_separator_symbol],
      opts[:intersection_symbol]
    )

  {table, meta, opts, [line | rendered]}
end

# Otherwise, keep the vertical frame continuous with a blank framed row...
defp render_header_separator(
       {table, %Meta{render_vertical_frame?: true} = meta, opts, rendered}
     ) do
  line = render_line(meta.table_width, [0, meta.table_width - 1], " ", opts[:vertical_symbol])
  {table, meta, opts, [line | rendered]}
end

# ...or emit an empty line when no frame is rendered at all.
defp render_header_separator(
       {table, %Meta{render_vertical_frame?: false} = meta, opts, rendered}
     ) do
  {table, meta, opts, ["" | rendered]}
end
# Renders every body row: cells joined by the column separator, optionally
# framed vertically, optionally interleaved with horizontal separator lines.
defp render_rows({%Table{rows: rows} = table, meta, opts, rendered}) do
  col_sep = if meta.render_column_separators?, do: opts[:vertical_symbol], else: " "

  body = Enum.map(rows, &render_cell_row(table, meta, &1, col_sep))

  body =
    if meta.render_vertical_frame? do
      Enum.map(body, &frame_with(&1, opts[:vertical_symbol]))
    else
      body
    end

  body =
    if meta.render_row_separators? do
      row_separator =
        render_line(
          meta.table_width,
          meta.intersections,
          opts[:horizontal_symbol],
          opts[:intersection_symbol]
        )

      Enum.intersperse(body, row_separator)
    else
      body
    end

  {table, meta, opts, body ++ rendered}
end
# No horizontal frame: nothing to draw.
defp render_bottom_frame({table, %Meta{render_horizontal_frame?: false} = meta, opts, rendered}) do
  {table, meta, opts, rendered}
end

# Draw the bottom rule with intersections at every column boundary.
defp render_bottom_frame({table, meta, opts, rendered}) do
  frame =
    render_line(
      meta.table_width,
      meta.intersections,
      opts[:bottom_frame_symbol],
      opts[:intersection_symbol]
    )

  {table, meta, opts, [frame | rendered]}
end
# Renders one horizontal rule of `table_width` characters: the intersection
# symbol at each position listed in `intersections`, the separator symbol
# everywhere else.
#
# Uses Enum.map_join/2 to build the string in a single pass instead of
# materialising an intermediate list and joining it afterwards.
defp render_line(table_width, intersections, separator_symbol, intersection_symbol) do
  Enum.map_join(0..(table_width - 1), fn n ->
    if n in intersections, do: intersection_symbol, else: separator_symbol
  end)
end
# Renders one row of cells joined by `separator`, indexing each cell so
# render_cell/3 can look up per-column metadata.
defp render_cell_row(%Table{} = table, %Meta{} = meta, row, separator) do
  row
  |> Enum.with_index()
  |> Enum.map_join(separator, &render_cell(table, meta, &1))
end
# Renders a single cell at `col_index`, padding/aligning it to the column
# width. Cell-level :align/:color (when set) override the column metadata —
# the `||` fallback chain picks the cell's own value first.
defp render_cell(%Table{} = table, %Meta{} = meta, {%Cell{} = cell, col_index}) do
  col_width = Meta.col_width(meta, col_index)
  col_padding = Table.get_column_meta(table, col_index, :padding)
  cell_align = Map.get(cell, :align) || Table.get_column_meta(table, col_index, :align)
  cell_color = Map.get(cell, :color) || Table.get_column_meta(table, col_index, :color)
  do_render_cell(cell.rendered_value, col_width, col_padding, align: cell_align)
  |> format_with_color(cell.rendered_value, cell_color)
end
# Renders a cell value with default centre alignment and no padding.
defp do_render_cell(value, inner_width) do
  do_render_cell(value, inner_width, 0, align: :center)
end

# Centre alignment: the configured padding is ignored — the value is simply
# balanced inside `inner_width`. Width is measured on the colour-stripped
# value so ANSI escape codes don't skew alignment; for an odd leftover, the
# extra space goes after the value (`round/1` rounds half away from zero).
defp do_render_cell(value, inner_width, _padding, align: :center) do
  value_len = String.length(strip_ansi_color_codes(value))
  post_value = ((inner_width - value_len) / 2) |> round
  pre_value = inner_width - (post_value + value_len)
  String.duplicate(" ", pre_value) <> value <> String.duplicate(" ", post_value)
end

# Left/right alignment: `padding` spaces go on the aligned side and the rest
# of `inner_width` fills the opposite side.
# NOTE(review): a value wider than `inner_width` makes a pad count negative
# and `String.duplicate/2` raises — callers are expected to size columns via
# `max_dimensions/1` first.
defp do_render_cell(value, inner_width, padding, align: align) do
  value_len = String.length(strip_ansi_color_codes(value))
  alt_side_padding = inner_width - value_len - padding

  {pre_value, post_value} =
    case align do
      :left ->
        {padding, alt_side_padding}

      :right ->
        {alt_side_padding, padding}
    end

  String.duplicate(" ", pre_value) <> value <> String.duplicate(" ", post_value)
end
# Computes the column indexes at which horizontal rules show an intersection
# symbol, depending on the vertical rule style.
# NOTE(review): :off returns a list while the other styles return a MapSet;
# both support the `in` checks used by `render_line/4`.
defp intersections(_table_width, _col_widths, vertical_style: :off), do: []

defp intersections(table_width, _col_widths, vertical_style: :frame) do
  MapSet.new([0, table_width - 1])
end

defp intersections(table_width, col_widths, vertical_style: :all) do
  inner =
    col_widths
    |> ordered_col_widths()
    |> Enum.reduce([0], fn width, [previous | _] = acc -> [previous + width + 1 | acc] end)

  MapSet.new([0, table_width - 1 | inner])
end
# Calculates the column widths and row heights required to fit every cell
# (header row included), then widens all columns evenly when the title is
# wider than the table body.
defp max_dimensions(%Table{} = table) do
  {col_widths, row_heights} =
    [table.header_row | table.rows]
    |> Enum.with_index()
    |> Enum.reduce({%{}, %{}}, &reduce_row_maximums(table, &1, &2))

  num_columns = map_size(col_widths)

  # Infer padding on left and right of title from the first and last columns.
  title_padding =
    [0, num_columns - 1]
    |> Enum.map(&Table.get_column_meta(table, &1, :padding))
    |> Enum.sum()

  # Compare table body width (columns + one separator between each) with
  # the title width.
  col_separators_widths = num_columns - 1
  body_width = (col_widths |> Map.values() |> Enum.sum()) + col_separators_widths
  title_width = if(is_nil(table.title), do: 0, else: String.length(table.title)) + title_padding

  # Add extra padding equally to all columns if required to match body and title width.
  revised_col_widths =
    if body_width >= title_width do
      col_widths
    else
      # Ceil so the widened body is never narrower than the title.
      extra_padding = ((title_width - body_width) / num_columns) |> Float.ceil() |> round
      Enum.into(col_widths, %{}, fn {k, v} -> {k, v + extra_padding} end)
    end

  {revised_col_widths, row_heights}
end
# Folds one indexed row into the running column-width / row-height maxima.
defp reduce_row_maximums(%Table{} = table, {row, row_index}, {col_widths, row_heights}) do
  row
  |> Enum.with_index()
  |> Enum.reduce({col_widths, row_heights}, fn indexed_cell, acc ->
    reduce_cell_maximums(table, indexed_cell, acc, row_index)
  end)
end
# Folds a single cell into the running maxima: its padded width raises the
# column's maximum width, and its line count raises the row's maximum height.
defp reduce_cell_maximums(
       %Table{} = table,
       {cell, col_index},
       {col_widths, row_heights},
       row_index
     ) do
  padding = Table.get_column_meta(table, col_index, :padding)
  {width, height} = content_dimensions(cell.rendered_value, padding)
  col_widths = Map.update(col_widths, col_index, width, &Enum.max([&1, width]))
  row_heights = Map.update(row_heights, row_index, height, &Enum.max([&1, height]))
  {col_widths, row_heights}
end
# Measures a cell value, returning `{width, height}` where `width` is the
# length of the longest line plus padding on both sides, and `height` is the
# number of lines. ANSI colour codes are stripped first so they don't count
# towards the width.
defp content_dimensions(value, padding) when is_binary(value) and is_number(padding) do
  lines =
    value
    |> strip_ansi_color_codes()
    |> String.split("\n")

  height = Enum.count(lines)

  # Bug fix: take the maximum *length* across lines. The previous
  # `Enum.max(lines) |> String.length()` picked the lexicographically
  # greatest line, which for multi-line cells is not necessarily the
  # longest one (e.g. ["zz", "aaaa"] measured 2 instead of 4).
  width =
    lines
    |> Enum.map(&String.length/1)
    |> Enum.max()

  {width + padding * 2, height}
end
# Total rendered width: the sum of all column widths, plus one separator
# column between each adjacent pair, plus the two frame columns when the
# table has a vertical frame.
defp table_width(%{} = col_widths, vertical_frame?: vertical_frame?) do
  inner_width =
    col_widths
    |> Map.values()
    |> Enum.sum()

  separator_count = max(map_size(col_widths) - 1, 0)
  width = inner_width + separator_count

  if vertical_frame?, do: width + 2, else: width
end
# Returns the column widths as a list ordered by column index.
defp ordered_col_widths(%{} = col_widths) do
  col_widths
  |> Map.keys()
  |> Enum.sort()
  |> Enum.map(&Map.fetch!(col_widths, &1))
end
# Wraps a rendered line in the vertical frame symbol on both sides.
defp frame_with(string, frame) do
  "#{frame}#{string}#{frame}"
end
# Joins the reverse-accumulated lines into the final output: each line is
# trimmed of trailing whitespace, lines are newline-joined in original
# order, and the result ends with a newline.
defp render_to_string({_, _, _, rendered_lines}) when is_list(rendered_lines) do
  body =
    rendered_lines
    |> Enum.reverse()
    |> Enum.map_join("\n", &String.trim_trailing/1)

  body <> "\n"
end
# No colour configured: return the padded text untouched.
defp format_with_color(text, _, nil), do: text

# Colour given as a function: it receives the padded text and the raw cell
# value and returns IO.ANSI-compatible data; a reset is appended and the
# result is rendered to escape sequences by `IO.ANSI.format_fragment/2`
# (with emit forced to true).
defp format_with_color(text, value, color) when is_function(color) do
  [color.(text, value) | IO.ANSI.reset()]
  |> IO.ANSI.format_fragment(true)
end

# Colour given as ANSI format data (e.g. `:red`): wrap the text in the
# colour code plus a trailing reset.
defp format_with_color(text, _, color) do
  [[color | text] | IO.ANSI.reset()]
  |> IO.ANSI.format_fragment(true)
end
# Removes ANSI colour escape sequences (e.g. "\e[31m") so width
# calculations only count visible characters.
defp strip_ansi_color_codes(text) do
  String.replace(text, ~r|\e\[\d+m|u, "")
end
end
|
lib/table_rex/renderer/text.ex
| 0.825765 | 0.516291 |
text.ex
|
starcoder
|
defmodule Crontab.CronExpression do
  @moduledoc """
  The `Crontab.CronExpression` module / struct.

  Holds a parsed cron expression as per-field value lists and provides the
  `~e` sigil for building expressions from their string form.
  """

  alias Crontab.CronExpression.Parser

  @type t :: %Crontab.CronExpression{
          extended: boolean,
          reboot: boolean,
          second: [value(second)],
          minute: [value(minute)],
          hour: [value(hour)],
          day: [value(day)],
          month: [value(month)],
          weekday: [value(weekday)],
          year: [value(year)]
        }

  @type interval :: :second | :minute | :hour | :day | :month | :weekday | :year

  @typedoc deprecated: "Use Crontab.CronExpression.min_max/1 instead"
  @type min_max :: {:-, time_unit, time_unit}

  @type min_max(time_unit) :: {:-, time_unit, time_unit}

  @type value ::
          value(Calendar.second())
          | value(Calendar.minute())
          | value(Calendar.hour())
          | value(Calendar.day())
          | value(Calendar.month())
          | value(Calendar.day_of_week())
          | value(Calendar.year())

  # A single field entry: a literal unit, wildcard (:*), last (:L), a step
  # ({:/, base, step}), a range (min_max) or a nearest-weekday ({:W, _}).
  @type value(time_unit) ::
          time_unit
          | :*
          | :L
          | {:L, value(time_unit)}
          | {:/,
             time_unit
             | :*
             | min_max(time_unit), pos_integer}
          | min_max(time_unit)
          | {:W, time_unit | :L}

  @typedoc deprecated: "Use Calendar.second/0 instead"
  @type second :: Calendar.second()

  @typedoc deprecated: "Use Calendar.minute/0 instead"
  @type minute :: Calendar.minute()

  @typedoc deprecated: "Use Calendar.hour/0 instead"
  @type hour :: Calendar.hour()

  @typedoc deprecated: "Use Calendar.day/0 instead"
  @type day :: Calendar.day()

  @typedoc deprecated: "Use Calendar.month/0 instead"
  @type month :: Calendar.month()

  @typedoc deprecated: "Use Calendar.day_of_week/0 instead"
  @type weekday :: Calendar.day_of_week()

  @typedoc deprecated: "Use Calendar.year/0 instead"
  @type year :: Calendar.year()

  @typedoc deprecated: "Use Calendar.[second|minute|hour|day|month|day_of_week|year]/0 instead"
  @type time_unit :: second | minute | hour | day | month | weekday | year

  @type condition(name, time_unit) :: {name, [value(time_unit)]}

  @type condition ::
          condition(:second, Calendar.second())
          | condition(:minute, Calendar.minute())
          | condition(:hour, Calendar.hour())
          | condition(:day, Calendar.day())
          | condition(:month, Calendar.month())
          | condition(:weekday, Calendar.day_of_week())
          | condition(:year, Calendar.year())

  @type condition_list :: [condition]

  @doc """
  Defines the Cron interval.

      * * * * * * *
      | | | | | | |
      | | | | | | +-- :year Year (range: 1900-3000)
      | | | | | +---- :weekday Day of the Week (range: 1-7, 1 standing for Monday)
      | | | | +------ :month Month of the Year (range: 1-12)
      | | | +-------- :day Day of the Month (range: 1-31)
      | | +---------- :hour Hour (range: 0-23)
      | +------------ :minute Minute (range: 0-59)
      +-------------- :second Second (range: 0-59)

  The `:extended` attribute defines if the second is taken into account.
  """
  defstruct extended: false,
            reboot: false,
            second: [:*],
            minute: [:*],
            hour: [:*],
            day: [:*],
            month: [:*],
            weekday: [:*],
            year: [:*]

  @doc """
  Create a `%Crontab.CronExpression{}` via sigil.

  ## Examples

      iex> ~e[*]
      %Crontab.CronExpression{
        extended: false,
        second: [:*],
        minute: [:*],
        hour: [:*],
        day: [:*],
        month: [:*],
        weekday: [:*],
        year: [:*]}

      iex> ~e[*]e
      %Crontab.CronExpression{
        extended: true,
        second: [:*],
        minute: [:*],
        hour: [:*],
        day: [:*],
        month: [:*],
        weekday: [:*],
        year: [:*]}

      iex> ~e[1 2 3 4 5 6 7]e
      %Crontab.CronExpression{
        extended: true,
        second: [1],
        minute: [2],
        hour: [3],
        day: [4],
        month: [5],
        weekday: [6],
        year: [7]}
  """
  @spec sigil_e(binary, charlist) :: t
  def sigil_e(cron_expression, options)
  # The `e` modifier enables extended (seconds-aware) parsing.
  def sigil_e(cron_expression, [?e]), do: Parser.parse!(cron_expression, true)
  def sigil_e(cron_expression, _options), do: Parser.parse!(cron_expression, false)

  @doc """
  Convert `Crontab.CronExpression` struct to tuple List.

  ## Examples

      iex> Crontab.CronExpression.to_condition_list %Crontab.CronExpression{
      ...> minute: [1], hour: [2], day: [3], month: [4], weekday: [5], year: [6]}
      [ {:minute, [1]},
        {:hour, [2]},
        {:day, [3]},
        {:month, [4]},
        {:weekday, [5]},
        {:year, [6]}]

      iex> Crontab.CronExpression.to_condition_list %Crontab.CronExpression{
      ...> extended: true, second: [0], minute: [1], hour: [2], day: [3], month: [4], weekday: [5], year: [6]}
      [ {:second, [0]},
        {:minute, [1]},
        {:hour, [2]},
        {:day, [3]},
        {:month, [4]},
        {:weekday, [5]},
        {:year, [6]}]
  """
  @spec to_condition_list(t) :: condition_list
  def to_condition_list(interval = %__MODULE__{extended: false}) do
    [
      {:minute, interval.minute},
      {:hour, interval.hour},
      {:day, interval.day},
      {:month, interval.month},
      {:weekday, interval.weekday},
      {:year, interval.year}
    ]
  end

  # Extended expressions prepend the :second condition to the base list.
  def to_condition_list(interval = %__MODULE__{}) do
    [{:second, interval.second} | to_condition_list(%{interval | extended: false})]
  end

  # Renders expressions back in `~e[...]` sigil form when inspected.
  defimpl Inspect do
    alias Crontab.CronExpression
    alias Crontab.CronExpression.Composer

    @doc """
    Pretty print Cron expressions.

    ## Examples

        iex> IO.inspect %Crontab.CronExpression{}
        ~e[* * * * * *]

        iex> import Crontab.CronExpression
        iex> IO.inspect %Crontab.CronExpression{extended: true}
        ~e[* * * * * * *]e
    """
    @spec inspect(CronExpression.t(), any) :: String.t()
    def inspect(cron_expression = %CronExpression{extended: false}, _options) do
      "~e[" <> Composer.compose(cron_expression) <> "]"
    end

    def inspect(cron_expression = %CronExpression{extended: true}, _options) do
      "~e[" <> Composer.compose(cron_expression) <> "]e"
    end
  end
end
|
lib/crontab/cron_expression.ex
| 0.914601 | 0.615203 |
cron_expression.ex
|
starcoder
|
require Record
defmodule JOSE.JWE do
@moduledoc ~S"""
JWE stands for JSON Web Encryption which is defined in [RFC 7516](https://tools.ietf.org/html/rfc7516).
## Key Derivation Algorithms
The following key derivation algorithms for the `"alg"` field are currently supported by `JOSE.JWE` (some may need the `JOSE.crypto_fallback/1` option to be enabled):
* `"A128GCMKW"`
* `"A192GCMKW"`
* `"A256GCMKW"`
* `"A128KW"`
* `"A192KW"`
* `"A256KW"`
* `"dir"`
* `"ECDH-ES"`
* `"ECDH-ES+A128KW"`
* `"ECDH-ES+A192KW"`
* `"ECDH-ES+A256KW"`
* `"PBES2-HS256+A128KW"`
* `"PBES2-HS384+A192KW"`
* `"PBES2-HS512+A256KW"`
* `"RSA1_5"`
* `"RSA-OAEP"`
* `"RSA-OAEP-256"`
## Encryption Algorithms
The following encryption algorithms for the `"enc"` field are currently supported by `JOSE.JWE` (some may need the `JOSE.crypto_fallback/1` option to be enabled):
* `"A128CBC-HS256"`
* `"A192CBC-HS384"`
* `"A256CBC-HS512"`
* `"A128GCM"`
* `"A192GCM"`
* `"A256GCM"`
* `"ChaCha20/Poly1305"`
## Compression Algorithms
The following compression algorithms for the `"zip"` field are currently supported by `JOSE.JWE`:
* `"DEF"`
## Key Derivation Examples
All of the examples below will use `"enc"` set to `"A128GCM"`, `"A192GCM"`, or `"A256GCM"` depending on the derived key size.
The octet key used will typically be all zeroes of the required size in the form of `<<0::128>>` (for a 128-bit key).
All of the example keys generated below can be found here: [https://gist.github.com/potatosalad/dd140560b2bdbdab886d](https://gist.github.com/potatosalad/dd140560b2bdbdab886d)
# octet keys we'll use below
jwk_oct128 = JOSE.JWK.from_oct(<<0::128>>)
jwk_oct192 = JOSE.JWK.from_oct(<<0::192>>)
jwk_oct256 = JOSE.JWK.from_oct(<<0::256>>)
jwk_secret = JOSE.JWK.from_oct("secret")
# EC keypairs we'll use below
jwk_ec256_alice_sk = JOSE.JWK.generate_key({:ec, :secp256r1})
jwk_ec256_alice_pk = JOSE.JWK.to_public(jwk_ec256_alice_sk)
jwk_ec256_bob_sk = JOSE.JWK.generate_key({:ec, :secp256r1})
jwk_ec256_bob_pk = JOSE.JWK.to_public(jwk_ec256_bob_sk)
# X25519 keypairs we'll use below
jwk_x25519_alice_sk = JOSE.JWK.generate_key({:okp, :X25519})
jwk_x25519_alice_pk = JOSE.JWK.to_public(jwk_x25519_alice_sk)
jwk_x25519_bob_sk = JOSE.JWK.generate_key({:okp, :X25519})
jwk_x25519_bob_pk = JOSE.JWK.to_public(jwk_x25519_bob_sk)
# X448 keypairs we'll use below
jwk_x448_alice_sk = JOSE.JWK.generate_key({:okp, :X448})
jwk_x448_alice_pk = JOSE.JWK.to_public(jwk_x448_alice_sk)
jwk_x448_bob_sk = JOSE.JWK.generate_key({:okp, :X448})
jwk_x448_bob_pk = JOSE.JWK.to_public(jwk_x448_bob_sk)
# RSA keypairs we'll use below
jwk_rsa_sk = JOSE.JWK.generate_key({:rsa, 4096})
jwk_rsa_pk = JOSE.JWK.to_public(jwk_rsa_sk)
### A128GCMKW, A192GCMKW, and A256GCMKW
# A128GCMKW
iex> encrypted_a128gcmkw = JOSE.JWE.block_encrypt(jwk_oct128, "{}", %{ "alg" => "A128GCMKW", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct128, encrypted_a128gcmkw) |> elem(0)
"{}"
# A192GCMKW
iex> encrypted_a192gcmkw = JOSE.JWE.block_encrypt(jwk_oct192, "{}", %{ "alg" => "A192GCMKW", "enc" => "A192GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct192, encrypted_a192gcmkw) |> elem(0)
"{}"
# A256GCMKW
iex> encrypted_a256gcmkw = JOSE.JWE.block_encrypt(jwk_oct256, "{}", %{ "alg" => "A256GCMKW", "enc" => "A256GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct256, encrypted_a256gcmkw) |> elem(0)
"{}"
### A128KW, A192KW, and A256KW
# A128KW
iex> encrypted_a128kw = JOSE.JWE.block_encrypt(jwk_oct128, "{}", %{ "alg" => "A128KW", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct128, encrypted_a128kw) |> elem(0)
"{}"
# A192KW
iex> encrypted_a192kw = JOSE.JWE.block_encrypt(jwk_oct192, "{}", %{ "alg" => "A192KW", "enc" => "A192GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct192, encrypted_a192kw) |> elem(0)
"{}"
# A256KW
iex> encrypted_a256kw = JOSE.JWE.block_encrypt(jwk_oct256, "{}", %{ "alg" => "A256KW", "enc" => "A256GCM" }) |> JOSE.JWE.compact |> elem(1)
"eyJhbGciOiJBMjU2S1ciLCJlbmMiOiJBMjU2R0NNIn0.OvAhC1a2BoP_2SMIiZXwIHWPoIkD-Cosgp3nlpiTs8ySUBPfPzwG1g.4GeackYJbuBksAWA.HPE.vG0sGC2kuklH5xk8KXhyNA"
iex> JOSE.JWE.block_decrypt(jwk_oct256, encrypted_a256kw) |> elem(0)
"{}"
### dir
The `"dir"` key derivation algorithm is essentially just a pass-through to the underlying `"enc"` algorithm.
The `"encrypted_key"` is not included in the protected header, so the key must be fully known by both parties.
# dir
iex> encrypted_dir = JOSE.JWE.block_encrypt(jwk_oct128, "{}", %{ "alg" => "dir", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct128, encrypted_dir) |> elem(0)
"{}"
### ECDH-ES, ECDH-ES+A128KW, ECDH-ES+A192KW, and ECDH-ES+A256KW
The `"ECDH-ES"` key derivation algorithm does not include the `"encrypted_key"` field in the protected header, similar to how `"dir"` functions.
The size of the generated key is dependent on the `"enc"` setting (for example, `"A128GCM"` will generate a 128-bit key, `"A256GCM"` a 256-bit key, etc).
# ECDH-ES with EC keypairs
iex> encrypted_ecdhes_ec256_alice2bob = JOSE.JWE.block_encrypt({jwk_ec256_bob_pk, jwk_ec256_alice_sk}, "{}", %{ "alg" => "ECDH-ES", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt({jwk_ec256_alice_pk, jwk_ec256_bob_sk}, encrypted_ecdhes_ec256_alice2bob) |> elem(0)
"{}"
# ECDH-ES with X25519 keypairs
iex> encrypted_ecdhes_x25519_alice2bob = JOSE.JWE.block_encrypt({jwk_x25519_bob_pk, jwk_x25519_alice_sk}, "{}", %{ "alg" => "ECDH-ES", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt({jwk_x25519_alice_pk, jwk_x25519_bob_sk}, encrypted_ecdhes_x25519_alice2bob) |> elem(0)
"{}"
# ECDH-ES with X448 keypairs
iex> encrypted_ecdhes_x448_alice2bob = JOSE.JWE.block_encrypt({jwk_x448_bob_pk, jwk_x448_alice_sk}, "{}", %{ "alg" => "ECDH-ES", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>WZIWSJ9fQ..T-UNE-wOApuRH71r.Uj8.l8bIfhC1UPAPVWBV3wkc6A"
iex> JOSE.JWE.block_decrypt({jwk_x448_alice_pk, jwk_x448_bob_sk}, encrypted_ecdhes_x448_alice2bob) |> elem(0)
"{}"
When decrypting with any of the `"ECDH-ES"` related algorithms, the other party's public key is recommended, but not required for decryption (the embedded Ephemeral Public Key will be used instead):
# decrypting the X448 example with and without the public key specified
iex> JOSE.JWE.block_decrypt({jwk_x448_alice_pk, jwk_x448_bob_sk}, encrypted_ecdhes_x448_alice2bob) |> elem(0)
"{}"
iex> JOSE.JWE.block_decrypt(jwk_x448_bob_sk, encrypted_ecdhes_x448_alice2bob) |> elem(0)
"{}"
The `"ECDH-ES+A128KW"`, `"ECDH-ES+A192KW"`, and `"ECDH-ES+A256KW"` key derivation algorithms do include the `"encrypted_key"` and the suffix after `"ECDH-ES+"` determines the key size (so `"ECDH-ES+A128KW"` computes a 128-bit key).
# ECDH-ES+A128KW with EC keypairs
iex> encrypted_ecdhesa128kw_alice2bob = JOSE.JWE.block_encrypt({jwk_ec256_bob_pk, jwk_ec256_alice_sk}, "{}", %{ "alg" => "ECDH-ES+A128KW", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt({jwk_ec256_alice_pk, jwk_ec256_bob_sk}, encrypted_ecdhesa128kw_alice2bob) |> elem(0)
"{}"
# ECDH-ES+A192KW with EC keypairs
iex> encrypted_ecdhesa192kw_alice2bob = JOSE.JWE.block_encrypt({jwk_ec256_bob_pk, jwk_ec256_alice_sk}, "{}", %{ "alg" => "ECDH-ES+A192KW", "enc" => "A192GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt({jwk_ec256_alice_pk, jwk_ec256_bob_sk}, encrypted_ecdhesa192kw_alice2bob) |> elem(0)
"{}"
# ECDH-ES+A256KW with EC keypairs
iex> encrypted_ecdhesa256kw_alice2bob = JOSE.JWE.block_encrypt({jwk_ec256_bob_pk, jwk_ec256_alice_sk}, "{}", %{ "alg" => "ECDH-ES+A256KW", "enc" => "A256GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt({jwk_ec256_alice_pk, jwk_ec256_bob_sk}, encrypted_ecdhesa256kw_alice2bob) |> elem(0)
"{}"
See `JOSE.JWK.box_encrypt/2` for generating an Ephemeral Public Key based on the same curve as the supplied other party key in the same step.
### PBES2-HS256+A128KW, PBES2-HS384+A192KW, and PBES2-HS512+A256KW
# PBES2-HS256+A128KW
iex> encrypted_pbes2hs256a128kw = JOSE.JWE.block_encrypt(jwk_secret, "{}", %{ "alg" => "PBES2-HS256+A128KW", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>8AjpjkcqJGpYe53VRf2s.vVEb2ZtKmtPIw8M-.Cmg.<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_secret, encrypted_pbes2hs256a128kw) |> elem(0)
"{}"
# PBES2-HS384+A192KW
iex> encrypted_pbes2hs384a192kw = JOSE.JWE.block_encrypt(jwk_secret, "{}", %{ "alg" => "PBES2-HS384+A192KW", "enc" => "A192GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_secret, encrypted_pbes2hs384a192kw) |> elem(0)
"{}"
# PBES2-HS512+A256KW
iex> encrypted_pbes2hs512a256kw = JOSE.JWE.block_encrypt(jwk_secret, "{}", %{ "alg" => "PBES2-HS512+A256KW", "enc" => "A256GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_secret, encrypted_pbes2hs512a256kw) |> elem(0)
"{}"
The `"p2s"` and `"p2i"` fields may also be specified to control the Salt and Iterations of the PBES2 Key Derivation Function, respectively.
The default Salt is a randomly generated binary the same length of bytes as the key wrap (for example, `"PBES2-HS256+A128KW"` will generate a 16-byte Salt).
The default Iterations is 32 times the number of bits specified by the key wrap (for example, `"PBES2-HS256+A128KW"` will have 4096 Iterations).
# let's setup the JWE header
iterations = 8192
salt = <<0::256>> # all zero salt, for example usage only
jwe = %{
"alg" => "PBES2-HS256+A128KW",
"enc" => "A128GCM",
"p2i" => iterations,
"p2s" => :base64url.encode(salt)
}
# PBES2-HS256+A128KW
iex> encrypted_pbes2 = JOSE.JWE.block_encrypt(jwk_secret, "{}", jwe) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_secret, encrypted_pbes2) |> elem(0)
"{}"
### RSA1_5, RSA-OAEP, and RSA-OAEP-256
# RSA1_5
iex> encrypted_rsa1_5 = JOSE.JWE.block_encrypt(jwk_rsa_pk, "{}", %{ "alg" => "RSA1_5", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_rsa_sk, encrypted_rsa1_5) |> elem(0)
"{}"
# RSA-OAEP
iex> encrypted_rsaoaep = JOSE.JWE.block_encrypt(jwk_rsa_pk, "{}", %{ "alg" => "RSA-OAEP", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_rsa_sk, encrypted_rsaoaep) |> elem(0)
"{}"
# RSA-OAEP-256
iex> encrypted_rsaoaep256 = JOSE.JWE.block_encrypt(jwk_rsa_pk, "{}", %{ "alg" => "RSA-OAEP-256", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"eyJhbGciOiJSU0EtT0FFUC0yNTYiLCJlbmMiOiJBMTI4R0NNIn0.OW9Hy9qpOIgVueODQXcWIUw_-Sm3UFGtxosyOAaI6JUQFt8q-iEtKkUp4NHrOlczO6tP5t8zRKdNXFfCm9QZk6F9PsSO-NzE2-DV1ANAMck-CDfGTK0mwG5U_KZwlObSgU0gxf87K49Wuno1rWlHWzJb__C_hCJXi_aQW17tLmbuTpJMkB0NTCKX3y6QaxvynP98jqwMJT6uGmE3AeuZYhPGzAOWbltbWyw-TqWqyLJirAUY_fvDNsKt1TDrTd9216TK5y7RQeUtdGfbuYK9lt2TIwfh9ycAHd7SANH_YJc2cKYa3e6CgqnQAjVpbhpogBz5sz5HaK95XYbXOdnYyHQ00gS44YquiQCvX331UgEWnthtmYwDZfnCxTkPydafGOBsjaagGvV2tQtxUKW3JmVChF97bNj5lQZ7rAkyooxx-k3IMT0005x6_74O5tXGN5fb7oyT3Mx_NZ5dKzlYAA_V8oOpNslaFhV5K5Q_-hRkUsEPWdaD5s2uS9Z7l7ot39CzzTKDj65f2eCTWFReFKOjhabCL4ZiFXbElB3dA3y5FdxXPAfe6N31G9ynalx1JIcrEaRb8sdqk6U6uC3s3DpkoRSnp3osBJOxxuk_Lgb-ZM9d8UuRVj4W78-qjfX_lcG1RlRmlYoDIU03ly0UfRWi-7HmpPECrGTsGZEfULg.J-txckmMXEi-bZVh.Rbw.D7UpSkticmDCGiNyLVggLg"
iex> JOSE.JWE.block_decrypt(jwk_rsa_sk, encrypted_rsaoaep256) |> elem(0)
"{}"
## Encryption Examples
All of the examples below will use `"alg"` set to `"dir"` passing the key directly to the Encryption Algorithm.
The octet key used will typically be all zeroes of the required size in the form of `<<0::128>>` (for a 128-bit key).
All of the example keys generated below can be found here: [https://gist.github.com/potatosalad/dd140560b2bdbdab886d](https://gist.github.com/potatosalad/dd140560b2bdbdab886d)
# octet keys we'll use below
jwk_oct128 = JOSE.JWK.from_oct(<<0::128>>)
jwk_oct192 = JOSE.JWK.from_oct(<<0::192>>)
jwk_oct256 = JOSE.JWK.from_oct(<<0::256>>)
jwk_oct384 = JOSE.JWK.from_oct(<<0::384>>)
jwk_oct512 = JOSE.JWK.from_oct(<<0::512>>)
### A128CBC-HS256, A192CBC-HS384, and A256CBC-HS512
# A128CBC-HS256
iex> encrypted_a128cbchs256 = JOSE.JWE.block_encrypt(jwk_oct256, "{}", %{ "alg" => "dir", "enc" => "A128CBC-HS256" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>3AqrqJ4f5PHjGseHYw.kopJoTDxk34IVhheoToLSA"
iex> JOSE.JWE.block_decrypt(jwk_oct256, encrypted_a128cbchs256) |> elem(0)
"{}"
# A192CBC-HS384
iex> encrypted_a192cbchs384 = JOSE.JWE.block_encrypt(jwk_oct384, "{}", %{ "alg" => "dir", "enc" => "A192CBC-HS384" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct384, encrypted_a192cbchs384) |> elem(0)
"{}"
# A256CBC-HS512
iex> encrypted_a256cbchs512 = JOSE.JWE.block_encrypt(jwk_oct512, "{}", %{ "alg" => "dir", "enc" => "A256CBC-HS512" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct512, encrypted_a256cbchs512) |> elem(0)
"{}"
### A128GCM, A192GCM, and A256GCM
# A128GCM
iex> encrypted_a128gcm = JOSE.JWE.block_encrypt(jwk_oct128, "{}", %{ "alg" => "dir", "enc" => "A128GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct128, encrypted_a128gcm) |> elem(0)
"{}"
# A192GCM
iex> encrypted_a192gcm = JOSE.JWE.block_encrypt(jwk_oct192, "{}", %{ "alg" => "dir", "enc" => "A192GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct192, encrypted_a192gcm) |> elem(0)
"{}"
# A256GCM
iex> encrypted_a256gcm = JOSE.JWE.block_encrypt(jwk_oct256, "{}", %{ "alg" => "dir", "enc" => "A256GCM" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct256, encrypted_a256gcm) |> elem(0)
"{}"
### ChaCha20/Poly1305
This is highly experimental and based on [RFC 7539](https://tools.ietf.org/html/rfc7539).
# ChaCha20/Poly1305
iex> encrypted_chacha20_poly1305 = JOSE.JWE.block_encrypt(jwk_oct256, "{}", %{ "alg" => "dir", "enc" => "ChaCha20/Poly1305" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct256, encrypted_chacha20_poly1305) |> elem(0)
"{}"
## Compression Examples
All of the examples below will use `"alg"` set to `"dir"` passing the key directly to the Encryption Algorithm (`"enc"` is set to `"A128GCM"`).
The octet key used will typically be all zeroes of the required size in the form of `<<0::128>>` (for a 128-bit key).
All of the example keys generated below can be found here: [https://gist.github.com/potatosalad/dd140560b2bdbdab886d](https://gist.github.com/potatosalad/dd140560b2bdbdab886d)
# octet keys we'll use below
jwk_oct128 = JOSE.JWK.from_oct(<<0::128>>)
### DEF
# DEF
iex> encrypted_def = JOSE.JWE.block_encrypt(jwk_oct128, "{}", %{ "alg" => "dir", "enc" => "A128GCM", "zip" => "DEF" }) |> JOSE.JWE.compact |> elem(1)
"<KEY>"
iex> JOSE.JWE.block_decrypt(jwk_oct128, encrypted_def) |> elem(0)
"{}"
"""
# Mirror the Erlang `:jose_jwe` record as an Elixir struct at compile time:
# extract the record definition from the jose package, derive the key list,
# and build the quoted variable/pair lists consumed by the
# `unquote_splicing` calls in `to_record/1` and `from_record/1` below.
record = Record.extract(:jose_jwe, from_lib: "jose/include/jose_jwe.hrl")
keys = :lists.map(&elem(&1, 0), record)
vals = :lists.map(&{&1, [], nil}, keys)
pairs = :lists.zip(keys, vals)
defstruct keys
@type t :: %__MODULE__{}
@doc """
Converts a `JOSE.JWE` struct to a `:jose_jwe` record.
"""
def to_record(%JOSE.JWE{unquote_splicing(pairs)}) do
{:jose_jwe, unquote_splicing(vals)}
end
def to_record(list) when is_list(list), do: for element <- list, into: [], do: to_record(element)
@doc """
Converts a `:jose_jwe` record into a `JOSE.JWE`.
"""
def from_record(jose_jwe)
def from_record({:jose_jwe, unquote_splicing(vals)}) do
%JOSE.JWE{unquote_splicing(pairs)}
end
def from_record(list) when is_list(list), do: for element <- list, into: [], do: from_record(element)
## Decode API

@doc """
Converts a binary or map into a `JOSE.JWE`.

    iex> JOSE.JWE.from(%{ "alg" => "dir" })
    %JOSE.JWE{alg: {:jose_jwe_alg_dir, :dir}, enc: :undefined, fields: %{},
     zip: :undefined}
    iex> JOSE.JWE.from("{\"alg\":\"dir\"}")
    %JOSE.JWE{alg: {:jose_jwe_alg_dir, :dir}, enc: :undefined, fields: %{},
     zip: :undefined}

There are 3 keys which can have custom modules defined for them:

  * `"alg"` - must implement `:jose_jwe` and `:jose_jwe_alg` behaviours
  * `"enc"` - must implement `:jose_jwe` and `:jose_jwe_enc` behaviours
  * `"zip"` - must implement `:jose_jwe` and `:jose_jwe_zip` behaviours

For example:

    iex> JOSE.JWE.from({%{ zip: MyCustomCompress }, %{ "alg" => "dir", "zip" => "custom" }})
    %JOSE.JWE{alg: {:jose_jwe_alg_dir, :dir}, enc: :undefined, fields: %{},
     zip: {MyCustomCompress, :state}}
"""
# Lists are converted element-wise; structs are normalized to records
# before delegating to the Erlang `:jose_jwe` module.
def from(list) when is_list(list), do: for element <- list, into: [], do: from(element)
def from(jwe=%JOSE.JWE{}), do: from(to_record(jwe))
def from(any), do: :jose_jwe.from(any) |> from_record()
@doc """
Converts a binary into a `JOSE.JWE`.
"""
def from_binary(list) when is_list(list), do: for element <- list, into: [], do: from_binary(element)
def from_binary(binary), do: :jose_jwe.from_binary(binary) |> from_record()
@doc """
Reads file and calls `from_binary/1` to convert into a `JOSE.JWE`.
"""
def from_file(file), do: :jose_jwe.from_file(file) |> from_record()
@doc """
Converts a map into a `JOSE.JWE`.
"""
def from_map(list) when is_list(list), do: for element <- list, into: [], do: from_map(element)
def from_map(map), do: :jose_jwe.from_map(map) |> from_record()
## Encode API

@doc """
Converts a `JOSE.JWE` into a binary.
"""
def to_binary(list) when is_list(list), do: Enum.map(list, &to_binary/1)
def to_binary(jwe = %JOSE.JWE{}), do: jwe |> to_record() |> to_binary()
def to_binary(any), do: :jose_jwe.to_binary(any)
@doc """
Calls `to_binary/1` on a `JOSE.JWE` and then writes the binary to file.
"""
def to_file(file, jwe=%JOSE.JWE{}), do: to_file(file, to_record(jwe))
def to_file(file, any), do: :jose_jwe.to_file(file, any)
@doc """
Converts a `JOSE.JWE` into a map.
"""
def to_map(list) when is_list(list), do: for element <- list, into: [], do: to_map(element)
def to_map(jwe=%JOSE.JWE{}), do: to_map(to_record(jwe))
def to_map(any), do: :jose_jwe.to_map(any)
## API

@doc """
Decrypts the `encrypted` binary or map using the `jwk`.

    iex> jwk = JOSE.JWK.from(%{"k" => "STlqtIOhWJjoVnYjUjxFLZ6oN1oB70QARGSTWQ_5XgM", "kty" => "oct"})
    %JOSE.JWK{fields: %{}, keys: :undefined,
     kty: {:jose_jwk_kty_oct,
      <<73, 57, 106, 180, 131, 161, 88, 152, 232, 86, 118, 35, 82, 60, 69, 45, 158, 168, 55, 90, 1, 239, 68, 0, 68, 100, 147, 89, 15, 249, 94, 3>>}}
    iex> JOSE.JWE.block_decrypt(jwk, "<KEY>")
    {"{}",
     %JOSE.JWE{alg: {:jose_jwe_alg_dir, :dir},
      enc: {:jose_jwe_enc_aes,
       {:jose_jwe_enc_aes, {:aes_cbc, 128}, 256, 32, 16, 16, 16, 16, :sha256}},
      fields: %{}, zip: :undefined}}

See `block_encrypt/2`.
"""
# The first three clauses normalize %JOSE.JWK{} structs — including
# {other_party_public, my_private} key-agreement pairs — to Erlang records.
def block_decrypt(jwk=%JOSE.JWK{}, encrypted), do: block_decrypt(JOSE.JWK.to_record(jwk), encrypted)
def block_decrypt({your_public_jwk=%JOSE.JWK{}, my_private_jwk}, encrypted), do: block_decrypt({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, encrypted)
def block_decrypt({your_public_jwk, my_private_jwk=%JOSE.JWK{}}, encrypted), do: block_decrypt({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, encrypted)

def block_decrypt(jwk, encrypted) do
  case :jose_jwe.block_decrypt(jwk, encrypted) do
    {plain_text, jwe} when is_tuple(jwe) ->
      {plain_text, from_record(jwe)}

    # Non-tuple results (e.g. :error) are passed through untouched.
    error ->
      error
  end
end
@doc """
Encrypts `plain_text` using the `jwk` and algorithm specified by the `jwe` by getting the `cek` for `block_encrypt/4`.
"""
def block_encrypt(jwk=%JOSE.JWK{}, plain_text, jwe), do: block_encrypt(JOSE.JWK.to_record(jwk), plain_text, jwe)
def block_encrypt({your_public_jwk=%JOSE.JWK{}, my_private_jwk}, plain_text, jwe), do: block_encrypt({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, plain_text, jwe)
def block_encrypt({your_public_jwk, my_private_jwk=%JOSE.JWK{}}, plain_text, jwe), do: block_encrypt({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, plain_text, jwe)
def block_encrypt(jwk, plain_text, jwe=%JOSE.JWE{}), do: block_encrypt(jwk, plain_text, to_record(jwe))
def block_encrypt(jwk, plain_text, jwe), do: :jose_jwe.block_encrypt(jwk, plain_text, jwe)
@doc """
Encrypts `plain_text` using the `jwk`, `cek`, and algorithm specified by the `jwe` by getting the `iv` for `block_encrypt/5`.
"""
def block_encrypt(jwk=%JOSE.JWK{}, plain_text, cek, jwe), do: block_encrypt(JOSE.JWK.to_record(jwk), plain_text, cek, jwe)
def block_encrypt({your_public_jwk=%JOSE.JWK{}, my_private_jwk}, plain_text, cek, jwe), do: block_encrypt({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, plain_text, cek, jwe)
def block_encrypt({your_public_jwk, my_private_jwk=%JOSE.JWK{}}, plain_text, cek, jwe), do: block_encrypt({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, plain_text, cek, jwe)
def block_encrypt(jwk, plain_text, cek, jwe=%JOSE.JWE{}), do: block_encrypt(jwk, plain_text, cek, to_record(jwe))
def block_encrypt(jwk, plain_text, cek, jwe), do: :jose_jwe.block_encrypt(jwk, plain_text, cek, jwe)
@doc """
Encrypts the `plain_text` using the `jwk`, `cek`, `iv`, and algorithm specified by the `jwe`.
iex> jwk = JOSE.JWK.from(%{"k" => "STlqtIOhWJjoVnYjUjxFLZ6oN1oB70QARGSTWQ_5XgM", "kty" => "oct"})
%JOSE.JWK{fields: %{}, keys: :undefined,
kty: {:jose_jwk_kty_oct,
<<73, 57, 106, 180, 131, 161, 88, 152, 232, 86, 118, 35, 82, 60, 69, 45, 158, 168, 55, 90, 1, 239, 68, 0, 68, 100, 147, 89, 15, 249, 94, 3>>}}
iex> JOSE.JWE.block_encrypt(jwk, "{}", %{ "alg" => "dir", "enc" => "A128CBC-HS256" })
{%{alg: :jose_jwe_alg_dir, enc: :jose_jwe_enc_aes},
%{"ciphertext" => "Ei49MvTLLje7bsZ5EZCZMA", "encrypted_key" => "",
"iv" => "jBt5tTa1Q0N3uFPEkf30MQ",
"protected" => "<KEY>",
"tag" => "gMWOAmhZSq9ksHCZm6VSoA"}}
See `block_decrypt/2`.
"""
def block_encrypt(jwk=%JOSE.JWK{}, plain_text, cek, iv, jwe), do: block_encrypt(JOSE.JWK.to_record(jwk), plain_text, cek, iv, jwe)
def block_encrypt({your_public_jwk=%JOSE.JWK{}, my_private_jwk}, plain_text, cek, iv, jwe), do: block_encrypt({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, plain_text, cek, iv, jwe)
def block_encrypt({your_public_jwk, my_private_jwk=%JOSE.JWK{}}, plain_text, cek, iv, jwe), do: block_encrypt({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, plain_text, cek, iv, jwe)
def block_encrypt(jwk, plain_text, cek, iv, jwe=%JOSE.JWE{}), do: block_encrypt(jwk, plain_text, cek, iv, to_record(jwe))
def block_encrypt(jwk, plain_text, cek, iv, jwe), do: :jose_jwe.block_encrypt(jwk, plain_text, cek, iv, jwe)
@doc """
Compacts an expanded encrypted map into a binary.
iex> JOSE.JWE.compact(%{"ciphertext" => "Ei49MvTLLje7bsZ5EZCZMA", "encrypted_key" => "",
"iv" => "jBt5tTa1Q0N3uFPEkf30MQ",
"protected" => "<KEY>",
"tag" => "gMWOAmhZSq9ksHCZm6VSoA"})
{%{},
"eyJhbGciOiJkaXIiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0..jBt5tTa1Q0N3uFPEkf30MQ.Ei49MvTLLje7bsZ5EZCZMA.gMWOAmhZSq9ksHCZm6VSoA"}
See `expand/1`.
"""
defdelegate compact(encrypted), to: :jose_jwe
@doc """
Compresses `plain_text` using the `"zip"` algorithm specified by the `jwe`.

See `uncompress/2`.
"""
def compress(plain_text, %JOSE.JWE{} = jwe) do
  compress(plain_text, to_record(jwe))
end

def compress(plain_text, jwe) do
  :jose_jwe.compress(plain_text, jwe)
end
@doc """
Expands a compact encrypted binary into its map form.

Delegates to `:jose_jwe.expand/1`.

See `compact/1`.
"""
defdelegate expand(encrypted), to: :jose_jwe
@doc """
Generates a new `JOSE.JWK` based on the algorithms of the specified `JOSE.JWE`.

Given a list of JWE descriptions, returns one generated key per element.
"""
def generate_key(list) when is_list(list), do: Enum.map(list, &generate_key/1)
def generate_key(%JOSE.JWE{} = jwe), do: generate_key(to_record(jwe))
def generate_key(other), do: JOSE.JWK.from_record(:jose_jwe.generate_key(other))
@doc """
Decrypts the `encrypted_key` using the `jwk` and the `"alg"` and `"enc"`
specified by the `jwe`.

Structs (or tuples of structs) are normalized to record form before
delegating to `:jose_jwe.key_decrypt/3`.

See `key_encrypt/3`.
"""
def key_decrypt(%JOSE.JWK{} = jwk, encrypted_key, jwe) do
  key_decrypt(JOSE.JWK.to_record(jwk), encrypted_key, jwe)
end

def key_decrypt({%JOSE.JWK{} = your_public_jwk, my_private_jwk}, encrypted_key, jwe) do
  key_decrypt({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, encrypted_key, jwe)
end

def key_decrypt({your_public_jwk, %JOSE.JWK{} = my_private_jwk}, encrypted_key, jwe) do
  key_decrypt({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, encrypted_key, jwe)
end

def key_decrypt(jwk, encrypted_key, %JOSE.JWE{} = jwe) do
  key_decrypt(jwk, encrypted_key, to_record(jwe))
end

def key_decrypt(jwk, encrypted_key, jwe) do
  :jose_jwe.key_decrypt(jwk, encrypted_key, jwe)
end
@doc """
Encrypts the `decrypted_key` using the `jwk` and the `"alg"` and `"enc"`
specified by the `jwe`.

On success returns `{encrypted_key, jwe}` where `jwe` has been converted
back into a `JOSE.JWE` struct; any other result from `:jose_jwe.key_encrypt/3`
is passed through unchanged.

See `key_decrypt/3`.
"""
def key_encrypt(%JOSE.JWK{} = jwk, decrypted_key, jwe) do
  key_encrypt(JOSE.JWK.to_record(jwk), decrypted_key, jwe)
end

def key_encrypt({%JOSE.JWK{} = your_public_jwk, my_private_jwk}, decrypted_key, jwe) do
  key_encrypt({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, decrypted_key, jwe)
end

def key_encrypt({your_public_jwk, %JOSE.JWK{} = my_private_jwk}, decrypted_key, jwe) do
  key_encrypt({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, decrypted_key, jwe)
end

def key_encrypt(jwk, decrypted_key, %JOSE.JWE{} = jwe) do
  key_encrypt(jwk, decrypted_key, to_record(jwe))
end

def key_encrypt(jwk, decrypted_key, jwe) do
  case :jose_jwe.key_encrypt(jwk, decrypted_key, jwe) do
    {encrypted_key, jwe_record} when is_tuple(jwe_record) ->
      {encrypted_key, from_record(jwe_record)}

    other ->
      other
  end
end
@doc """
Merges map on right into map on left, returning the result as a `JOSE.JWE` struct.
"""
def merge(%JOSE.JWE{} = left, right), do: merge(to_record(left), right)
def merge(left, %JOSE.JWE{} = right), do: merge(left, to_record(right))
def merge(left, right), do: from_record(:jose_jwe.merge(left, right))
@doc """
Returns the next content encryption key (`cek`) for the `"alg"` and `"enc"`
specified by the `jwe`.

When the `"alg"` is `"dir"`, the key material of the `jwk` itself is used.
"""
def next_cek(%JOSE.JWK{} = jwk, jwe) do
  next_cek(JOSE.JWK.to_record(jwk), jwe)
end

def next_cek({%JOSE.JWK{} = your_public_jwk, my_private_jwk}, jwe) do
  next_cek({JOSE.JWK.to_record(your_public_jwk), my_private_jwk}, jwe)
end

def next_cek({your_public_jwk, %JOSE.JWK{} = my_private_jwk}, jwe) do
  next_cek({your_public_jwk, JOSE.JWK.to_record(my_private_jwk)}, jwe)
end

def next_cek(jwk, %JOSE.JWE{} = jwe) do
  next_cek(jwk, to_record(jwe))
end

def next_cek(jwk, jwe) do
  :jose_jwe.next_cek(jwk, jwe)
end
@doc """
Returns the next initialization vector for the `"alg"` and `"enc"` specified
by the `jwe` (typically random bytes sized for the `"enc"` algorithm).
"""
def next_iv(%JOSE.JWE{} = jwe), do: next_iv(to_record(jwe))
def next_iv(jwe), do: :jose_jwe.next_iv(jwe)
@doc """
Uncompresses the `cipher_text` using the `"zip"` algorithm specified by the `jwe`.

See `compress/2`.
"""
def uncompress(cipher_text, %JOSE.JWE{} = jwe), do: uncompress(cipher_text, to_record(jwe))
def uncompress(cipher_text, jwe), do: :jose_jwe.uncompress(cipher_text, jwe)
end
|
backend/deps/jose/lib/jose/jwe.ex
| 0.836321 | 0.548553 |
jwe.ex
|
starcoder
|
# Generated proto3 message (protobuf-elixir DSL) — regenerate from the .proto rather than editing by hand.
# Unsigned big integer; `value` holds the raw byte encoding (byte order not shown here — confirm in encoder).
defmodule ForgeAbi.BigUint do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          value: binary
        }
  defstruct [:value]

  field :value, 1, type: :bytes
end
# Generated proto3 message — signed big integer: `minus` carries the sign, `value` the magnitude bytes.
defmodule ForgeAbi.BigSint do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          value: binary,
          minus: boolean
        }
  defstruct [:value, :minus]

  field :value, 1, type: :bytes
  field :minus, 2, type: :bool
end
# Generated proto3 message describing a wallet's cryptographic configuration; every field is an enum.
defmodule ForgeAbi.WalletType do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          pk: ForgeAbi.KeyType.t(),
          hash: ForgeAbi.HashType.t(),
          address: ForgeAbi.EncodingType.t(),
          role: ForgeAbi.RoleType.t()
        }
  defstruct [:pk, :hash, :address, :role]

  field :pk, 1, type: ForgeAbi.KeyType, enum: true
  field :hash, 2, type: ForgeAbi.HashType, enum: true
  field :address, 3, type: ForgeAbi.EncodingType, enum: true
  field :role, 4, type: ForgeAbi.RoleType, enum: true
end
# Generated proto3 message for a wallet (secret key, public key, address).
defmodule ForgeAbi.WalletInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          type: ForgeAbi.WalletType.t() | nil,
          sk: binary,
          pk: binary,
          address: String.t()
        }
  defstruct [:type, :sk, :pk, :address]

  # `type` is marked deprecated in the schema — prefer the other fields; confirm replacement in the .proto.
  field :type, 1, type: ForgeAbi.WalletType, deprecated: true
  field :sk, 2, type: :bytes
  field :pk, 3, type: :bytes
  field :address, 4, type: :string
end
# Generated map-entry message backing ChainInfo.forge_apps_version (string => string).
defmodule ForgeAbi.ChainInfo.ForgeAppsVersionEntry do
  @moduledoc false
  use Protobuf, map: true, syntax: :proto3

  @type t :: %__MODULE__{
          key: String.t(),
          value: String.t()
        }
  defstruct [:key, :value]

  field :key, 1, type: :string
  field :value, 2, type: :string
end
# Generated proto3 message with chain-level status/metadata.
# Field number 14 is skipped — presumably reserved/removed; confirm against the .proto.
defmodule ForgeAbi.ChainInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          id: String.t(),
          network: String.t(),
          moniker: String.t(),
          consensus_version: String.t(),
          synced: boolean,
          app_hash: binary,
          block_hash: binary,
          block_height: non_neg_integer,
          block_time: Google.Protobuf.Timestamp.t() | nil,
          address: String.t(),
          voting_power: non_neg_integer,
          total_txs: non_neg_integer,
          version: String.t(),
          forge_apps_version: %{String.t() => String.t()},
          supported_txs: [String.t()]
        }
  defstruct [
    :id,
    :network,
    :moniker,
    :consensus_version,
    :synced,
    :app_hash,
    :block_hash,
    :block_height,
    :block_time,
    :address,
    :voting_power,
    :total_txs,
    :version,
    :forge_apps_version,
    :supported_txs
  ]

  field :id, 1, type: :string
  field :network, 2, type: :string
  field :moniker, 3, type: :string
  field :consensus_version, 4, type: :string
  field :synced, 5, type: :bool
  field :app_hash, 6, type: :bytes
  field :block_hash, 7, type: :bytes
  field :block_height, 8, type: :uint64
  field :block_time, 9, type: Google.Protobuf.Timestamp
  field :address, 10, type: :string
  field :voting_power, 11, type: :uint64
  field :total_txs, 12, type: :uint64
  field :version, 13, type: :string

  field :forge_apps_version, 15,
    repeated: true,
    type: ForgeAbi.ChainInfo.ForgeAppsVersionEntry,
    map: true

  field :supported_txs, 16, repeated: true, type: :string
end
# Generated map-entry message backing NodeInfo.forge_apps_version (string => string).
defmodule ForgeAbi.NodeInfo.ForgeAppsVersionEntry do
  @moduledoc false
  use Protobuf, map: true, syntax: :proto3

  @type t :: %__MODULE__{
          key: String.t(),
          value: String.t()
        }
  defstruct [:key, :value]

  field :key, 1, type: :string
  field :value, 2, type: :string
end
# Generated proto3 message with node-level status — a superset of ChainInfo's fields
# plus `ip`, `geo_info` and `p2p_address`. Field number 14 is skipped (presumably reserved).
defmodule ForgeAbi.NodeInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          id: String.t(),
          network: String.t(),
          moniker: String.t(),
          consensus_version: String.t(),
          synced: boolean,
          app_hash: binary,
          block_hash: binary,
          block_height: non_neg_integer,
          block_time: Google.Protobuf.Timestamp.t() | nil,
          address: String.t(),
          voting_power: non_neg_integer,
          total_txs: non_neg_integer,
          version: String.t(),
          forge_apps_version: %{String.t() => String.t()},
          supported_txs: [String.t()],
          ip: String.t(),
          geo_info: ForgeAbi.GeoInfo.t() | nil,
          p2p_address: String.t()
        }
  defstruct [
    :id,
    :network,
    :moniker,
    :consensus_version,
    :synced,
    :app_hash,
    :block_hash,
    :block_height,
    :block_time,
    :address,
    :voting_power,
    :total_txs,
    :version,
    :forge_apps_version,
    :supported_txs,
    :ip,
    :geo_info,
    :p2p_address
  ]

  field :id, 1, type: :string
  field :network, 2, type: :string
  field :moniker, 3, type: :string
  field :consensus_version, 4, type: :string
  field :synced, 5, type: :bool
  field :app_hash, 6, type: :bytes
  field :block_hash, 7, type: :bytes
  field :block_height, 8, type: :uint64
  field :block_time, 9, type: Google.Protobuf.Timestamp
  field :address, 10, type: :string
  field :voting_power, 11, type: :uint64
  field :total_txs, 12, type: :uint64
  field :version, 13, type: :string

  field :forge_apps_version, 15,
    repeated: true,
    type: ForgeAbi.NodeInfo.ForgeAppsVersionEntry,
    map: true

  field :supported_txs, 16, repeated: true, type: :string
  field :ip, 17, type: :string
  field :geo_info, 18, type: ForgeAbi.GeoInfo
  field :p2p_address, 19, type: :string
end
# Generated proto3 message: a validator's address and voting power.
defmodule ForgeAbi.Validator do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          power: non_neg_integer
        }
  defstruct [:address, :power]

  field :address, 1, type: :string
  field :power, 2, type: :uint64
end
# Generated proto3 message with consensus-level limits and validator set parameters.
defmodule ForgeAbi.ConsensusParams do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          max_bytes: non_neg_integer,
          max_gas: integer,
          max_validators: non_neg_integer,
          max_candidates: non_neg_integer,
          pub_key_types: [String.t()],
          validators: [ForgeAbi.Validator.t()],
          validator_changed: boolean,
          param_changed: boolean
        }
  defstruct [
    :max_bytes,
    :max_gas,
    :max_validators,
    :max_candidates,
    :pub_key_types,
    :validators,
    :validator_changed,
    :param_changed
  ]

  field :max_bytes, 1, type: :uint64
  # sint64: zig-zag encoded, so negative values (e.g. "no limit" sentinels) stay compact.
  field :max_gas, 2, type: :sint64
  field :max_validators, 3, type: :uint32
  field :max_candidates, 4, type: :uint32
  field :pub_key_types, 5, repeated: true, type: :string
  field :validators, 6, repeated: true, type: ForgeAbi.Validator
  field :validator_changed, 7, type: :bool
  field :param_changed, 8, type: :bool
end
# Generated proto3 message describing an upgrade task. Field 3 is skipped (presumably reserved).
# NOTE(review): the typespec declares `actions` as a nested list `[[...]]` while the field is a
# flat repeated enum — looks like a generator artifact; confirm against the .proto.
defmodule ForgeAbi.UpgradeTask do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          type: ForgeAbi.UpgradeType.t(),
          data_hash: String.t(),
          actions: [[ForgeAbi.UpgradeAction.t()]]
        }
  defstruct [:type, :data_hash, :actions]

  field :type, 1, type: ForgeAbi.UpgradeType, enum: true
  field :data_hash, 2, type: :string
  field :actions, 4, repeated: true, type: ForgeAbi.UpgradeAction, enum: true
end
# Generated proto3 wrapper message: a list of UpgradeTask items.
defmodule ForgeAbi.UpgradeTasks do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          item: [ForgeAbi.UpgradeTask.t()]
        }
  defstruct [:item]

  field :item, 1, repeated: true, type: ForgeAbi.UpgradeTask
end
# Generated proto3 message carrying per-transaction ABCI execution context.
defmodule ForgeAbi.AbciContext do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          tx_hash: String.t(),
          block_height: non_neg_integer,
          block_time: Google.Protobuf.Timestamp.t() | nil,
          total_txs: non_neg_integer,
          tx_statistics: ForgeAbi.TxStatistics.t() | nil,
          tx_index: non_neg_integer,
          last_block_time: Google.Protobuf.Timestamp.t() | nil
        }
  defstruct [
    :tx_hash,
    :block_height,
    :block_time,
    :total_txs,
    :tx_statistics,
    :tx_index,
    :last_block_time
  ]

  field :tx_hash, 1, type: :string
  field :block_height, 2, type: :uint64
  field :block_time, 3, type: Google.Protobuf.Timestamp
  field :total_txs, 4, type: :uint64
  field :tx_statistics, 5, type: ForgeAbi.TxStatistics
  field :tx_index, 6, type: :uint32
  field :last_block_time, 7, type: Google.Protobuf.Timestamp
end
# Generated proto3 message: one co-signer's entry in a multisig transaction.
# `data` sits at field 15 — gap from 4 suggests room reserved for future fields.
defmodule ForgeAbi.Multisig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          signer: String.t(),
          pk: binary,
          signature: binary,
          delegator: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:signer, :pk, :signature, :delegator, :data]

  field :signer, 1, type: :string
  field :pk, 2, type: :bytes
  field :signature, 3, type: :bytes
  field :delegator, 4, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
# Generated proto3 message for a signed transaction; `itx` holds the inner transaction as an Any.
# Field numbers 7-12 are skipped (presumably reserved); confirm against the .proto.
defmodule ForgeAbi.Transaction do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          from: String.t(),
          nonce: non_neg_integer,
          chain_id: String.t(),
          pk: binary,
          gas: non_neg_integer,
          delegator: String.t(),
          signature: binary,
          signatures: [ForgeAbi.Multisig.t()],
          itx: Google.Protobuf.Any.t() | nil
        }
  defstruct [:from, :nonce, :chain_id, :pk, :gas, :delegator, :signature, :signatures, :itx]

  field :from, 1, type: :string
  field :nonce, 2, type: :uint64
  field :chain_id, 3, type: :string
  field :pk, 4, type: :bytes
  field :gas, 5, type: :uint32
  field :delegator, 6, type: :string
  field :signature, 13, type: :bytes
  field :signatures, 14, repeated: true, type: ForgeAbi.Multisig
  field :itx, 15, type: Google.Protobuf.Any
end
# Generated proto3 message: a transaction plus its on-chain placement and execution result.
defmodule ForgeAbi.TransactionInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          tx: ForgeAbi.Transaction.t() | nil,
          height: non_neg_integer,
          index: non_neg_integer,
          hash: String.t(),
          tags: [AbciVendor.KVPair.t()],
          code: ForgeAbi.StatusCode.t(),
          time: Google.Protobuf.Timestamp.t() | nil
        }
  defstruct [:tx, :height, :index, :hash, :tags, :code, :time]

  field :tx, 1, type: ForgeAbi.Transaction
  field :height, 2, type: :uint64
  field :index, 3, type: :uint32
  field :hash, 4, type: :string
  field :tags, 5, repeated: true, type: AbciVendor.KVPair
  field :code, 6, type: ForgeAbi.StatusCode, enum: true
  field :time, 7, type: Google.Protobuf.Timestamp
end
# Generated proto3 message: configuration for declare transactions.
defmodule ForgeAbi.DeclareConfig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          restricted: boolean,
          hierarchy: non_neg_integer,
          cost: ForgeAbi.BigUint.t() | nil
        }
  defstruct [:restricted, :hierarchy, :cost]

  field :restricted, 1, type: :bool
  field :hierarchy, 2, type: :uint32
  field :cost, 3, type: ForgeAbi.BigUint
end
# Generated proto3 message: configuration for delegate transactions.
defmodule ForgeAbi.DelegateConfig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          delta_interval: non_neg_integer,
          type_urls: [String.t()]
        }
  defstruct [:delta_interval, :type_urls]

  field :delta_interval, 1, type: :uint32
  field :type_urls, 2, repeated: true, type: :string
end
# Generated proto3 message grouping transaction-related limits and sub-configs.
defmodule ForgeAbi.TransactionConfig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          max_asset_size: non_neg_integer,
          max_list_size: non_neg_integer,
          max_multisig: non_neg_integer,
          minimum_stake: non_neg_integer,
          declare: ForgeAbi.DeclareConfig.t() | nil,
          delegate: ForgeAbi.DelegateConfig.t() | nil,
          poke: ForgeAbi.PokeConfig.t() | nil,
          stake: ForgeAbi.StakeConfig.t() | nil
        }
  defstruct [
    :max_asset_size,
    :max_list_size,
    :max_multisig,
    :minimum_stake,
    :declare,
    :delegate,
    :poke,
    :stake
  ]

  field :max_asset_size, 1, type: :uint32
  field :max_list_size, 2, type: :uint32
  field :max_multisig, 3, type: :uint32
  field :minimum_stake, 4, type: :uint64
  field :declare, 5, type: ForgeAbi.DeclareConfig
  field :delegate, 6, type: ForgeAbi.DelegateConfig
  field :poke, 7, type: ForgeAbi.PokeConfig
  field :stake, 8, type: ForgeAbi.StakeConfig
end
# Generated proto3 message: full block details including embedded transactions.
# See BlockInfoSimple for the variant that carries only tx hashes.
defmodule ForgeAbi.BlockInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          height: non_neg_integer,
          num_txs: non_neg_integer,
          time: Google.Protobuf.Timestamp.t() | nil,
          app_hash: binary,
          proposer: binary,
          txs: [ForgeAbi.TransactionInfo.t()],
          total_txs: non_neg_integer,
          invalid_txs: [ForgeAbi.TransactionInfo.t()],
          txs_hashes: [String.t()],
          invalid_txs_hashes: [String.t()],
          consensus_hash: binary,
          data_hash: binary,
          evidence_hash: binary,
          last_commit_hash: binary,
          last_results_hash: binary,
          next_validators_hash: binary,
          validators_hash: binary,
          version: AbciVendor.Version.t() | nil,
          last_block_id: AbciVendor.BlockID.t() | nil
        }
  defstruct [
    :height,
    :num_txs,
    :time,
    :app_hash,
    :proposer,
    :txs,
    :total_txs,
    :invalid_txs,
    :txs_hashes,
    :invalid_txs_hashes,
    :consensus_hash,
    :data_hash,
    :evidence_hash,
    :last_commit_hash,
    :last_results_hash,
    :next_validators_hash,
    :validators_hash,
    :version,
    :last_block_id
  ]

  field :height, 1, type: :uint64
  field :num_txs, 2, type: :uint32
  field :time, 3, type: Google.Protobuf.Timestamp
  field :app_hash, 4, type: :bytes
  field :proposer, 5, type: :bytes
  field :txs, 6, repeated: true, type: ForgeAbi.TransactionInfo
  field :total_txs, 7, type: :uint64
  field :invalid_txs, 8, repeated: true, type: ForgeAbi.TransactionInfo
  field :txs_hashes, 9, repeated: true, type: :string
  field :invalid_txs_hashes, 10, repeated: true, type: :string
  field :consensus_hash, 11, type: :bytes
  field :data_hash, 12, type: :bytes
  field :evidence_hash, 13, type: :bytes
  field :last_commit_hash, 14, type: :bytes
  field :last_results_hash, 15, type: :bytes
  field :next_validators_hash, 16, type: :bytes
  field :validators_hash, 17, type: :bytes
  field :version, 18, type: AbciVendor.Version
  field :last_block_id, 19, type: AbciVendor.BlockID
end
# Generated proto3 message: BlockInfo without embedded TransactionInfo lists
# (only tx hashes) — note the field numbering therefore differs from BlockInfo.
defmodule ForgeAbi.BlockInfoSimple do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          height: non_neg_integer,
          num_txs: non_neg_integer,
          time: Google.Protobuf.Timestamp.t() | nil,
          app_hash: binary,
          proposer: binary,
          total_txs: non_neg_integer,
          txs_hashes: [String.t()],
          invalid_txs_hashes: [String.t()],
          consensus_hash: binary,
          data_hash: binary,
          evidence_hash: binary,
          last_commit_hash: binary,
          last_results_hash: binary,
          next_validators_hash: binary,
          validators_hash: binary,
          version: AbciVendor.Version.t() | nil,
          last_block_id: AbciVendor.BlockID.t() | nil
        }
  defstruct [
    :height,
    :num_txs,
    :time,
    :app_hash,
    :proposer,
    :total_txs,
    :txs_hashes,
    :invalid_txs_hashes,
    :consensus_hash,
    :data_hash,
    :evidence_hash,
    :last_commit_hash,
    :last_results_hash,
    :next_validators_hash,
    :validators_hash,
    :version,
    :last_block_id
  ]

  field :height, 1, type: :uint64
  field :num_txs, 2, type: :uint32
  field :time, 3, type: Google.Protobuf.Timestamp
  field :app_hash, 4, type: :bytes
  field :proposer, 5, type: :bytes
  field :total_txs, 6, type: :uint64
  field :txs_hashes, 7, repeated: true, type: :string
  field :invalid_txs_hashes, 8, repeated: true, type: :string
  field :consensus_hash, 9, type: :bytes
  field :data_hash, 10, type: :bytes
  field :evidence_hash, 11, type: :bytes
  field :last_commit_hash, 12, type: :bytes
  field :last_results_hash, 13, type: :bytes
  field :next_validators_hash, 14, type: :bytes
  field :validators_hash, 15, type: :bytes
  field :version, 16, type: AbciVendor.Version
  field :last_block_id, 17, type: AbciVendor.BlockID
end
# Generated proto3 message: a transaction hash with its status code.
defmodule ForgeAbi.TxStatus do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          code: ForgeAbi.StatusCode.t(),
          hash: String.t()
        }
  defstruct [:code, :hash]

  field :code, 1, type: ForgeAbi.StatusCode, enum: true
  field :hash, 2, type: :string
end
# Generated proto3 message: serialized bounded queue of encoded items
# (`type_url` names the item type; `circular`/`fifo` select eviction/order semantics).
defmodule ForgeAbi.CircularQueue do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          items: [binary],
          type_url: String.t(),
          max_items: non_neg_integer,
          circular: boolean,
          fifo: boolean
        }
  defstruct [:items, :type_url, :max_items, :circular, :fifo]

  field :items, 1, repeated: true, type: :bytes
  field :type_url, 2, type: :string
  field :max_items, 3, type: :uint32
  field :circular, 4, type: :bool
  field :fifo, 5, type: :bool
end
# Generated proto3 message: creation ("genesis") and last-update ("renaissance") provenance of a state.
defmodule ForgeAbi.StateContext do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          genesis_tx: String.t(),
          renaissance_tx: String.t(),
          genesis_time: Google.Protobuf.Timestamp.t() | nil,
          renaissance_time: Google.Protobuf.Timestamp.t() | nil
        }
  defstruct [:genesis_tx, :renaissance_tx, :genesis_time, :renaissance_time]

  field :genesis_tx, 1, type: :string
  field :renaissance_tx, 2, type: :string
  field :genesis_time, 3, type: Google.Protobuf.Timestamp
  field :renaissance_time, 4, type: Google.Protobuf.Timestamp
end
# Generated proto3 message: per-account stake totals and recent-activity queues.
# `recent_received_stakes` sits at field 15 — gap from 4 suggests reserved numbers.
defmodule ForgeAbi.StakeContext do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          total_stakes: ForgeAbi.BigUint.t() | nil,
          total_unstakes: ForgeAbi.BigUint.t() | nil,
          total_received_stakes: ForgeAbi.BigUint.t() | nil,
          recent_stakes: ForgeAbi.CircularQueue.t() | nil,
          recent_received_stakes: ForgeAbi.CircularQueue.t() | nil
        }
  defstruct [
    :total_stakes,
    :total_unstakes,
    :total_received_stakes,
    :recent_stakes,
    :recent_received_stakes
  ]

  field :total_stakes, 1, type: ForgeAbi.BigUint
  field :total_unstakes, 2, type: ForgeAbi.BigUint
  field :total_received_stakes, 3, type: ForgeAbi.BigUint
  field :recent_stakes, 4, type: ForgeAbi.CircularQueue
  field :recent_received_stakes, 15, type: ForgeAbi.CircularQueue
end
# Generated proto3 message: aggregate stake/unstake totals with provenance context.
defmodule ForgeAbi.StakeSummary do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          total_stakes: ForgeAbi.BigUint.t() | nil,
          total_unstakes: ForgeAbi.BigUint.t() | nil,
          context: ForgeAbi.StateContext.t() | nil
        }
  defstruct [:total_stakes, :total_unstakes, :context]

  field :total_stakes, 1, type: ForgeAbi.BigUint
  field :total_unstakes, 2, type: ForgeAbi.BigUint
  field :context, 3, type: ForgeAbi.StateContext
end
# Generated proto3 message: stake-related timeout configuration (units not shown here).
defmodule ForgeAbi.StakeConfig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          timeout_general: non_neg_integer,
          timeout_stake_for_node: non_neg_integer
        }
  defstruct [:timeout_general, :timeout_stake_for_node]

  field :timeout_general, 1, type: :uint32
  field :timeout_stake_for_node, 2, type: :uint32
end
# Generated proto3 message: mempool snapshot — count plus the pending transactions.
defmodule ForgeAbi.UnconfirmedTxs do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          n_txs: non_neg_integer,
          txs: [ForgeAbi.Transaction.t()]
        }
  defstruct [:n_txs, :txs]

  field :n_txs, 1, type: :uint32
  field :txs, 2, repeated: true, type: ForgeAbi.Transaction
end
# Generated proto3 message: network listener state and connected peers.
defmodule ForgeAbi.NetInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          listening: boolean,
          listeners: [String.t()],
          n_peers: non_neg_integer,
          peers: [ForgeAbi.PeerInfo.t()]
        }
  defstruct [:listening, :listeners, :n_peers, :peers]

  field :listening, 1, type: :bool
  field :listeners, 2, repeated: true, type: :string
  field :n_peers, 3, type: :uint32
  field :peers, 4, repeated: true, type: ForgeAbi.PeerInfo
end
# Generated proto3 message: geographic location of a node/peer.
defmodule ForgeAbi.GeoInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          city: String.t(),
          country: String.t(),
          latitude: float | :infinity | :negative_infinity | :nan,
          longitude: float | :infinity | :negative_infinity | :nan
        }
  defstruct [:city, :country, :latitude, :longitude]

  field :city, 1, type: :string
  field :country, 2, type: :string
  # proto3 `float` is 32-bit, hence the IEEE special values in the typespec.
  field :latitude, 3, type: :float
  field :longitude, 4, type: :float
end
# Generated proto3 message: identity and location of a connected peer.
defmodule ForgeAbi.PeerInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          id: String.t(),
          network: String.t(),
          consensus_version: String.t(),
          moniker: String.t(),
          ip: String.t(),
          geo_info: ForgeAbi.GeoInfo.t() | nil
        }
  defstruct [:id, :network, :consensus_version, :moniker, :ip, :geo_info]

  field :id, 1, type: :string
  field :network, 2, type: :string
  field :consensus_version, 3, type: :string
  field :moniker, 4, type: :string
  field :ip, 5, type: :string
  field :geo_info, 6, type: ForgeAbi.GeoInfo
end
# Generated proto3 message: validator set at a given block height.
defmodule ForgeAbi.ValidatorsInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          block_height: non_neg_integer,
          validators: [ForgeAbi.ValidatorInfo.t()]
        }
  defstruct [:block_height, :validators]

  field :block_height, 1, type: :uint64
  field :validators, 2, repeated: true, type: ForgeAbi.ValidatorInfo
end
# Generated proto3 message: a validator's identity, key, power and location.
# `proposer_priority` is a string in the schema (not numeric) — matches Tendermint RPC output.
defmodule ForgeAbi.ValidatorInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          pub_key: AbciVendor.PubKey.t() | nil,
          voting_power: non_neg_integer,
          proposer_priority: String.t(),
          name: String.t(),
          geo_info: ForgeAbi.GeoInfo.t() | nil
        }
  defstruct [:address, :pub_key, :voting_power, :proposer_priority, :name, :geo_info]

  field :address, 1, type: :string
  field :pub_key, 2, type: AbciVendor.PubKey
  field :voting_power, 3, type: :uint64
  field :proposer_priority, 4, type: :string
  field :name, 5, type: :string
  field :geo_info, 6, type: ForgeAbi.GeoInfo
end
# Generated proto3 message: chain genesis document contents.
defmodule ForgeAbi.GenesisInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          genesis_time: String.t(),
          chain_id: String.t(),
          consensus_params: AbciVendor.ConsensusParams.t() | nil,
          validators: [ForgeAbi.ValidatorInfo.t()],
          app_hash: String.t()
        }
  defstruct [:genesis_time, :chain_id, :consensus_params, :validators, :app_hash]

  field :genesis_time, 1, type: :string
  field :chain_id, 2, type: :string
  field :consensus_params, 3, type: AbciVendor.ConsensusParams
  field :validators, 4, repeated: true, type: ForgeAbi.ValidatorInfo
  field :app_hash, 5, type: :string
end
# Generated proto3 message: time-series chain statistics (one list element per
# sampling interval) plus aggregate TPS figures.
defmodule ForgeAbi.ForgeStats do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          num_blocks: [non_neg_integer],
          num_txs: [non_neg_integer],
          num_stakes: [ForgeAbi.BigUint.t()],
          num_validators: [non_neg_integer],
          num_account_migrate_txs: [non_neg_integer],
          num_create_asset_txs: [non_neg_integer],
          num_consensus_upgrade_txs: [non_neg_integer],
          num_declare_txs: [non_neg_integer],
          num_declare_file_txs: [non_neg_integer],
          num_exchange_txs: [non_neg_integer],
          num_stake_txs: [non_neg_integer],
          num_sys_upgrade_txs: [non_neg_integer],
          num_transfer_txs: [non_neg_integer],
          num_update_asset_txs: [non_neg_integer],
          num_consume_asset_txs: [non_neg_integer],
          num_poke_txs: [non_neg_integer],
          tps: [non_neg_integer],
          max_tps: non_neg_integer,
          avg_tps: non_neg_integer,
          avg_block_time: float | :infinity | :negative_infinity | :nan
        }
  defstruct [
    :num_blocks,
    :num_txs,
    :num_stakes,
    :num_validators,
    :num_account_migrate_txs,
    :num_create_asset_txs,
    :num_consensus_upgrade_txs,
    :num_declare_txs,
    :num_declare_file_txs,
    :num_exchange_txs,
    :num_stake_txs,
    :num_sys_upgrade_txs,
    :num_transfer_txs,
    :num_update_asset_txs,
    :num_consume_asset_txs,
    :num_poke_txs,
    :tps,
    :max_tps,
    :avg_tps,
    :avg_block_time
  ]

  field :num_blocks, 1, repeated: true, type: :uint64
  field :num_txs, 2, repeated: true, type: :uint64
  field :num_stakes, 3, repeated: true, type: ForgeAbi.BigUint
  field :num_validators, 4, repeated: true, type: :uint32
  field :num_account_migrate_txs, 5, repeated: true, type: :uint64
  field :num_create_asset_txs, 6, repeated: true, type: :uint64
  field :num_consensus_upgrade_txs, 7, repeated: true, type: :uint32
  field :num_declare_txs, 8, repeated: true, type: :uint64
  field :num_declare_file_txs, 9, repeated: true, type: :uint64
  field :num_exchange_txs, 10, repeated: true, type: :uint64
  field :num_stake_txs, 11, repeated: true, type: :uint64
  field :num_sys_upgrade_txs, 12, repeated: true, type: :uint32
  field :num_transfer_txs, 13, repeated: true, type: :uint64
  field :num_update_asset_txs, 14, repeated: true, type: :uint64
  field :num_consume_asset_txs, 15, repeated: true, type: :uint64
  field :num_poke_txs, 16, repeated: true, type: :uint64
  field :tps, 17, repeated: true, type: :uint32
  field :max_tps, 18, type: :uint32
  field :avg_tps, 19, type: :uint32
  field :avg_block_time, 20, type: :float
end
# Generated proto3 message: per-transaction-type counters (scalar counterpart of ForgeStats' lists).
defmodule ForgeAbi.TxStatistics do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          num_account_migrate_txs: non_neg_integer,
          num_create_asset_txs: non_neg_integer,
          num_consensus_upgrade_txs: non_neg_integer,
          num_declare_txs: non_neg_integer,
          num_declare_file_txs: non_neg_integer,
          num_exchange_txs: non_neg_integer,
          num_stake_txs: non_neg_integer,
          num_sys_upgrade_txs: non_neg_integer,
          num_transfer_txs: non_neg_integer,
          num_update_asset_txs: non_neg_integer,
          num_consume_asset_txs: non_neg_integer,
          num_poke_txs: non_neg_integer
        }
  defstruct [
    :num_account_migrate_txs,
    :num_create_asset_txs,
    :num_consensus_upgrade_txs,
    :num_declare_txs,
    :num_declare_file_txs,
    :num_exchange_txs,
    :num_stake_txs,
    :num_sys_upgrade_txs,
    :num_transfer_txs,
    :num_update_asset_txs,
    :num_consume_asset_txs,
    :num_poke_txs
  ]

  field :num_account_migrate_txs, 1, type: :uint64
  field :num_create_asset_txs, 2, type: :uint64
  field :num_consensus_upgrade_txs, 3, type: :uint32
  field :num_declare_txs, 4, type: :uint64
  field :num_declare_file_txs, 5, type: :uint64
  field :num_exchange_txs, 6, type: :uint64
  field :num_stake_txs, 7, type: :uint64
  field :num_sys_upgrade_txs, 8, type: :uint32
  field :num_transfer_txs, 9, type: :uint64
  field :num_update_asset_txs, 10, type: :uint64
  field :num_consume_asset_txs, 11, type: :uint64
  field :num_poke_txs, 12, type: :uint64
end
# Generated proto3 message: native token definition (name, symbol, supply, etc.).
defmodule ForgeAbi.ForgeToken do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          name: String.t(),
          symbol: String.t(),
          unit: String.t(),
          description: String.t(),
          icon: binary,
          decimal: non_neg_integer,
          initial_supply: non_neg_integer,
          total_supply: non_neg_integer,
          inflation_rate: non_neg_integer
        }
  defstruct [
    :name,
    :symbol,
    :unit,
    :description,
    :icon,
    :decimal,
    :initial_supply,
    :total_supply,
    :inflation_rate
  ]

  field :name, 1, type: :string
  field :symbol, 2, type: :string
  field :unit, 3, type: :string
  field :description, 4, type: :string
  field :icon, 5, type: :bytes
  field :decimal, 6, type: :uint32
  field :initial_supply, 7, type: :uint64
  field :total_supply, 8, type: :uint64
  field :inflation_rate, 9, type: :uint32
end
# Generated proto3 message: current poke (faucet-like) allowance state, amounts as BigUint.
defmodule ForgeAbi.PokeInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          daily_limit: ForgeAbi.BigUint.t() | nil,
          leftover: ForgeAbi.BigUint.t() | nil,
          amount: ForgeAbi.BigUint.t() | nil
        }
  defstruct [:daily_limit, :leftover, :amount]

  field :daily_limit, 1, type: ForgeAbi.BigUint
  field :leftover, 2, type: ForgeAbi.BigUint
  field :amount, 3, type: ForgeAbi.BigUint
end
# Generated proto3 message: poke configuration. Field numbers 1 and 3 are skipped
# (presumably reserved/removed fields); confirm against the .proto.
defmodule ForgeAbi.PokeConfig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          daily_limit: non_neg_integer,
          amount: non_neg_integer,
          enabled: boolean
        }
  defstruct [:daily_limit, :amount, :enabled]

  field :daily_limit, 2, type: :uint64
  field :amount, 4, type: :uint64
  field :enabled, 5, type: :bool
end
# Generated proto3 message: scheduled upgrade — target block height and version string.
defmodule ForgeAbi.UpgradeInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          height: non_neg_integer,
          version: String.t()
        }
  defstruct [:height, :version]

  field :height, 1, type: :uint64
  field :version, 2, type: :string
end
# Generated proto3 message: a withdraw entry — tx hash and value.
defmodule ForgeAbi.WithdrawItem do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          hash: String.t(),
          value: ForgeAbi.BigUint.t() | nil
        }
  defstruct [:hash, :value]

  field :hash, 1, type: :string
  field :value, 2, type: ForgeAbi.BigUint
end
# Generated proto3 message: pre-configured account (address, public key, initial balance).
defmodule ForgeAbi.AccountConfig do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          pk: binary,
          balance: ForgeAbi.BigUint.t() | nil
        }
  defstruct [:address, :pk, :balance]

  field :address, 1, type: :string
  field :pk, 2, type: :bytes
  field :balance, 3, type: ForgeAbi.BigUint
end
defmodule ForgeAbi.TokenSwapConfig do
  # Generated Protobuf (proto3) message module; fields mirror the
  # `TokenSwapConfig` message in the ForgeAbi proto schema.
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          commission_holder_address: String.t(),
          commission_rate: non_neg_integer,
          revoke_commission_rate: non_neg_integer,
          min_commission: ForgeAbi.BigUint.t() | nil,
          max_commission: ForgeAbi.BigUint.t() | nil
        }
  defstruct [
    :commission_holder_address,
    :commission_rate,
    :revoke_commission_rate,
    :min_commission,
    :max_commission
  ]

  # Wire schema. Tags 2 and 3 are skipped — presumably reserved/removed in
  # the .proto source; confirm against the schema.
  field :commission_holder_address, 1, type: :string
  field :commission_rate, 4, type: :uint32
  field :revoke_commission_rate, 5, type: :uint32
  field :min_commission, 6, type: ForgeAbi.BigUint
  field :max_commission, 7, type: ForgeAbi.BigUint
end
defmodule ForgeAbi.Evidence do
  # Generated Protobuf (proto3) message module; fields mirror the
  # `Evidence` message in the ForgeAbi proto schema.
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          hash: String.t(),
          chain_type: String.t(),
          chain_id: String.t(),
          original_tx: binary,
          receiver_address: String.t()
        }
  defstruct [:hash, :chain_type, :chain_id, :original_tx, :receiver_address]

  # Wire schema: field name, tag number and type used by the Protobuf codec.
  field :hash, 1, type: :string
  field :chain_type, 2, type: :string
  field :chain_id, 3, type: :string
  field :original_tx, 4, type: :bytes
  field :receiver_address, 5, type: :string
end
|
lib/protobuf/gen/type.pb.ex
| 0.771456 | 0.59305 |
type.pb.ex
|
starcoder
|
defmodule Benchmark do
  @moduledoc """
  Helper for timing function calls.
  """

  alias :timer, as: Timer

  @doc """
  Macro for logging benchmarks of function calls.

  Prints `label`, the value returned by the block and the elapsed time in
  milliseconds, then returns the block's value.
  """
  defmacro bench(label \\ "", do: block) do
    quote do
      IO.write(unquote(label) <> ": ")
      {time, val} = Timer.tc(fn -> unquote(block) end)

      # `inspect/1` is used instead of bare interpolation so results that do
      # not implement `String.Chars` (maps, tuples, lists of terms, ...) do
      # not raise `Protocol.UndefinedError` while being printed.
      IO.write("Returned value #{inspect(val)} in #{time / 1000} milliseconds\n")
      val
    end
  end
end
defmodule ComplexMath do
  defmodule Math do
    import :math, only: [sqrt: 1]
    import Integer, only: [is_even: 1]

    @doc "Returns true if number is prime."
    def prime?(2), do: true
    def prime?(n) when is_even(n), do: false
    def prime?(n) when n > 1, do: prime?(n, sqrt(n), 3)
    def prime?(_), do: false

    # Trial division over odd candidates from 3 up to sqrt(n).
    # NOTE: `@doc` is discarded (with a compiler warning) on private
    # functions, so the original `@doc false` was replaced by this comment.
    defp prime?(n, root, i) do
      cond do
        i > root -> true
        rem(n, i) === 0 -> false
        true -> prime?(n, root, i + 2)
      end
    end

    @doc "Sorted list of all factors of `n`, including 1 and `n` itself."
    def factors(n) do
      import List, only: [flatten: 1]

      for x <- 1..round(sqrt(n)), rem(n, x) === 0 do
        [x, div(n, x)]
      end
      |> flatten
      |> Enum.uniq()
      |> Enum.sort()
    end

    @doc "Factors of `n` excluding `n` itself (proper divisors)."
    def divisors(n), do: factors(n) -- [n]
  end

  defmodule Lazy do
    @doc "Lazy Sequence of prime numbers."
    def prime(start \\ 2) do
      start
      |> Stream.iterate(fn n -> n + 1 end)
      |> Stream.filter(&Math.prime?/1)
    end

    @doc "Lazy Sequence of fibonacci numbers."
    def fib(f1 \\ 1, f2 \\ 1) do
      {f1, f2}
      |> Stream.iterate(fn {x, y} -> {y, x + y} end)
      |> Stream.map(fn {x, _} -> x end)
    end
  end

  defmodule Example do
    import Integer, only: [is_even: 1]
    import Benchmark

    @doc "Shows off Pipe Operator. Returns the sum of all even fibonacci numbers below 4 million."
    def pipe_example do
      Lazy.fib
      |> Stream.take_while(fn n -> n < 4_000_000 end)
      # `is_even/1` is a guard macro, so the name/arity capture `&is_even/1`
      # does not compile; `&is_even(&1)` wraps the macro expansion in an
      # anonymous function instead.
      |> Stream.filter(&is_even(&1))
      |> Enum.sum()
    end

    @doc "Same calculation as pipe_example, but highlights Eager vs Lazy evaluation."
    def lazy_example do
      bench "Eager example" do
        Lazy.fib
        |> Enum.take_while(fn n -> n < 4_000_000 end)
        |> Enum.filter(&is_even(&1))
        |> Enum.sum()
      end

      bench "Lazy example" do
        Lazy.fib
        |> Stream.take_while(fn n -> n < 4_000_000 end)
        |> Stream.filter(&is_even(&1))
        |> Enum.sum()
      end
    end

    @doc "Shows off Task module (future)."
    def task_example do
      import :timer, only: [sleep: 1]

      expensive1 = fn -> sleep(1000); 5 end
      expensive2 = fn -> sleep(1000); 4 end

      bench "Starting sequential part" do
        expensive1.() + expensive2.()
      end

      # Running both expensive computations concurrently roughly halves the
      # wall-clock time compared to the sequential version above.
      bench "Starting threaded part" do
        task1 = Task.async(expensive1)
        task2 = Task.async(expensive2)
        Task.await(task1) + Task.await(task2)
      end
    end
  end
end
|
elixir_libs/elixir_example/lib/complex_math.ex
| 0.70304 | 0.40439 |
complex_math.ex
|
starcoder
|
defmodule Mirage.Image do
  @moduledoc """
  Module for reading, writing, and creating images.
  """

  alias Mirage.Color

  @typedoc """
  Represents a loaded image in working memory.
  """
  @type t :: %__MODULE__{
          byte_size: non_neg_integer(),
          height: non_neg_integer(),
          width: non_neg_integer()
        }

  @typedoc """
  The format of an image. This is also the list of supported formats that can be
  read and written with this library.
  """
  @type format ::
          :png
          | :jpeg
          | :gif
          | :webp
          | :pnm
          | :tiff
          | :tga
          | :dds
          | :bmp
          | :ico
          | :hdr
          | :farbfeld
          | :avif

  # All fields default to nil; :resource holds the native (NIF) handle.
  defstruct [:byte_size, :height, :width, :resource]

  @doc """
  Loads an image from a `binary`.
  Returns the discovered format of the image on success.
  ## Example
  ```elixir
  # Could also be from a HTTP request or something like S3!
  bytes = File.read!("./input.png")
  {:ok, :png, image} = Mirage.Image.from_bytes(bytes)
  ```
  """
  @spec from_bytes(binary()) ::
          {:ok, format(), t()}
          | {:error, :invalid_image | :unsupported_image_format}
  defdelegate from_bytes(bytes), to: Mirage.Native

  @doc """
  Reads an image from the filesystem at `path`.
  ## Example
  ```elixir
  {:ok, :png, image} = Mirage.Image.read("./input.png")
  ```
  """
  @spec read(String.t()) ::
          {:ok, format(), t()}
          | {:error, File.posix() | :invalid_image | :unsupported_image_format}
  def read(path) do
    case File.read(path) do
      {:ok, bytes} -> from_bytes(bytes)
      {:error, _posix} = failure -> failure
    end
  end

  @doc """
  Similar to `read/1` but raises `Mirage.ReadError` if an error occurs.
  ## Example
  ```elixir
  {:png, image} = Mirage.Image.read!("./input.png")
  ```
  """
  @spec read!(String.t()) :: {format(), t()} | no_return()
  def read!(path) do
    case read(path) do
      {:ok, format, image} ->
        {format, image}

      {:error, reason} ->
        raise Mirage.ReadError,
          message: "Error while reading image from path '#{path}': '#{inspect(reason)}'",
          path: path,
          error: reason
    end
  end

  @doc """
  Creates a new empty image with the given width and height.
  ## Example
      iex> match?(%Mirage.Image{width: 100, height: 100}, Mirage.Image.empty(100, 100))
      true
  """
  @spec empty(non_neg_integer(), non_neg_integer()) :: t()
  defdelegate empty(width, height), to: Mirage.Native

  @doc """
  Creates a new image with the given dimensions consisting entirely of the specified color.
  ## Example
      iex> match?(%Mirage.Image{width: 100, height: 100}, Mirage.Image.new(100, 100, %Mirage.Color{r: 1.0, g: 1.0, b: 1.0, a: 1.0}))
      true
  """
  @spec new(non_neg_integer(), non_neg_integer(), Color.t()) :: t()
  def new(width, height, %Color{} = color) do
    width
    |> Mirage.Native.empty(height)
    |> fill(color)
  end

  @doc """
  Fills an image with a specific color.
  Overwrites any existing pixel values.
  ```elixir
  Mirage.Image.fill(Mirage.Image.empty(), %Mirage.Color{r: 1.0, g: 1.0, b: 1.0, a: 1.0})
  ```
  """
  @spec fill(t(), Color.t()) :: t()
  def fill(image, %Color{r: r, g: g, b: b, a: a}) do
    Mirage.Native.fill(image, r, g, b, a)
  end

  @doc """
  Writes the image to the provided path. The format of the image is determined
  by the file extension in the path.
  ## Example
  ```elixir
  Mirage.Image.write(image, "./output.png")
  ```
  """
  @spec write(t(), String.t()) :: :ok | {:error, String.t()}
  defdelegate write(image, path), to: Mirage.Native

  @doc """
  Similar to `write/2` but raises `Mirage.WriteError` if an error occurs.
  ## Example
  ```elixir
  Mirage.Image.write!(image, "./output.png")
  ```
  """
  @spec write!(t(), String.t()) :: :ok | no_return()
  def write!(image, path) do
    case Mirage.Native.write(image, path) do
      :ok ->
        :ok

      {:error, reason} ->
        raise Mirage.WriteError,
          message: "Error while writing image to path '#{path}': '#{inspect(reason)}'",
          path: path,
          error: reason
    end
  end
end
|
lib/mirage/image.ex
| 0.939789 | 0.896704 |
image.ex
|
starcoder
|
defmodule Tzdata do
  alias Tzdata.BasicData, as: TzData
  alias Tzdata.Periods
  alias Tzdata.ReleaseParser, as: TzReleaseParser
  alias Tzdata.LeapSecParser

  @moduledoc """
  The Tzdata module provides data from the IANA tz database. Also known
  as the Olson/Eggert database, zoneinfo, tzdata and other names.
  The database files from IANA are text files. Tzdata ships with a copy
  of the newest files. The `dl_latest_data.sh` script downloads the newest
  files. When a new version of the database is released from IANA you can
  run that script and recompile this library. Then the library will use the
  newest version of the database.
  Or you can get an updated version of this `tzdata`
  Elixir library where the updated database is included.
  A list of time zone names (e.g. `America/Los_Angeles`) are provided.
  As well as functions for finding out the UTC offset, abbreviation,
  standard offset (DST) for a specific point in time in a certain
  timezone.
  There are also functions for leap seconds. In the `Tzdata.TableData`
  module, data from the table that matches countries with time zones is
  available.
  """

  # Provide lists of zone- and link-names
  # Note that the function names are different from TzData!
  # The term "alias" is used instead of "link"

  # NOTE: the `unquote(Macro.escape(...))` pattern below evaluates the tz
  # data once at compile time and embeds the result directly in the
  # function body, so these calls are constant-time literal lookups.

  @doc """
  zone_list provides a list of all the zone names that can be used with
  DateTime. This includes aliases.
  """
  def zone_list, do: unquote(Macro.escape(TzData.zone_and_link_list))

  @doc """
  Like zone_list, but excludes aliases for zones.
  """
  def canonical_zone_list, do: unquote(Macro.escape(TzData.zone_list))

  @doc """
  A list of aliases for zone names. For instance Europe/Jersey
  is an alias for Europe/London. Aliases are also known as linked zones.
  """
  def zone_alias_list, do: unquote(Macro.escape(TzData.link_list))

  @doc """
  Takes the name of a zone. Returns true if the zone exists. Otherwise false.

      iex> Tzdata.zone_exists? "Pacific/Auckland"
      true
      iex> Tzdata.zone_exists? "America/Sao_Paulo"
      true
      iex> Tzdata.zone_exists? "Europe/Jersey"
      true
  """
  def zone_exists?(name), do: Enum.member?(zone_list, name)

  @doc """
  Takes the name of a zone. Returns true if zone exists and is canonical.
  Otherwise false.

      iex> Tzdata.canonical_zone? "Europe/London"
      true
      iex> Tzdata.canonical_zone? "Europe/Jersey"
      false
  """
  def canonical_zone?(name), do: Enum.member?(canonical_zone_list, name)

  @doc """
  Takes the name of a zone. Returns true if zone exists and is an alias.
  Otherwise false.

      iex> Tzdata.zone_alias? "Europe/Jersey"
      true
      iex> Tzdata.zone_alias? "Europe/London"
      false
  """
  def zone_alias?(name), do: Enum.member?(zone_alias_list, name)

  # Provide map of links

  @doc """
  Returns a map of links. Also known as aliases.

      iex> Tzdata.links["Europe/Jersey"]
      "Europe/London"
  """
  def links, do: unquote(Macro.escape(TzData.links))

  @doc """
  Returns a map with keys being group names and the values lists of
  time zone names. The group names mirror the file names used by the tzinfo
  database.
  """
  def zone_lists_grouped, do: unquote(Macro.escape(TzData.zones_and_links_by_groups))

  @doc """
  Returns tzdata release version as a string.
  Example:
      Tzdata.tzdata_version
      "2014i"
  """
  def tzdata_version, do: unquote(Macro.escape(TzReleaseParser.tzdata_version))

  @doc """
  Returns a list of periods for the `zone_name` provided as an argument.
  A period in this case is a period of time where the UTC offset and standard
  offset are in a certain way. When they change, for instance in spring when
  DST takes effect, a new period starts. For instance a period can begin in
  spring when winter time ends and summer time begins. The period lasts until
  DST ends.
  If either the UTC or standard offset change for any reason, a new period
  begins. For instance instead of DST ending or beginning, a rule change
  that changes the UTC offset will also mean a new period.
  The result is tagged with :ok if the zone_name is correct.
  The from and until times can be :mix, :max or gregorian seconds.
  ## Example
      iex> Tzdata.periods("Europe/Madrid") |> elem(1) |> Enum.take(1)
      [%{from: %{standard: :min, utc: :min, wall: :min}, std_off: 0,
        until: %{standard: 59989766400, utc: 59989767284, wall: 59989766400},
        utc_off: -884, zone_abbr: "LMT"}]
  """
  def periods(zone_name) do
    Periods.periods(zone_name)
  end

  # Bounds of the compile-time cache of periods: only periods overlapping
  # [2014, compile year + 10] get dedicated pattern-matched clauses below.
  @min_cache_time_point :calendar.datetime_to_gregorian_seconds {{2014, 1, 1}, {0, 0, 0}} # 2014
  @max_cache_time_point :calendar.datetime_to_gregorian_seconds {{(:calendar.universal_time|>elem(0)|>elem(0)) + 10, 1, 1}, {0, 0, 0}} # 10 years from compile time
  @wall_time_cache_buffer 3600*24*3 # seconds to stay away from period limits in wall time to avoid problems with overlapping periods

  @doc """
  Get the periods that cover a certain point in time. Usually it will be a list
  with just one period. But in some cases it will be zero or two periods. For
  instance when going from summer to winter time (DST to standard time) there
  will be an overlap if `time_type` is `:wall`.
  `zone_name` should be a valid time zone name. The function `zone_list/0`
  provides a valid list of valid zone names.
  `time_point` is the point in time in gregorian seconds (see erlang
  calendar module documentation for more info on gregorian seconds).
  Valid values for `time_type` is `:utc`, `:wall` or `:standard`.
  ## Examples
      # 63555753600 seconds is equivalent to {{2015, 1, 1}, {0, 0, 0}}
      iex> Tzdata.periods_for_time("Asia/Tokyo", 63587289600, :wall)
      [%{from: %{standard: 61589206800, utc: 61589174400, wall: 61589206800}, std_off: 0,
        until: %{standard: :max, utc: :max, wall: :max}, utc_off: 32400, zone_abbr: "JST"}]

      # 63612960000 seconds is equivalent to 2015-10-25 02:40:00 and is an ambiguous
      # wall time for the zone. So two possible periods will be returned.
      iex> Tzdata.periods_for_time("Europe/Copenhagen", 63612960000, :wall)
      [%{from: %{standard: 63594813600, utc: 63594810000, wall: 63594817200}, std_off: 3600,
        until: %{standard: 63612957600, utc: 63612954000, wall: 63612961200}, utc_off: 3600, zone_abbr: "CEST"},
        %{from: %{standard: 63612957600, utc: 63612954000, wall: 63612957600}, std_off: 0,
        until: %{standard: 63626263200, utc: 63626259600, wall: 63626263200}, utc_off: 3600, zone_abbr: "CET"}]

      # 63594816000 seconds is equivalent to 2015-03-29 02:40:00 and is a
      # non-existing wall time for the zone. It is spring and the clock skips that hour.
      iex> Tzdata.periods_for_time("Europe/Copenhagen", 63594816000, :wall)
      []
  """
  # For certain years we generate functions that pattern match on certain time points
  # to more quickly return the correct periods for most time points in those years
  Enum.each TzData.zone_list, fn (zone_name) ->
    {:ok, periods} = Periods.periods(zone_name)
    # One clause per cached period: the guard range check replaces a runtime
    # search through the full period list.
    Enum.each periods, fn(period) ->
      if period.until.utc > @min_cache_time_point && period.from.utc < @max_cache_time_point do
        def periods_for_time(unquote(zone_name), time_point, :utc) when time_point > unquote(period.from.utc) and time_point < unquote(period.until.utc) do
          unquote(Macro.escape([period]))
        end
        # For the wall time we make sure that the interval is has to match is a bit more
        # narrow, but using the buffer
        def periods_for_time(unquote(zone_name), time_point, :wall) when time_point-@wall_time_cache_buffer> unquote(period.from.wall) and time_point+@wall_time_cache_buffer < unquote(period.until.wall) do
          unquote(Macro.escape([period]))
        end
      end
    end
  end

  # For each linked zone, call canonical zone
  Enum.each TzData.links, fn {alias_name, canonical_name} ->
    def periods_for_time(unquote(alias_name), time_point, time_type) do
      periods_for_time(unquote(canonical_name), time_point, time_type)
    end
  end

  # Fallback clause: runtime filter over the zone's full period list for any
  # time point not covered by the generated cache clauses above.
  def periods_for_time(zone_name, time_point, time_type) do
    {:ok, periods} = possible_periods_for_zone_and_time(zone_name, time_point)
    periods
    |> Enum.filter(fn x ->
      ((Map.get(x.from, time_type) |> smaller_than_or_equals(time_point)) &&
      (Map.get(x.until, time_type) |> bigger_than(time_point)))
    end)
  end

  # Use dynamic periods for points in time that are about 50 years into the future
  @years_in_the_future_where_precompiled_periods_are_used 40
  @point_from_which_to_use_dynamic_periods :calendar.datetime_to_gregorian_seconds {{(:calendar.universal_time|>elem(0)|>elem(0)) + @years_in_the_future_where_precompiled_periods_are_used, 1, 1}, {0, 0, 0}}

  defp possible_periods_for_zone_and_time(zone_name, time_point) when time_point >= @point_from_which_to_use_dynamic_periods do
    # If period in 30 years from compile time goes to :max, use normal periods
    if Tzdata.FarFutureDynamicPeriods.zone_in_30_years_in_eternal_period?(zone_name) do
      periods(zone_name)
    # If not, use dynamic periods
    else
      Tzdata.FarFutureDynamicPeriods.periods_for_point_in_time(time_point, zone_name)
    end
  end
  defp possible_periods_for_zone_and_time(zone_name, _time_point) do
    periods(zone_name)
  end

  # Leap second data is read from file once, at compile time, and embedded
  # in the functions below.
  leap_seconds_data = LeapSecParser.read_file

  @doc """
  Get a list of maps with known leap seconds and
  the difference between UTC and the TAI in seconds.
  See also `leap_seconds/1`
  ## Example
      iex> Tzdata.leap_seconds_with_tai_diff |> Enum.take(3)
      [%{date_time: {{1971, 12, 31}, {23, 59, 60}}, tai_diff: 10},
        %{date_time: {{1972, 6, 30}, {23, 59, 60}}, tai_diff: 11},
        %{date_time: {{1972, 12, 31}, {23, 59, 60}}, tai_diff: 12}]
  """
  def leap_seconds_with_tai_diff do
    unquote(Macro.escape(leap_seconds_data[:leap_seconds]))
  end

  just_leap_seconds = leap_seconds_data[:leap_seconds]
    |> Enum.map(&(Map.get(&1, :date_time)))

  @doc """
  Get a list of known leap seconds. The leap seconds are datetime
  tuples representing the extra leap second to be inserted.
  The date-times are in UTC.
  See also `leap_seconds_with_tai_diff/1`
  ## Example
      iex> Tzdata.leap_seconds |> Enum.take(3)
      [{{1971, 12, 31}, {23, 59, 60}},
        {{1972, 6, 30}, {23, 59, 60}},
        {{1972, 12, 31}, {23, 59, 60}}]
  """
  def leap_seconds do
    unquote(Macro.escape(just_leap_seconds))
  end

  @doc """
  The time when the leap second information returned from the other leap second
  related function expires. The date-time is in UTC.
  ## Example
      Tzdata.leap_second_data_valid_until
      {{2015, 12, 28}, {0, 0, 0}}
  """
  def leap_second_data_valid_until do
    unquote(Macro.escape(leap_seconds_data[:valid_until]))
  end

  # Comparison helpers that treat the sentinel atoms :min/:max as
  # unbounded endpoints of a period.
  defp smaller_than_or_equals(:min, _), do: true
  defp smaller_than_or_equals(first, second), do: first <= second
  defp bigger_than(:max, _), do: true
  defp bigger_than(first, second), do: first > second
end
|
elixir/codes-from-books/little-elixir/cap8/blitzy/deps/tzdata/lib/tzdata.ex
| 0.875228 | 0.537041 |
tzdata.ex
|
starcoder
|
defmodule Breadboard.GPIO.BaseGPIOHelper do
  @moduledoc """
  Define an 'helper' behaviour to define a complete map of GPIOs pinout for a specific platform from a small set of information.
  Implementing the defined callbacks the result is obtained from the `build_pinout_map/0` function, starting for example from:
  ```
  %{
    0 => [pin_name: "GPIO0"],
    1 => [pin_name: "GPIO1"],
    2 => [pin_name: "GPIO2"],
    ...
    63 => [pin_name: "GPIO63"]
  }
  ```
  for any item a new extended item is built in the form:
  ```
  [pin: 1, sysfs: 1, pin_key: :pin1, pin_label: :gpio1, pin_name: "GPIO1"]
  ```
  and finally the complete map for any key:
  ```
  %{
    {:pin, 0} => [pin: 0, sysfs: 0, pin_key: :pin0, pin_label: :gpio0, pin_name: "GPIO0"],
    {:sysfs, 0} => [pin: 0, sysfs: 0, pin_key: :pin0, pin_label: :gpio0, pin_name: "GPIO0"],
    {:pin_key, :pin0} => [pin: 0, sysfs: 0, pin_key: :pin0, pin_label: :gpio0, pin_name: "GPIO0"],
    {:pin_label, :gpio0} => [pin: 0, sysfs: 0, pin_key: :pin0, pin_label: :gpio0, pin_name: "GPIO0"],
    {:pin_name, "GPIO0"} => [pin: 0, sysfs: 0, pin_key: :pin0, pin_label: :gpio0, pin_name: "GPIO0"],
    ...
  }
  ```
  as requested from `Breadboard.GPIO.BaseGPIO` module
  Note:
  for values in the extended item the requested key/value pairs are used if present where:
  * `:pin` value is forced to the original pin number
  * `:pin_name` if missing is built as "GPIO***n***" (n='pin number')
  * `:pin_key` if missing is built as :pin***n*** (n='pin number)
  * `:pin_label` if missing is built from 'pin name' as lowercase atom
  """

  @doc """
  Return the basic pinout definition map for all pin numbers to build the complete pinout map through `build_pinout_map/0`
  The keys of the map are the real pin number and the value (keyword list) must contains at least the pin name as key in the form:
  ```
  %{
    1 => [pin_name: "GPIO1"],
    ...
  }
  ```
  """
  @callback pinout_definition() :: map()

  @doc """
  Return the pin number in user space using sysfs.
  Arguments come from the item from `pinout_definition`
  - first argument: pin number
  - second argument: available pin information
  """
  @callback pin_to_sysfs_pin(non_neg_integer(), list()) :: non_neg_integer()

  defmacro __using__(_opts) do
    quote do
      @behaviour Breadboard.GPIO.BaseGPIOHelper

      alias Breadboard.GPIO.PinoutHelper

      # Builds the full pinout map: extend every entry of pinout_definition/0,
      # then expand each extended item into one map entry per key/value pair.
      def build_pinout_map() do
        pinout_definition()
        |> Enum.reduce([], &update_pin_info/2)
        |> PinoutHelper.expand_map_with_value()
      end

      # Extends a single {pin_number, info} entry with the :sysfs, :pin_key,
      # :pin_label, :pin_name and :pin keys (existing values are kept —
      # `put_new` — except :pin, which is forced to the map key).
      defp update_pin_info({pin_number, info}, pins) do
        pin_name = Keyword.get(info, :pin_name, PinoutHelper.to_pin_name("GPIO", pin_number))
        pin_info =
          info
          |> Keyword.put_new(:sysfs, pin_to_sysfs_pin(pin_number, info))
          |> Keyword.put_new(:pin_key, PinoutHelper.to_pin_key("pin", pin_number))
          |> Keyword.put_new(:pin_label, PinoutHelper.to_label_key(pin_name))
          |> Keyword.put_new(:pin_name, pin_name)
          # pin is unique as the original map pin_number
          |> Keyword.put(:pin, pin_number)
        [pin_info | pins]
      end
    end
  end
end
# SPDX-License-Identifier: Apache-2.0
|
lib/breadboard/gpio/base_gpio_helper.ex
| 0.73659 | 0.925432 |
base_gpio_helper.ex
|
starcoder
|
defmodule Macchiato.Token do
  @moduledoc """
  Splits source text into raw tokens and classifies them.
  """

  @doc """
  Splits `s` into a list of raw tokens: punctuation/word strings and
  `{:String, text}` tuples for double-quoted string literals.
  """
  def split(s) do
    split(s, [], "")
  end

  # `tokens` accumulates completed tokens in reverse order; `acc` is the
  # word currently being built (or " " after whitespace was seen).
  def split(s, tokens, acc) do
    {head, tail} = String.split_at(s, 1)
    case {head, acc} do
      {"(", ""} -> split(tail, ["(" | tokens], "")
      {"(", some} -> split(tail, ["(" | [some | tokens]], "")
      {")", ""} -> split(tail, [")" | tokens], "")
      {")", some} -> split(tail, [")" | [some | tokens]], "")
      {"\"", ""} -> case read_string(tail) do
        {str, rest} -> split(rest, [str | tokens], "")
      end
      {"\"", some} -> case read_string(tail) do
        {str, rest} -> split(rest, [str | [some | tokens]], "")
      end
      {"#", ""} -> split(tail, ["#" | tokens], "")
      {"#", some} -> split(tail, ["#" | [some | tokens]], "")
      {":", ""} -> split(tail, [":" | tokens], "")
      {":", some} -> split(tail, [":" | [some | tokens]], "")
      # Runs of whitespace collapse into a single " " marker in `acc`.
      {" ", " "} -> split(tail, tokens, acc)
      {" ", ""} -> split(tail, tokens, " ")
      {" ", some} -> split(tail, [some | tokens], " ")
      {"\n", " "} -> split(tail, tokens, acc)
      {"\n", ""} -> split(tail, tokens, " ")
      {"\n", some} -> split(tail, [some | tokens], " ")
      {"", ""} -> Enum.reverse(tokens)
      {"", some} -> split(tail, [some | tokens], "")
      {some, " "} -> split(tail, [" " | tokens], some)
      {some, _} -> split(tail, tokens, acc <> some)
    end
  end

  @doc """
  Reads a double-quoted string literal from `s` (the text after the opening
  quote). Returns `{{:String, text}, rest}`.
  Raises `ArgumentError` if the closing quote is missing.
  """
  def read_string(s) do
    read_string(s, 0)
  end

  def read_string(s, i) do
    case String.at(s, i) do
      "\"" ->
        case String.split_at(s, i) do
          {str, tail} -> case String.split_at(tail, 1) do
            {"\"", rest} -> {{:String, str}, rest}
          end
        end

      # Skip the escaped character following a backslash.
      "\\" ->
        read_string(s, i + 2)

      # FIX: `String.at/2` returns nil past the end of the string; previously
      # this fell into the catch-all clause and recursed forever on an
      # unterminated string literal. Fail loudly instead.
      nil ->
        raise ArgumentError, "unterminated string literal"

      _ ->
        read_string(s, i + 1)
    end
  end

  @doc "Classifies a raw token produced by `split/1`."
  def tokenize(word) do
    case word do
      "(" -> :LeftParen
      ")" -> :RightParen
      "#" -> :Sharp
      ":" -> :Colon
      " " -> :Space
      {:String, _} -> word
      _ -> identify(word)
    end
  end

  @doc "Classifies every raw token in `words`."
  def tokenize_all(words) do
    Enum.map(words, &tokenize/1)
  end

  @doc """
  Identifies a word as a number, a known literal ("t" -> "true",
  "nil" -> "null") or a symbol.
  """
  def identify(word) do
    if is_number_token(word) do
      {:Number, word}
    else case word do
      "t" -> "true"
      "nil" -> "null"
      _ -> {:Symbol, word}
    end
    end
  end

  @doc "Returns true if `word` is a (possibly signed, decimal/exponent) number."
  def is_number_token(word) do
    String.match?(word, ~r/^[+-]?(([[:digit:]]+\.?[[:digit:]]*)|([[:digit:]]*\.?[[:digit:]]+))(e[+-]?[[:digit:]]+)?$/)
  end
end
|
lib/token.ex
| 0.504394 | 0.578002 |
token.ex
|
starcoder
|
defmodule Etherscan.API.Proxy do
  @moduledoc """
  Module to wrap Etherscan Geth/Parity proxy endpoints.
  [Etherscan API Documentation](https://etherscan.io/apis#proxy)
  """

  use Etherscan.API
  use Etherscan.Constants

  alias Etherscan.{ProxyBlock, ProxyTransaction, ProxyTransactionReceipt}

  # Optional parameters for eth_estimate_gas/2; explicit values are merged
  # over these defaults before the request is built.
  @eth_estimate_gas_default_params %{
    to: nil,
    value: nil,
    gasPrice: nil,
    gas: nil
  }

  # NOTE: throughout this module, validation fallback clauses bind the
  # unused `network` argument as `_network` to avoid "variable is unused"
  # compiler warnings (the guards only inspect the other arguments).

  @doc """
  Returns the number of most recent block.
  ## Example
      iex> Etherscan.eth_block_number()
      {:ok, "#{@test_proxy_block_number}"}
  """
  def eth_block_number(network \\ :default) do
    "proxy"
    |> get("eth_blockNumber", %{}, network)
    |> parse()
    |> hex_to_number()
    |> wrap(:ok)
  end

  @doc """
  Returns information about a block by block number.
  ## Example
      iex> Etherscan.eth_get_block_by_number("#{@test_proxy_block_tag}")
      {:ok, %Etherscan.ProxyBlock{}}
  """
  def eth_get_block_by_number(tag, network \\ :default)

  def eth_get_block_by_number(tag, network) when is_binary(tag) do
    "proxy"
    |> get("eth_getBlockByNumber", %{tag: tag, boolean: true}, network)
    |> parse(as: %{"result" => %ProxyBlock{transactions: [%ProxyTransaction{}]}})
    |> wrap(:ok)
  end

  def eth_get_block_by_number(_, _), do: @error_invalid_tag

  @doc """
  Returns information about a uncle by block number.
  ## Example
      iex> Etherscan.eth_get_uncle_by_block_number_and_index("#{@test_proxy_uncle_tag}", "#{@test_proxy_index}")
      {:ok, %{"number" => "#{@test_proxy_uncle_block_tag}", ...}}
  """
  def eth_get_uncle_by_block_number_and_index(tag, index, network \\ :default)

  def eth_get_uncle_by_block_number_and_index(tag, index, network)
      when is_binary(tag) and is_binary(index) do
    "proxy"
    |> get("eth_getUncleByBlockNumberAndIndex", %{tag: tag, index: index}, network)
    |> parse()
    |> wrap(:ok)
  end

  def eth_get_uncle_by_block_number_and_index(tag, index, _network)
      when not is_binary(tag) and is_binary(index),
      do: @error_invalid_tag

  def eth_get_uncle_by_block_number_and_index(tag, index, _network)
      when not is_binary(index) and is_binary(tag),
      do: @error_invalid_index

  def eth_get_uncle_by_block_number_and_index(_, _, _), do: @error_invalid_tag_and_index

  @doc """
  Returns the number of transactions in a block from a block matching the
  given block number.
  ## Example
      iex> Etherscan.eth_get_block_transaction_count_by_number("#{@test_proxy_transaction_tag}")
      {:ok, "#{@test_proxy_block_transaction_count}"}
  """
  def eth_get_block_transaction_count_by_number(tag, network \\ :default)

  def eth_get_block_transaction_count_by_number(tag, network) when is_binary(tag) do
    "proxy"
    |> get("eth_getBlockTransactionCountByNumber", %{tag: tag}, network)
    |> parse()
    |> hex_to_number()
    |> wrap(:ok)
  end

  def eth_get_block_transaction_count_by_number(_, _), do: @error_invalid_tag

  @doc """
  Returns the information about a transaction requested by transaction hash.
  ## Example
      iex> transaction_hash = "#{@test_proxy_transaction_hash}"
      iex> Etherscan.eth_get_transaction_by_hash(transaction_hash)
      {:ok, %Etherscan.ProxyTransaction{}}
  """
  def eth_get_transaction_by_hash(transaction_hash, network \\ :default)

  def eth_get_transaction_by_hash(transaction_hash, network) when is_binary(transaction_hash) do
    "proxy"
    |> get("eth_getTransactionByHash", %{txhash: transaction_hash}, network)
    |> parse(as: %{"result" => %ProxyTransaction{}})
    |> wrap(:ok)
  end

  def eth_get_transaction_by_hash(_, _), do: @error_invalid_transaction_hash

  @doc """
  Returns information about a transaction by block number and transaction
  index position.
  ## Example
      iex> Etherscan.eth_get_transaction_by_block_number_and_index("#{@test_proxy_block_tag}", "#{@test_proxy_index}")
      {:ok, %Etherscan.ProxyTransaction{}}
  """
  def eth_get_transaction_by_block_number_and_index(tag, index, network \\ :default)

  def eth_get_transaction_by_block_number_and_index(tag, index, network)
      when is_binary(tag) and is_binary(index) do
    "proxy"
    |> get("eth_getTransactionByBlockNumberAndIndex", %{tag: tag, index: index}, network)
    |> parse(as: %{"result" => %ProxyTransaction{}})
    |> wrap(:ok)
  end

  def eth_get_transaction_by_block_number_and_index(tag, index, _network)
      when not is_binary(tag) and is_binary(index),
      do: @error_invalid_tag

  def eth_get_transaction_by_block_number_and_index(tag, index, _network)
      when not is_binary(index) and is_binary(tag),
      do: @error_invalid_index

  def eth_get_transaction_by_block_number_and_index(_, _, _), do: @error_invalid_tag_and_index

  @doc """
  Returns the number of transactions sent from an address.
  ## Example
      iex> Etherscan.eth_get_transaction_count("#{@test_proxy_address}")
      {:ok, #{@test_proxy_transaction_count}}
  """
  def eth_get_transaction_count(address, network \\ :default)

  def eth_get_transaction_count(address, network) when is_binary(address) do
    "proxy"
    |> get("eth_getTransactionCount", %{address: address, tag: "latest"}, network)
    |> parse()
    |> hex_to_number()
    |> wrap(:ok)
  end

  def eth_get_transaction_count(_, _), do: @error_invalid_address

  @doc """
  Creates new message call transaction or a contract creation for
  signed transactions.
  Replace the hex value with your raw hex encoded transaction that you want
  to send.
  ## Example
      iex> Etherscan.eth_send_raw_transaction("#{@test_proxy_hex}")
      {:ok, <TODO>}
  """
  def eth_send_raw_transaction(hex, network \\ :default)

  def eth_send_raw_transaction(hex, network) when is_binary(hex) do
    "proxy"
    |> get("eth_sendRawTransaction", %{hex: hex}, network)
    |> parse()
    |> wrap(:ok)
  end

  def eth_send_raw_transaction(_, _), do: @error_invalid_hex

  @doc """
  Returns the receipt of a transaction by transaction hash.
  ## Example
      iex> transaction_hash = "#{@test_proxy_transaction_hash}"
      iex> Etherscan.eth_get_transaction_receipt(transaction_hash)
      {:ok, %Etherscan.ProxyTransactionReceipt{}}
  """
  def eth_get_transaction_receipt(transaction_hash, network \\ :default)

  def eth_get_transaction_receipt(transaction_hash, network) when is_binary(transaction_hash) do
    "proxy"
    |> get("eth_getTransactionReceipt", %{txhash: transaction_hash}, network)
    |> parse(as: %{"result" => %ProxyTransactionReceipt{}})
    |> wrap(:ok)
  end

  def eth_get_transaction_receipt(_, _), do: @error_invalid_transaction_hash

  @doc """
  Executes a new message call immediately without creating a transaction on
  the block chain.
  ## Example
      iex> Etherscan.eth_call("#{@test_proxy_to}", "#{@test_proxy_data}")
      {:ok, "#{@test_proxy_eth_call_result}"}
  """
  def eth_call(to, data, network \\ :default)

  def eth_call(to, data, network) when is_binary(to) and is_binary(data) do
    "proxy"
    |> get("eth_call", %{to: to, data: data, tag: "latest"}, network)
    |> parse()
    |> wrap(:ok)
  end

  def eth_call(to, data, _network) when not is_binary(to) and is_binary(data), do: @error_invalid_to
  def eth_call(to, data, _network) when not is_binary(data) and is_binary(to), do: @error_invalid_data
  def eth_call(_, _, _), do: @error_invalid_to_and_data

  @doc """
  Returns code at a given address.
  ## Example
      iex> Etherscan.eth_get_code("#{@test_proxy_code_address}", "latest")
      {:ok, "#{@test_proxy_code_result}"}
  """
  def eth_get_code(address, tag, network \\ :default)

  def eth_get_code(address, tag, network) when is_binary(address) and is_binary(tag) do
    "proxy"
    |> get("eth_getCode", %{address: address, tag: tag}, network)
    |> parse()
    |> wrap(:ok)
  end

  def eth_get_code(address, tag, _network) when not is_binary(address) and is_binary(tag),
    do: @error_invalid_address

  def eth_get_code(address, tag, _network) when not is_binary(tag) and is_binary(address),
    do: @error_invalid_tag

  def eth_get_code(_, _, _), do: @error_invalid_address_and_tag

  @doc """
  Returns the value from a storage position at a given address.
  ## Example
      iex> Etherscan.eth_get_storage_at("#{@test_proxy_storage_address}", "#{@test_proxy_storage_position}")
      {:ok, "#{@test_proxy_storage_result}"}
  """
  def eth_get_storage_at(address, position, network \\ :default)

  def eth_get_storage_at(address, position, network) when is_binary(address) and is_binary(position) do
    "proxy"
    |> get("eth_getStorageAt", %{address: address, position: position, tag: "latest"}, network)
    |> parse()
    |> wrap(:ok)
  end

  def eth_get_storage_at(address, position, _network) when not is_binary(address) and is_binary(position),
    do: @error_invalid_address

  def eth_get_storage_at(address, position, _network) when not is_binary(position) and is_binary(address),
    do: @error_invalid_position

  def eth_get_storage_at(_, _, _), do: @error_invalid_address_and_position

  @doc """
  Returns the current price per gas in wei.
  ## Example
      iex> Etherscan.eth_gas_price()
      {:ok, "#{@test_proxy_current_gas}"}
  """
  def eth_gas_price(network \\ :default) do
    "proxy"
    |> get("eth_gasPrice", %{}, network)
    |> parse()
    |> hex_to_number()
    |> wrap(:ok)
  end

  @doc """
  Makes a call or transaction, which won't be added to the blockchain and
  returns the used gas, which can be used for estimating the used gas.
  ## Example
      iex> params = %{
        to: "#{@test_proxy_estimate_to}",
        value: "#{@test_proxy_value}",
        gasPrice: "#{@test_proxy_gas_price}",
        gas: "#{@test_proxy_gas}",
      }
      iex> Etherscan.eth_estimate_gas(params)
      {:ok, <TODO>}
  """
  def eth_estimate_gas(params, network \\ :default)

  def eth_estimate_gas(%{to: _, value: _, gasPrice: _, gas: _} = params, network) when is_map(params) do
    params = merge_params(params, @eth_estimate_gas_default_params)

    "proxy"
    |> get("eth_estimateGas", params, network)
    |> parse()
    |> wrap(:ok)
  end

  def eth_estimate_gas(_, _), do: @error_invalid_params
end
|
lib/etherscan/api/proxy.ex
| 0.84792 | 0.440229 |
proxy.ex
|
starcoder
|
defmodule Mix.Tasks.Elasticsearch.Build do
  @moduledoc """
  Builds Elasticsearch indexes using a zero-downtime, hot-swap technique.

  1. Build an index for the given `alias`, with a timestamp: `alias-12323123`
  2. Bulk upload data to that index using `store` and `sources`.
  3. Alias the `alias` to `alias-12323123`.
  4. Remove old indexes beginning with `alias`.
  5. Refresh `alias-12323123`.

  For a functional version of this approach, see
  `Elasticsearch.Index.hot_swap/4`.

  ## Example

      $ mix elasticsearch.build posts [index2] [index3]

  To build an index only if it does not exist, use the `--existing` option:

      $ mix elasticsearch.build posts --existing
      Index posts already exists.
  """

  # `use Mix.Task` registers this module as a Mix task so that
  # `mix elasticsearch.build` is discoverable from the command line;
  # without it the task cannot be invoked via `mix`.
  use Mix.Task

  require Logger

  alias Elasticsearch.{
    Index,
    Config
  }

  @doc false
  def run(args) do
    Mix.Task.run("app.start", [])

    {indexes, type} = parse_args!(args)

    for alias <- indexes do
      config = Config.config_for_index(alias)
      build(alias, config, type)
    end
  end

  # With `--existing`, only rebuild when no index for the alias exists yet.
  defp build(alias, config, :existing) do
    case Index.latest_starting_with(alias) do
      {:ok, name} ->
        IO.puts("Index already exists: #{name}")

      {:error, :not_found} ->
        build(alias, config, :rebuild)

      {:error, exception} ->
        Mix.raise(exception)
    end
  end

  # Unconditional rebuild via hot-swap. A `case` replaces the previous
  # single-clause `with ... else`, which is an anti-pattern when it only
  # wraps one expression.
  defp build(alias, %{settings: settings, store: store, sources: sources}, :rebuild) do
    case Index.hot_swap(alias, settings, store, sources) do
      :ok ->
        :ok

      {:error, errors} when is_list(errors) ->
        errors = for error <- errors, do: "#{inspect(error)}\n"

        Mix.raise("""
        Index created, but not aliased: #{alias}
        The following errors occurred:
        #{errors}
        """)

      {:error, :enoent} ->
        Mix.raise("""
        Schema file not found at #{settings}.
        """)

      {:error, exception} ->
        Mix.raise("""
        Index #{alias} could not be created.
        #{inspect(exception)}
        """)

      error ->
        Mix.raise(error)
    end
  end

  # Parses CLI arguments into `{indexes, :existing | :rebuild}`.
  defp parse_args!(args) do
    {options, indexes} =
      OptionParser.parse!(
        args,
        switches: [
          existing: :boolean
        ]
      )

    indexes =
      indexes
      |> Enum.map(&String.to_atom/1)
      |> MapSet.new()

    # A plain `if` replaces the former two-branch `cond`.
    type = if options[:existing], do: :existing, else: :rebuild

    validate_indexes!(indexes)

    {indexes, type}
  end

  # Raises unless every requested index name appears in the app config.
  defp validate_indexes!(indexes) do
    configured = configured_names()

    cond do
      MapSet.size(indexes) == 0 ->
        Mix.raise("""
        No indexes specified. The following indexes are configured:
        #{inspect(Enum.to_list(configured))}
        """)

      MapSet.subset?(indexes, configured) == false ->
        Mix.raise("""
        The following indexes are not configured:
        #{inspect(Enum.to_list(MapSet.difference(indexes, configured)))}
        """)

      true ->
        :ok
    end
  end

  # Index names declared under the :elasticsearch app's :indexes key.
  defp configured_names do
    config()
    |> Keyword.get(:indexes)
    |> Enum.map(fn {key, _val} -> key end)
    |> MapSet.new()
  end

  defp config do
    Application.get_all_env(:elasticsearch)
  end
end
|
lib/mix/elasticsearch.build.ex
| 0.904514 | 0.613873 |
elasticsearch.build.ex
|
starcoder
|
defmodule DeliriumTremex do
  @moduledoc """
  DeliriumTremex is a library for standardized
  [GraphQL](http://graphql.org/)
  error handling through
  [Absinthe](https://hex.pm/packages/absinthe).

  ## Idea

  All errors should be returned in the `errors` field.
  Errors have the following format:

  ```JSON
  {
    "key": "username",
    "message": "Username is already taken",
    "messages": ["is already taken", "is too short"],
    "fullMessages": ["Username is already taken", "Username is too short"],
    "index": null,
    "subErrors": null
  }
  ```

  Field explanation:

  * `key` - The key the error is attached to
  * `message` - A single error message associated to the key
  * `messages` - List of all the non-formatted error messages associated with the key
  * `fullMessages` - List of all the formatted error messages associated with the key
  * `index` - If the error is a nested error, specifies the index in the array at which the error occurred
  * `subErrors` - Contains all sub-errors of the key

  Sub-errors are errors that occur in nested data. E.g. let's say that you are
  creating an article together with an array of comments. If any of those
  comments fail validation their errors would be returned in the `subErrors`
  array.

  E.g.: If the second comment's content is too short.

  ```JSON
  {
    "key": "comments",
    "message": "Error validating all comments",
    "messages": null,
    "fullMessages": null,
    "index": null,
    "subErrors": [
      {
        "key": "content",
        "message": "Content is too short",
        "messages": ["is too short"],
        "fullMessages": ["Content is too short"],
        "index": 1,
        "subErrors": null
      }
    ]
  }
  ```

  Note that sub-errors can also have sub-errors which allows for basically
  infinite nesting. This should satisfy most use cases.

  ## Integrations

  Currently `DeliriumTremex` integrates with:

  * [Ecto](https://github.com/elixir-ecto/ecto) - Automatically formats validation errors if passed a changeset.

  ## Installation

  Add the following to your `mix.exs` file:

  ```Elixir
  defp deps do
    [
      {:absinthe, "~> 1.4"},
      {:delirium_tremex, "~> 0.1.0"}
    ]
  end
  ```

  If you have other dependencies just append the contents of the list above to
  your dependencies.

  ## Usage

  In your GraphQL/Absinthe schema add the following middleware to the
  queries/mutations for which you want the errors to be formatted.

  e.g.

  ```Elixir
  alias DeliriumTremex.Middleware.HandleErrors

  query do
    field :current_account, type: :account do
      resolve &AccountResolver.current_account/2
      middleware HandleErrors # <-- This line adds the error handling
    end
  end

  mutation do
    field :register, type: :account do
      arg :username, :string
      arg :password, :string

      resolve &AccountResolver.register/2
      middleware HandleErrors # <-- This line adds the error handling
    end
  end
  ```

  ## Contribution

  For suggestions, fixes or questions, please feel free to open an issue.
  Pull requests are always welcome.

  ## License

  This project is licensed under the GPLv3. It comes with absolutely no
  warranty. [The license is available in this repository.](/LICENSE.txt)
  """
end
|
lib/delirium_tremex.ex
| 0.8488 | 0.908496 |
delirium_tremex.ex
|
starcoder
|
defmodule Mariaex.Coder do
  @moduledoc """
  Declarative generator for MySQL protocol messages, which can generate, based on a declarative
  description, a decoder and an encoder.

  Example:

      defcoder :text_cmd do
        command 1
        statement :string_eof
      end

  Will generate 2 functions:

      __encode__({:text_cmd, 0x0e, "test"}) # => <<14, 116, 101, 115, 116>>
      __decode__(:text_cmd, <<14, 116, 101, 115, 116>>) # => {:text_cmd, 14, "test"}

  Additionally it generates a record, like `Record.record(:text_cmd, [:command, :statement])`,
  so that you can use it to create commands or access information in it.
  Example would be: `text_cmd(command: 14, statement: "test")`

  Check `Mariaex.Messages` for more examples.

  For now, it is possible to insert custom functions for decoding of data. An example is in the
  handshake command; see definition and implementation:

      `auth_plugin_data2: {__MODULE__, auth_plugin_data2}`

  It is used only for decoding, but it may change in the future for encoding.
  """

  # Sets up a using module: imports `defcoder/2` plus binary helpers, and
  # registers accumulating attributes where each `defcoder` appends its
  # generated decoder/encoder dispatch entries.
  defmacro __using__(_opts) do
    quote do
      import Mariaex.Coder, only: [defcoder: 2]
      import Record, only: [defrecord: 2]
      import Mariaex.Coder.Utils
      @before_compile unquote(__MODULE__)
      Module.register_attribute(__MODULE__, :decoders, accumulate: true)
      Module.register_attribute(__MODULE__, :encoders, accumulate: true)
    end
  end

  # At the end of compilation, emit one `__decode__/2` clause per registered
  # decoder and one `__encode__/1` clause per registered encoder, each
  # delegating to the per-message function generated by `defcoder`.
  defmacro __before_compile__(env) do
    decoders = Enum.reverse Module.get_attribute(env.module, :decoders)
    encoders = Enum.reverse Module.get_attribute(env.module, :encoders)
    [for {type, function} <- decoders do
      quote do
        def __decode__(unquote(type), body), do: unquote(function)(body)
      end
    end,
    for {type, function} <- encoders do
      quote do
        # `unquote(type)() = rec` matches the record tag generated below.
        def __encode__(unquote(type)() = rec), do: unquote(function)(rec)
      end
    end]
  end

  # Entry point: turns the declarative field spec into a record definition,
  # a decode function and an encode function for the named message.
  defmacro defcoder(name, [do: spec]) do
    # Normalize a single-field body to a list of field specs.
    spec = case spec do
      {:__block__, _meta, spec} -> spec
      spec -> [spec]
    end
    # Field names become record keys; `_` fields are padding and are skipped.
    keys = for {key, _, _} <- spec, key != :_, do: key
    decoder = split_to_stages(spec) |> gen_stages(name, keys)
    encoder = gen_encoder(name, spec, keys)
    quote do
      defrecord unquote(name), unquote(keys)
      unquote(decoder)
      unquote(encoder)
    end
  end

  # Builds `encode_<name>/1`: destructures the record and emits one flat
  # binary built from each field's encode match, then registers it.
  def gen_encoder(name, spec, keys) do
    function = ("encode_" <> Atom.to_string(name)) |> String.to_atom
    quote do
      Module.put_attribute __MODULE__, :encoders, {unquote(name), unquote(function)}
      def unquote(function)(unquote(name)(unquote(for key <- keys, do: {key, Macro.var(key, nil)}))) do
        unquote({:<<>>, [], Enum.flat_map(spec, &match(&1, :encode))})
      end
    end
  end

  @empty_stage %{head: [], body: nil}

  # Splits the field specs into decode "stages": a run of fixed-size fields
  # (integer lengths) forms a `head` matched in one binary pattern; the first
  # variable-size field ends the stage and becomes its `body`.
  defp split_to_stages(spec) do
    {last, other} = Enum.reduce(spec, {@empty_stage, []}, fn(kv = {_key, _, [value | _]}, {actual = %{head: head, body: _body}, all}) ->
      cond do
        is_integer(value) ->
          # Fixed-size field: keep accumulating into the current head.
          {%{actual | head: [kv | head]}, all}
        true ->
          # Variable-size field: close the stage, leaving `next` bound to the
          # unconsumed rest of the binary via the `:next` marker.
          {@empty_stage, [%{actual | head: Enum.reverse([:next | head]), body: kv} | all]}
      end
    end)
    # A trailing run of fixed-size fields forms a final body-less stage.
    case last do
      %{head: [], body: nil} -> other
      _ -> [%{last | head: Enum.reverse(last.head) } | other]
    end |> Enum.reverse
  end

  # Builds `decode_<name>/1` from the stages: the body threads the remaining
  # binary through variable `next`, then constructs the record, and the
  # function is registered in @decoders.
  defp gen_stages(allspec, name, keys) do
    matches = gen_matches(allspec, keys)
    function = ("decode_" <> Atom.to_string(name)) |> String.to_atom
    quote do
      Module.put_attribute __MODULE__, :decoders, {unquote(name), unquote(function)}
      def unquote(function)(next) do
        unquote_splicing(matches)
        unquote(name)(unquote(for key <- keys, do: {key, Macro.var(key, nil)}))
      end
    end
  end

  # For each stage, emit the fixed-size binary match (if any) followed by the
  # variable-size body match; all quoted expressions rebind `next`.
  defp gen_matches(allspec, keys) do
    for spec <- allspec do
      body = gen_body(spec[:body], keys)
      quoted_head = case spec[:head] do
        [:next] ->
          # Head contains only the rest-marker: nothing fixed to match.
          []
        head ->
          binary_match = {:<<>>, [], Enum.flat_map(head, &match(&1, :decode))}
          [(quote do: unquote(binary_match) = next)]
      end
      quoted_head ++ [body]
    end |> List.flatten
  end

  # --- gen_body/2: one clause per variable-size field kind (decode side) ---

  # 1-byte length prefix followed by that many bytes.
  defp gen_body({key, _, [:length_string]}, _) do
    quote do
      <<length :: size(8)-little, unquote(Macro.var(key, nil)) :: size(length)-binary, next :: binary>> = next
    end
  end
  # Custom decoder: `{module, function}` must return `{value, rest}`.
  defp gen_body({key, _, [{module, function}]}, _) do
    quote do: {unquote(Macro.var(key, nil)), next} = apply(unquote(module), unquote(function), [next])
  end
  # MySQL length-encoded integer (see Utils.length_encoded_integer/1).
  defp gen_body({key, _, [:length_encoded_integer]}, _) do
    quote do: {unquote(Macro.var(key, nil)), next} = length_encoded_integer(next)
  end
  # MySQL length-encoded string (length-encoded integer prefix + payload).
  defp gen_body({key, _, [:length_encoded_string]}, _) do
    quote do: {unquote(Macro.var(key, nil)), next} = length_encoded_string(next)
  end
  # Repeated length-encoded strings until the binary is exhausted.
  defp gen_body({key, _, [:length_encoded_string, :until_eof]}, _) do
    quote do: unquote(Macro.var(key, nil)) = length_encoded_string_eof(next)
  end
  # NUL-terminated string.
  defp gen_body({key, _, [:string]}, _) do
    quote do: [unquote(Macro.var(key, nil)), next] = :binary.split(next, <<0>>)
  end
  # Everything to the end of the packet.
  defp gen_body({key, _, [:string_eof]}, _) do
    quote do: unquote(Macro.var(key, nil)) = next
  end
  # Integer whose byte-width is computed at decode time by `function`.
  defp gen_body({key, _, [function]}, _keys) do
    quote do
      size = unquote(function) * 8
      <<unquote(Macro.var(key, nil)) :: size(size), next :: binary>> = next
    end
  end
  # Binary whose byte-length is computed at decode time by `function`.
  defp gen_body({key, _, [function, :string]}, _keys) do
    quote do
      size = unquote(function)
      <<unquote(Macro.var(key, nil)) :: size(size)-binary, next :: binary>> = next
    end
  end
  # Stage without a variable-size body (trailing fixed-size fields).
  defp gen_body(nil, _keys) do
    []
  end

  # --- match/2: binary-segment fragments for fixed-size fields; shared by
  # decode heads and the encode binary (second arg is :decode or :encode) ---

  # `_` padding field: `length` zero bytes.
  defp match({:_, _, [length]}, _) when is_integer(length) do
    [quote do: 0 :: unquote(length)*8]
  end
  # Little-endian integer of `length` bytes.
  defp match({key, _, [length]}, _) when is_integer(length) do
    [quote do: unquote(Macro.var(key, nil)) :: size(unquote(length*8))-little]
  end
  # Fixed-length binary.
  defp match({key, _, [length, :string]}, _) do
    [quote do: unquote(Macro.var(key, nil)) :: size(unquote(length))-binary]
  end
  # The rest-of-packet marker appended by split_to_stages/1.
  defp match(:next, _) do
    [quote do: next :: binary]
  end
  # NUL-terminated string (payload followed by a zero byte).
  defp match({key, _, [:string]}, _) do
    [(quote do: unquote(Macro.var(key, nil)) :: binary),
    (quote do: 0 :: 8)]
  end
  # Encode side: 1-byte length prefix + payload.
  defp match({key, _, [:length_string]}, :encode) do
    [(quote do: byte_size(unquote(Macro.var(key, nil))) :: 8 ),
    (quote do: unquote(Macro.var(key, nil)) :: binary)]
  end
  defp match({key, _, [:string_eof]}, :encode) do
    [(quote do: unquote(Macro.var(key, nil)) :: binary)]
  end
  # These clauses are lossy: it is impossible to generate this kind of
  # (length-encoded) integer in a plain binary match, so a bare integer
  # segment is emitted instead.
  defp match({key, _, [:length_encoded_integer]}, :encode) do
    [(quote do: unquote(Macro.var(key, nil)) :: integer)]
  end
  defp match({key, _, [:length_encoded_string | _]}, :encode) do
    [(quote do: unquote(Macro.var(key, nil)) :: binary)]
  end
  # All custom implementations are ignored yet (encoded as plain binaries).
  defp match({key, _, [{_module, _function}]}, :encode) do
    [(quote do: unquote(Macro.var(key, nil)) :: binary)]
  end

  defmodule Utils do
    # Runtime helpers for MySQL length-encoded values, imported into every
    # module that `use`s Mariaex.Coder.

    # Reads a length-encoded string: length prefix, then that many bytes.
    # Returns `{string, rest}`.
    def length_encoded_string(bin) do
      {length, next} = length_encoded_integer(bin)
      << string :: size(length)-binary, next :: binary >> = next
      {string, next}
    end

    # Reads length-encoded strings until the binary is fully consumed and
    # returns them in order.
    def length_encoded_string_eof(bin, acc \\ []) do
      case length_encoded_string(bin) do
        {value, ""} ->
          Enum.reverse([value | acc])
        {value, rest} ->
          length_encoded_string_eof(rest, [value | acc])
      end
    end

    # Decodes a MySQL length-encoded integer: one byte for values <= 250,
    # otherwise a marker byte (252/253/254) followed by a 2/3/8-byte
    # little-endian value. Returns `{value, rest}`.
    # NOTE(review): 251 (0xFB, the NULL marker in the text protocol) has no
    # clause here and would raise CaseClauseError — presumably handled by
    # callers before reaching this function; confirm.
    def length_encoded_integer(bin) do
      case bin do
        << value :: 8, rest :: binary >> when value <= 250 -> {value, rest}
        << 252 :: 8, value :: 16-little, rest :: bits >> -> {value, rest}
        << 253 :: 8, value :: 24-little, rest :: bits >> -> {value, rest}
        << 254 :: 8, value :: 64-little, rest :: bits >> -> {value, rest}
      end
    end

    # Encodes an integer in the length-encoded format described above.
    def to_length_encoded_integer(int) do
      case int do
        int when int <= 250 -> << int :: 8 >>
        int when int <= 65535 -> << 252 :: 8, int :: 16-little >>
        int when int <= 16777215 -> << 253 :: 8, int :: 24-little >>
        int -> << 254 :: 8, int :: 64-little >>
      end
    end
  end
end
|
lib/mariaex/coder.ex
| 0.73173 | 0.40204 |
coder.ex
|
starcoder
|
defmodule ExAliyun.MNS do
@moduledoc """
The official link to introduce [Alibaba Cloud Message Service](https://www.alibabacloud.com/help/doc-detail/27414.htm){:target="_blank"}.
## Queue APIs
* `batch_send_message/2`
* `batch_delete_message/3`
* `change_message_visibility/4`
* `create_queue/2`
* `delete_message/3`
* `delete_queue/2`
* `get_queue_attributes/2`
* `list_queues/1`
* `peek_message/2`
* `send_message/3`
* `set_queue_attributes/2`
* `receive_message/2`
## Topic APIs
* `create_topic/2`
* `delete_topic/1`
* `get_subscription_attributes/3`
* `get_topic_attributes/1`
* `list_subscriptions/2`
* `list_topics/1`
* `publish_topic_message/3`
* `set_subscription_attributes/4`
* `set_topic_attributes/2`
* `subscribe/4`
* `unsubscribe/3`
"""
alias ExAliyun.MNS.{Topic, Queue, Client, Config}
defmodule Operation do
@moduledoc false
defstruct [
:params,
:action,
:headers
]
end
@type result :: {:ok, map()} | {:error, map()} | {:error, term()}
@doc """
Send HTTP request, NO need to directly call this function by default.
The following options all are optional, but they are requeired to identify request's authorization in
every operation request, we can set them as a global config:
```elixir
config :ex_aliyun_mns,
access_key_id: "",
access_key_secret: "",
host: ""
```
Or set these option(s) via `config_overrides` option to dynamically set/override in each operation request.
## Config options
* `access_key_id`, optional, the access key id of Alibaba Cloud RAM for MNS;
* `access_key_secret`, optional, the access key secret of Alibaba Cloud RAM for MNS;
* `host`, optional, the MNS's regions to request, the available regions can be found in MNS's console,
e.g. "https://xxxx.mns.us-east-1.aliyuncs.com".
## Http options
* `timeout`, optional, time in milliseconds, used when receiving data over a connection, default it `15_000`.
"""
@spec request(operation :: Operation.t(), config_overrides :: Keyword.t()) :: result
def request(operation, config_overrides \\ [], http_opts \\ []) do
Client.request(operation, Config.new(config_overrides), http_opts)
end
@doc """
Create a new message queue, the message queue name should be no more than 256 characters, and constituted by letters, digits, or hyphens (-), while the first character must be a letter.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35129.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:delay_seconds`, optional, message sent to the queue can be consumed after `delay_seconds` seconds, the valid value range in 0..604800 (7 days), by default is 0 second;
* `:maximum_message_size`, optional, maximum body length of a message sent to the queue, measured in bytes, by default is 65536 (64 KB);
* `:message_retention_period`, optional, maximum lifetime of the message in the queue, measured in seconds,
the valid value range in 60..604800 seconds, by default is 259200 (3 days);
* `:visibility_timeout`, optional, the valid value range in 1..43200 seconds (12 hours), by default is 30 seconds;
* `:polling_wait_seconds`, optional, the valid value range in 0..30 seconds, by default is 0 second;
* `:logging_enabled`, optional, whether to enable MNS server logging, by default is false.
"""
@spec create_queue(queue_name :: String.t(), opts :: Keyword.t()) :: result
def create_queue(queue_name, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Queue.create(queue_name, opts) |> request(config_overrides)
end
@doc """
Modify attributes of a message queue.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35130.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:delay_seconds`, optional, message sent to the queue can be consumed after `delay_seconds` seconds, the valid value range in 0..604800 (7 days), by default is 0 second;
* `:maximum_message_size`, optional, maximum body length of a message sent to the queue, measured in bytes,
by default is 65536 (64 KB);
* `:message_retention_period`, optional, maximum lifetime of the message in the queue, measured in seconds,
the valid value range in 60..604800 seconds, by default is 259200 (3 days);
* `:visibility_timeout`, optional, the valid value range in 1..43200 seconds (12 hours), by default is 30 seconds;
* `:polling_wait_seconds`, optional, the valid value range in 0..30 seconds, by default is 0 second;
* `:logging_enabled`, optional, whether to enable MNS server logging, by default is false.
"""
@spec set_queue_attributes(queue_url :: String.t(), opts :: Keyword.t()) :: result
def set_queue_attributes(queue_url, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Queue.set_queue_attributes(queue_url, opts) |> request(config_overrides)
end
@doc """
Get the attributes of a message queue.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35131.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec get_queue_attributes(queue_url :: String.t(), opts :: Keyword.t()) :: result
def get_queue_attributes(queue_url, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Queue.get_queue_attributes(queue_url) |> request(config_overrides)
end
@doc """
List the available message queues.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35133.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:queue_name_prefix`, optional, search for the queue name starting with this prefix;
* `:number`, optional, maximum number of results returned for a single request, the valid value range in 1..1000, by default is 1000;
* `:marker`, optional, a similar pagination cursor when list a large queues list, which is acquired from the `NextMarker` returned in the previous request.
"""
@spec list_queues(opts :: Keyword.t()) :: result
def list_queues(opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Queue.list_queues(opts) |> request(config_overrides)
end
@doc """
Delete an existed message queue.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35132.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec delete_queue(queue_url :: String.t(), opts :: Keyword.t()) :: result
def delete_queue(queue_url, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Queue.delete(queue_url) |> request(config_overrides)
end
@doc """
Sand a message to MNS Queue.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35134.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:delay_seconds`, optional, message sent to the queue can be consumed after `delay_seconds` seconds, the valid value range in 0..604800 (7 days), by default is 0 second;
* `:priority`
"""
@spec send_message(queue_url :: String.t(), message_body :: String.t(), opts :: Keyword.t()) ::
result
def send_message(queue_url, message_body, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Queue.send_message(queue_url, message_body, opts) |> request(config_overrides)
end
@type mns_batch_message ::
String.t()
| [
{:message_body, String.t()},
{:delay_seconds, 0..604_800},
{:priority, 1..16}
]
@doc """
Send up to 16 messages to a MNS Queue in a single request.
[Alibaba Cloud API Docs](https://help.aliyun.com/document_detail/35135.html){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec batch_send_message(queue_url :: String.t(), messages :: [mns_batch_message]) :: result
def batch_send_message(queue_url, messages, opts \\ []) when is_list(messages) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Queue.batch_send_message(queue_url, messages) |> request(config_overrides)
end
@doc """
Delete a message from a MNS Queue.
[Alibaba Cloud API Docs](https://help.aliyun.com/document_detail/35138.html){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec delete_message(queue_url :: String.t(), receipt_handle :: String.t(), opts :: Keyword.t()) ::
result
def delete_message(queue_url, receipt_handle, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Queue.delete_message(queue_url, receipt_handle) |> request(config_overrides)
end
@doc """
Delete a list of messages from a MNS Queue in a single request.
[Alibaba Cloud API Docs](https://help.aliyun.com/document_detail/35139.html){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec batch_delete_message(
queue_url :: String.t(),
receipt_handles :: [String.t()],
opts :: Keyword.t()
) :: result
def batch_delete_message(queue_url, receipt_handles, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Queue.batch_delete_message(queue_url, receipt_handles) |> request(config_overrides)
end
@doc """
Read message(s) from a MNS Queue.
[Alibaba Cloud API Docs](https://help.aliyun.com/document_detail/35136.html){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:wait_time_seconds`, optional, the maximum wait time for polling message in current request, settable value range is 0..30 (seconds),
if not set this option will use Queue's `polling_wait_seconds` attribute (see `create_queue/2`) as default.
* `:number`, optional, receive up to 16 messages ([doc](https://help.aliyun.com/document_detail/35137.html)) from a MNS Queue in a single request, by default as 1.
"""
@spec receive_message(queue_url :: String.t(), opts :: Keyword.t()) :: result
def receive_message(queue_url, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Queue.receive_message(queue_url, opts) |> request(config_overrides)
end
@doc """
View message(s) from a MNS Queue but do not change message(s) status.
[Alibaba Cloud API Docs](https://help.aliyun.com/document_detail/35140.html){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:number`, optional, maximum number of messages can be viewed for the current operation ([see BatchPeekMessage doc](https://www.alibabacloud.com/help/doc-detail/35141.htm)), the default number is 1, the maximum number is 16.
"""
@spec peek_message(queue_url :: String.t(), opts :: Keyword.t()) :: result
def peek_message(queue_url, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Queue.peek_message(queue_url, opts) |> request(config_overrides)
end
@doc """
Modify the next consumable time of a message which has been consumed and is still in `inactive` status. After `VisibilityTimeout` of the message is modified successfully, a new ReceiptHandle will be returned.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/35142.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec change_message_visibility(
queue_url :: String.t(),
receipt_handle :: String.t(),
visibility_timeout :: integer(),
opts :: Keyword.t()
) :: result
def change_message_visibility(queue_url, receipt_handle, visibility_timeout, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Queue.change_message_visibility(queue_url, receipt_handle, visibility_timeout)
|> request(config_overrides)
end
@doc """
Create a new topic, a topic name is a string of no more than 256 characters, including letters, numbers, and hyphens (-). It must start with a letter or number.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/27495.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:maximum_message_size`, optional, maximum body length of a message sent to the queue, measured in bytes, by default is 65536 (64 KB);
* `:logging_enabled`, optional, whether to enable MNS server logging, by default is false.
"""
@spec create_topic(topic_name :: String.t(), opts :: Keyword.t()) :: result
def create_topic(topic_name, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Topic.create(topic_name, opts) |> request(config_overrides)
end
@doc """
Modify the attributes of an existing topic.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140704.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:maximum_message_size`, optional, maximum body length of a message sent to the queue, measured in bytes, by default is 65536 (64 KB);
* `:logging_enabled`, optional, whether to enable MNS server logging, by default is false.
"""
@spec set_topic_attributes(topic_url :: String.t(), opts :: Keyword.t()) :: result
def set_topic_attributes(topic_url, opts) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Topic.set_topic_attributes(topic_url, opts) |> request(config_overrides)
end
@doc """
Get the attributes of an existing topic.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140711.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec get_topic_attributes(topic_url :: String.t()) :: result
def get_topic_attributes(topic_url, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Topic.get_topic_attributes(topic_url) |> request(config_overrides)
end
@doc """
Delete an existing topic.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140713.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec delete_topic(topic_url :: String.t()) :: result
def delete_topic(topic_url, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Topic.delete(topic_url) |> request(config_overrides)
end
@doc """
List the topics of an account.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140714.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:topic_name_prefix`, optional, search for the topic name starting with this prefix;
* `:number`, optional, maximum number of results returned for a single request, the valid value range in 1..1000, by default is 1000;
* `:marker`, optional, a similar pagination cursor when list a large topics list, which is acquired from the `NextMarker` returned in the previous request.
"""
@spec list_topics(opts :: Keyword.t()) :: result
def list_topics(opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Topic.list_topics(opts) |> request(config_overrides)
end
@doc """
Create a subscription to a topic.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/27496.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:filter_tag`, optional, a string no more than 16 characters, there is no message filter set by default;
* `:notify_strategy`, optional, `"BACKOFF_RETRY"` or `"EXPONENTIAL_DECAY_RETRY"`, as `"BACKOFF_RETRY"` by default;
* `:notify_content_format`, optional, `"XML"`, `"JSON"`, or `"SIMPLIFIED"`, as `"XML"` by default
"""
@spec subscribe(
topic_url :: String.t(),
subscription_name :: String.t(),
endpoint :: String.t(),
opts :: Keyword.t()
) :: result
def subscribe(topic_url, subscription_name, endpoint, opts \\ []) do
{config_overrides, opts} = Keyword.pop(opts, :config_overrides, [])
Topic.subscribe(topic_url, subscription_name, endpoint, opts) |> request(config_overrides)
end
@doc """
Modify `notify_strategy` of subscription attribute, the value of `notify_strategy`
can be set as `"BACKOFF_RETRY"` or `"EXPONENTIAL_DECAY_RETRY"`.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140719.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec set_subscription_attributes(
topic_url :: String.t(),
subscription_name :: String.t(),
notify_strategy :: String.t(),
opts :: Keyword.t()
) :: result
def set_subscription_attributes(topic_url, subscription_name, notify_strategy, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
topic_url
|> Topic.set_subscription_attributes(subscription_name, notify_strategy)
|> request(config_overrides)
end
@doc """
Get subscription attributes.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140720.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec get_subscription_attributes(
topic_url :: String.t(),
subscription_name :: String.t(),
opts :: Keyword.t()
) :: result
def get_subscription_attributes(topic_url, subscription_name, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
topic_url
|> Topic.get_subscription_attributes(subscription_name)
|> request(config_overrides)
end
@doc """
Cancel a subscription.
[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140721.htm){:target="_blank"}
## Options
* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details.
"""
@spec unsubscribe(topic_url :: String.t(), subscription_name :: String.t(), opts :: Keyword.t()) ::
result
def unsubscribe(topic_url, subscription_name, opts \\ []) do
config_overrides = Keyword.get(opts, :config_overrides, [])
Topic.unsubscribe(topic_url, subscription_name) |> request(config_overrides)
end
@doc """
List the subscriptions to a topic, support pagination query.

[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/140718.htm){:target="_blank"}

## Options

* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:subscription_name_prefix`, optional, search for the subscription name starting with this prefix;
* `:number`, optional, maximum number of results returned for a single request, the valid value range in 1..1000, by default is 1000;
* `:marker`, optional, a similar pagination cursor when list a large subscriptions list, which is acquired from the `NextMarker` returned in the previous request.
"""
@spec list_subscriptions(topic_url :: String.t(), opts :: Keyword.t()) :: result
def list_subscriptions(topic_url, opts \\ []) do
  # Split the request-config override off from the query options before
  # forwarding the remaining options to the API call.
  {overrides, query_opts} = Keyword.pop(opts, :config_overrides, [])

  topic_url
  |> Topic.list_subscriptions(query_opts)
  |> request(overrides)
end
@doc """
Publish a message to a specified topic, the message is pushed to endpoints for consumption.

[Alibaba Cloud API Docs](https://www.alibabacloud.com/help/doc-detail/27497.htm){:target="_blank"}

## Options

* `:config_overrides`, optional, the options in `config_overrides`, please see `request/2` for details;
* `:message_tag`, optional, a string no more than 16 characters, there is no message tag set by default;
* `:message_attributes`, optional, a string of message attributes, only be useable for email or SMS push, please see API documents for details.
"""
@spec publish_topic_message(
        topic_url :: String.t(),
        message_body :: String.t(),
        opts :: Keyword.t()
      ) :: result
def publish_topic_message(topic_url, message_body, opts \\ []) do
  # `:config_overrides` is consumed here; everything else is message options.
  {overrides, message_opts} = Keyword.pop(opts, :config_overrides, [])

  topic_url
  |> Topic.publish_message(message_body, message_opts)
  |> request(overrides)
end
@doc false
def format_opts_to_headers(opts) do
  # Translate recognised option keys into MNS request headers; unrecognised
  # keys are silently dropped. Because matched headers are prepended to the
  # accumulator, the result is in reverse option order.
  Enum.reduce(opts, [], fn {key, value}, acc ->
    case format_header(key, value) do
      nil -> acc
      header -> [header | acc]
    end
  end)
end

# Maps a single option key/value to an MNS header tuple, or nil when the key
# is not a recognised header option.
#
# Note: the `@doc false` that previously preceded these clauses was removed —
# `@doc` is always discarded on private functions and only produces a
# compiler warning.
defp format_header(:topic_name_prefix, value) do
  {"x-mns-prefix", "#{value}"}
end

defp format_header(:queue_name_prefix, value) do
  {"x-mns-prefix", "#{value}"}
end

defp format_header(:subscription_name_prefix, value) do
  {"x-mns-prefix", "#{value}"}
end

defp format_header(:number, value) do
  {"x-mns-ret-number", "#{value}"}
end

defp format_header(:marker, value) do
  {"x-mns-marker", value}
end

defp format_header(_key, _value) do
  nil
end
end
|
lib/ex_aliyun/mns.ex
| 0.854399 | 0.723138 |
mns.ex
|
starcoder
|
defmodule Support.Setup do
  @moduledoc """
  Functions that are designed to be used as chained `setup` callbacks.

  Each callback takes certain values from the `context` state and merges in new or updated values.
  Because they only get supplied the `context` as a parameter, each function has a contract around
  the keys that are used.

  ## Categories

  * `insert` functions create database records
  * `request` functions navigate to web pages

  ## Examples

  Inserts a user, creates a tweak for that user, and then stars that tweak for that user:

  ```
  setup [:insert_user, :insert_tweak, :insert_star]
  ```
  """

  import Phoenix.ConnTest
  import AtomTweaks.Factory

  alias AtomTweaks.Accounts
  alias AtomTweaks.Tweaks
  alias AtomTweaksWeb.Router.Helpers, as: Routes
  alias Plug.Test, as: PlugTest

  # Required by Phoenix.ConnTest request helpers (get/post below).
  @endpoint AtomTweaksWeb.Endpoint

  @doc """
  Inserts a release note.

  ## Outputs

  * `:note` -- Release note that was inserted into the database
  """
  def insert_release_note(context)

  def insert_release_note(_context) do
    note = insert(:note)
    {:ok, note: note}
  end

  @doc """
  Inserts a site admin user into the database.

  ## Outputs

  * `:user` - Site admin user record
  """
  def insert_site_admin(context)

  def insert_site_admin(_context) do
    user = insert(:user, site_admin: true)
    {:ok, user: user}
  end

  @doc """
  Stars a tweak by a user.

  ## Inputs

  * `:user` - User to star the tweak
  * `:tweak` - Tweak to be starred

  ## Outputs

  * `:star` - Star record
  """
  def insert_star(context)

  def insert_star(%{user: user, tweak: tweak}) do
    {:ok, star} = Accounts.star_tweak(user, tweak)
    {:ok, star: star}
  end

  @doc """
  Inserts a single tweak for a user.

  ## Inputs

  Inserts a tweak for the first of:

  1. `:current_user`
  1. `:user`
  1. Inserts a new user

  ## Outputs

  * `:tweak` - New tweak record
  * `:user` - User record that created the tweak
  """
  def insert_tweak(context)

  def insert_tweak(%{current_user: user}) do
    tweak = insert(:tweak, user: user)
    {:ok, tweak: tweak}
  end

  def insert_tweak(%{user: user}) do
    tweak = insert(:tweak, user: user)
    {:ok, tweak: tweak}
  end

  def insert_tweak(_context) do
    user = insert(:user)
    tweak = insert(:tweak, user: user)
    {:ok, tweak: tweak, user: user}
  end

  @doc """
  Inserts three API tokens for a user.

  ## Inputs

  * `:user` - User record that owns the tokens

  ## Outputs

  * `:tokens` - List of the inserted token records
  """
  def insert_tokens(context)

  def insert_tokens(%{user: user}) do
    tokens = insert_list(3, :token, user: user)
    {:ok, tokens: tokens}
  end

  @doc """
  Inserts a user into the database.

  ## Outputs

  * `:user` - New user record
  """
  def insert_user(context)

  def insert_user(_context) do
    user = insert(:user)
    {:ok, user: user}
  end

  @doc """
  Inserts a user with three tweaks into the database.

  ## Outputs

  * `:user` - New user record
  * `:tweaks` - New tweak records
  """
  def insert_user_with_tweaks(context)

  def insert_user_with_tweaks(_context) do
    user = insert(:user)
    tweaks = insert_list(3, :tweak, user: user)
    {:ok, user: user, tweaks: tweaks}
  end

  @doc """
  Forks a tweak.

  ## Inputs

  * `:tweaks`

  ## Outputs

  * `:fork_user` -- User that forked the tweak
  * `:fork_tweak` -- New fork tweak
  * `:forked_tweak` -- Tweak that was forked
  """
  def fork_tweak(%{tweaks: tweaks}) do
    fork_user = insert(:user)
    # The first tweak in the list is the one that gets forked.
    forked_tweak = hd(tweaks)
    {:ok, fork_tweak} = Tweaks.fork_tweak(forked_tweak, fork_user)
    {:ok, fork_user: fork_user, fork_tweak: fork_tweak, forked_tweak: forked_tweak}
  end

  @doc """
  Inserts an init tweak.

  ## Outputs

  * `:init_tweak` -- Tweak that was created
  """
  def insert_init_tweak(context)

  def insert_init_tweak(_context) do
    tweak = insert(:tweak, type: "init")
    {:ok, init_tweak: tweak}
  end

  @doc """
  Inserts a style tweak.

  ## Outputs

  * `:style_tweak` -- Tweak that was created
  """
  def insert_style_tweak(context)

  def insert_style_tweak(_context) do
    tweak = insert(:tweak, type: "style")
    {:ok, style_tweak: tweak}
  end

  @doc """
  Creates an invalid set of parameters for a tweak.

  ## Outputs

  * `:tweak_params` -- Invalid params
  """
  def invalid_tweak_params(_context) do
    # An empty title makes the tweak params invalid.
    {:ok, tweak_params: params_for(:tweak, title: "")}
  end

  @doc """
  Logs in as a user.

  ## Inputs

  * `:conn` -- Connection object

  Logs in as (in priority order):

  1. `:user`
  1. Inserts a new user into the database

  ## Outputs

  * `:current_user` - User record used to log in
  """
  def log_in(context)

  def log_in(%{conn: conn, user: user}) do
    conn = PlugTest.init_test_session(conn, %{current_user: user})
    {:ok, conn: conn, current_user: user}
  end

  def log_in(%{conn: conn}) do
    user = insert(:user)
    conn = PlugTest.init_test_session(conn, %{current_user: user})
    {:ok, conn: conn, current_user: user}
  end

  @doc """
  Requests the admin release note edit page.

  ## Inputs

  * `:conn` -- Connection object
  * `:note` -- Release note to edit

  ## Outputs

  * `:conn` -- Updated connection object
  """
  def request_admin_release_note_edit(context)

  def request_admin_release_note_edit(%{conn: conn, note: note}) do
    conn = get(conn, Routes.admin_release_note_path(conn, :edit, note))
    {:ok, conn: conn}
  end

  @doc """
  Requests the admin release notes index page.

  ## Inputs

  * `:conn` -- Connection object

  ## Outputs

  * `:conn` -- Updated connection object
  """
  def request_admin_release_note_index(context)

  def request_admin_release_note_index(%{conn: conn}) do
    conn = get(conn, Routes.admin_release_note_path(conn, :index))
    {:ok, conn: conn}
  end

  @doc """
  Requests the admin release note new page.

  ## Inputs

  * `:conn` -- Connection object

  ## Outputs

  * `:conn` -- Updated connection object
  """
  def request_admin_release_note_new(context)

  def request_admin_release_note_new(%{conn: conn}) do
    conn = get(conn, Routes.admin_release_note_path(conn, :new))
    {:ok, conn: conn}
  end

  @doc """
  Requests the admin release note show page.

  ## Inputs

  * `:conn` -- Connection object
  * `:note` -- Release note to show

  ## Outputs

  * `:conn` -- Updated connection object
  """
  def request_admin_release_note_show(context)

  def request_admin_release_note_show(%{conn: conn, note: note}) do
    conn = get(conn, Routes.admin_release_note_path(conn, :show, note))
    {:ok, conn: conn}
  end

  @doc """
  Creates a new user in the database to request a page.

  ## Outputs

  * `:request_user` - User record that requests a page
  """
  def request_user(_context) do
    {:ok, request_user: insert(:user)}
  end

  @doc """
  Requests the create tweak page.

  ## Inputs

  * `:conn` -- Connection object
  * `:current_user` - Currently logged in user record, if any
  * `:request_user` - User record requesting the page
  * `:tweak_params` - Parameters to use to create the tweak

  ## Outputs

  * `:conn` - Updated connection object
  """
  def request_create_tweak(context)

  def request_create_tweak(%{
        conn: conn,
        current_user: _,
        request_user: user,
        tweak_params: tweak_params
      }) do
    params = %{"name" => user.name, "tweak" => tweak_params}
    conn = post(conn, Routes.tweak_path(conn, :create), params)
    {:ok, conn: conn}
  end

  def request_create_tweak(%{conn: conn, current_user: user, tweak_params: tweak_params}) do
    params = %{"name" => user.name, "tweak" => tweak_params}
    conn = post(conn, Routes.tweak_path(conn, :create), params)
    {:ok, conn: conn}
  end

  def request_create_tweak(%{conn: conn, tweak_params: tweak_params, user: user}) do
    params = %{"name" => user.name, "tweak" => tweak_params}
    conn = post(conn, Routes.tweak_path(conn, :create), params)
    {:ok, conn: conn}
  end

  @doc """
  Requests the edit tweak page.

  ## Inputs

  * `:conn` - `Plug.Conn` object
  * `:tweak` - Tweak record to edit

  ## Outputs

  * `:conn` - `Plug.Conn` object after the page is rendered
  """
  def request_edit_tweak(context)

  def request_edit_tweak(%{conn: conn, tweak: tweak}) do
    conn = get(conn, Routes.tweak_path(conn, :edit, tweak))
    {:ok, conn: conn}
  end

  @doc """
  Requests the new tweak page.

  ## Inputs

  * `:conn` - `Plug.Conn` object

  ## Outputs

  * `:conn` - `Plug.Conn` object after the page is rendered
  """
  def request_new_tweak(context)

  def request_new_tweak(%{conn: conn}) do
    conn = get(conn, Routes.tweak_path(conn, :new))
    {:ok, conn: conn}
  end

  @doc """
  Requests the show tweak page.

  ## Inputs

  * `:conn` - `Plug.Conn` object
  * `:tweak` - Tweak record to show

  ## Outputs

  * `:conn` - `Plug.Conn` object after the page is rendered
  """
  def request_show_tweak(context)

  def request_show_tweak(%{conn: conn, tweak: tweak}) do
    conn = get(conn, Routes.tweak_path(conn, :show, tweak))
    {:ok, conn: conn}
  end

  @doc """
  Requests the show user page.

  ## Inputs

  * `:conn` - `Plug.Conn` object
  * `:current_user` - Currently logged in user record
  * `:request_user` - User record to show
  * `:user` - User record to show

  ## Outputs

  * `:conn` - `Plug.Conn` object after the page is rendered
  * `:path` - Path that was navigated to
  """
  def request_show_user(context)

  def request_show_user(%{conn: conn, current_user: _, request_user: user}) do
    path = Routes.user_path(conn, :show, user.name)
    conn = get(conn, path)
    {:ok, conn: conn, path: path}
  end

  def request_show_user(%{conn: conn, user: user}) do
    path = Routes.user_path(conn, :show, user.name)
    conn = get(conn, path)
    {:ok, conn: conn, path: path}
  end

  @doc """
  Navigates to the stars page for a user.

  ## Inputs

  * `:conn` - `Plug.Conn` object
  * `:user` - User whose stars we will view

  ## Outputs

  * `:conn` - Updated `Plug.Conn` object
  * `:path` - Path that was navigated to
  """
  def request_stars(context)

  def request_stars(%{conn: conn, user: user}) do
    path = Routes.user_star_path(conn, :index, user)
    conn = get(conn, path)
    {:ok, conn: conn, path: path}
  end

  @doc """
  Navigates to the forks page for a tweak.

  ## Inputs

  * `:conn` - `Plug.Conn` object
  * `:forked_tweak` or `:tweak` - Tweak whose forks to list (`:forked_tweak`
    takes precedence when both are present)

  ## Outputs

  * `:conn` - Updated `Plug.Conn` object
  * `:path` - Path that was navigated to
  """
  def request_forks(context)

  def request_forks(%{conn: conn, forked_tweak: forked_tweak}) do
    path = Routes.tweak_fork_path(conn, :index, forked_tweak)
    conn = get(conn, path)
    {:ok, conn: conn, path: path}
  end

  def request_forks(%{conn: conn, tweak: tweak}) do
    path = Routes.tweak_fork_path(conn, :index, tweak)
    conn = get(conn, path)
    {:ok, conn: conn, path: path}
  end

  @doc """
  Requests the about page.
  """
  def request_page_about(context)

  def request_page_about(%{conn: conn}) do
    conn = get(conn, Routes.page_path(conn, :about))
    {:ok, conn: conn}
  end

  @doc """
  Requests the home page.
  """
  def request_page_index(context)

  def request_page_index(%{conn: conn}) do
    conn = get(conn, Routes.page_path(conn, :index))
    {:ok, conn: conn}
  end

  @doc """
  Requests the release notes page.
  """
  def request_page_release_notes(context)

  def request_page_release_notes(%{conn: conn}) do
    conn = get(conn, Routes.page_path(conn, :release_notes))
    {:ok, conn: conn}
  end

  @doc """
  Posts to the user's create token endpoint.

  ## Inputs

  * `:conn` - `Plug.Conn` object
  * `:token_params` - Parameters for the token to create
  * `:user` - User record that will own the token

  ## Outputs

  * `:conn` - Updated `Plug.Conn` object
  """
  def request_user_token_create(context)

  def request_user_token_create(%{conn: conn, token_params: token_params, user: user}) do
    conn = post(conn, Routes.user_token_path(conn, :create, user), token: token_params)
    {:ok, conn: conn}
  end

  @doc """
  Requests the user's token index page.
  """
  def request_user_token_index(context)

  def request_user_token_index(%{conn: conn, user: user}) do
    conn = get(conn, Routes.user_token_path(conn, :index, user))
    {:ok, conn: conn}
  end

  @doc """
  Requests the user's new token page.
  """
  def request_user_token_new(context)

  def request_user_token_new(%{conn: conn, user: user}) do
    conn = get(conn, Routes.user_token_path(conn, :new, user))
    {:ok, conn: conn}
  end

  @doc """
  Generates valid tweak parameters.
  """
  def valid_tweak_params(_context) do
    {:ok, tweak_params: params_for(:tweak)}
  end

  @doc """
  Generates an invalid set of token parameters (empty description).

  ## Inputs

  * `:user` - User record that owns the token

  ## Outputs

  * `:token_params` - Invalid token params
  """
  def invalid_token_params(context)

  def invalid_token_params(%{user: user}) do
    {:ok, token_params: params_for(:token, user: user, description: "")}
  end

  @doc """
  Generates valid token parameters.
  """
  def valid_token_params(context)

  def valid_token_params(%{user: user}) do
    {:ok, token_params: params_for(:token, user: user)}
  end
end
|
test/support/setup.ex
| 0.845974 | 0.90389 |
setup.ex
|
starcoder
|
defmodule Packet do
  @moduledoc """
  A decoded BITS packet: a version, a type id, a literal value (type id 4)
  and, for operator packets, a list of child packets.
  """

  # Default to `nil` (lowercase). The original used `Nil`, which in Elixir is
  # the alias :"Elixir.Nil" — an accidental atom, not the "no value" literal —
  # and contradicted the integer() typespec below.
  defstruct version: nil, id: nil, value: 0, children: []

  @type t :: %__MODULE__{
          version: integer(),
          id: integer(),
          value: integer(),
          children: [Packet.t()]
        }

  @doc "Converts a boolean to 1 (true) or 0 (false)."
  @spec b_to_i(boolean()) :: integer()
  def b_to_i(b)
  def b_to_i(true), do: 1
  def b_to_i(false), do: 0

  @doc "Sums this packet's version with the versions of all descendant packets."
  @spec add_version(Packet.t()) :: integer()
  def add_version(packet),
    do: packet.version + Enum.sum(Enum.map(packet.children, &add_version/1))

  @doc """
  Evaluates a packet according to its type id: sum/product/min/max over the
  children (ids 0-3), the literal value (id 4), or a comparison of exactly
  two children (ids 5-7, yielding 1 or 0).
  """
  @spec calculate_value(Packet.t()) :: integer()
  def calculate_value(packet)

  def calculate_value(%Packet{id: 0, children: children}),
    do: Enum.sum(Enum.map(children, &calculate_value/1))

  def calculate_value(%Packet{id: 1, children: children}),
    do: Enum.product(Enum.map(children, &calculate_value/1))

  def calculate_value(%Packet{id: 2, children: children}),
    do: Enum.min(Enum.map(children, &calculate_value/1))

  def calculate_value(%Packet{id: 3, children: children}),
    do: Enum.max(Enum.map(children, &calculate_value/1))

  def calculate_value(%Packet{id: 4, value: value}), do: value

  def calculate_value(%Packet{id: 5, children: [a, b]}),
    do: b_to_i(calculate_value(a) > calculate_value(b))

  def calculate_value(%Packet{id: 6, children: [a, b]}),
    do: b_to_i(calculate_value(a) < calculate_value(b))

  def calculate_value(%Packet{id: 7, children: [a, b]}),
    do: b_to_i(calculate_value(a) == calculate_value(b))
end
defmodule Decoder do
  @moduledoc """
  Parses hex-encoded BITS transmissions into `Packet` structs.
  """

  # A "bit" here is the grapheme "0" or "1", not an integer: `hex_to_bits/1`
  # returns `String.graphemes/1` output and every pattern below matches the
  # strings "0"/"1". The previous `integer()` spec was incorrect.
  @type bit :: String.t()

  @doc """
  Converts a hex string into a list of "0"/"1" graphemes, left-padded so
  every hex digit contributes exactly four bits.
  """
  @spec hex_to_bits(String.t()) :: [bit()]
  def hex_to_bits(input) do
    input
    |> String.to_integer(16)
    |> Integer.to_string(2)
    |> String.pad_leading(String.length(input) * 4, "0")
    |> String.graphemes()
  end

  @doc "Interprets a list of \"0\"/\"1\" graphemes as an unsigned base-2 integer."
  @spec bits_to_int([bit()]) :: integer()
  def bits_to_int(bits), do: Enum.join(bits, "") |> String.to_integer(2)

  @doc """
  Consumes a literal value encoded in 5-bit groups: a leading "1" means more
  groups follow, a leading "0" marks the last group. Returns the accumulated
  value bits and the leftover bits.
  """
  @spec pop_value([bit()], [bit()]) :: {[bit()], [bit()]}
  def pop_value(bits, acc)
  def pop_value(["1", a1, a2, a3, a4 | leftover], acc), do: pop_value(leftover, acc ++ [a1, a2, a3, a4])
  def pop_value(["0", a1, a2, a3, a4 | leftover], acc), do: {acc ++ [a1, a2, a3, a4], leftover}

  @doc """
  Decodes consecutive packets from `bits`, stopping after `num_packets`
  packets (or at end of input when `num_packets` is the -1 sentinel).
  """
  @spec multi_packet([bit()], integer(), [Packet.t()]) :: {[Packet.t()], [bit()]}
  def multi_packet(bits, num_packets \\ -1, packets \\ [])
  def multi_packet([], _num_packets, packets), do: {packets, []}
  def multi_packet(bits, 0, packets), do: {packets, bits}

  def multi_packet(bits, num_packets, packets) do
    {packet, leftover} = decode(bits)
    multi_packet(leftover, num_packets - 1, packets ++ [packet])
  end

  @doc """
  Decodes an operator packet's children. Length-type "1": the next 11 bits
  give the number of sub-packets. Length-type "0": the next 15 bits give the
  total bit-length of the sub-packets.
  """
  @spec decode_children([bit()]) :: {[Packet.t()], [bit()]}
  def decode_children(bits)

  def decode_children(["1" | bits]) do
    {size, bits} = Enum.split(bits, 11)
    size = bits_to_int(size)
    multi_packet(bits, size)
  end

  def decode_children(["0" | bits]) do
    {size, bits} = Enum.split(bits, 15)
    size = bits_to_int(size)
    {packets, leftover} = Enum.split(bits, size)
    # All of the sliced-off bits must be consumed by the children.
    {children, []} = multi_packet(packets)
    {children, leftover}
  end

  @doc """
  Decodes one packet (3-bit version, 3-bit type id, then either a literal
  value for type 4 or child packets otherwise). Returns the packet and the
  remaining bits.
  """
  @spec decode([bit()]) :: {Packet.t(), [bit()]}
  def decode(input)

  def decode([v1, v2, v3, "1", "0", "0" | lit_value]) do
    version = bits_to_int([v1, v2, v3])
    id = 4
    {value_bits, leftover_bits} = pop_value(lit_value, [])
    value = bits_to_int(value_bits)
    {%Packet{version: version, id: id, value: value}, leftover_bits}
  end

  def decode([v1, v2, v3, id1, id2, id3 | bits]) do
    version = bits_to_int([v1, v2, v3])
    id = bits_to_int([id1, id2, id3])
    {children, leftover} = decode_children(bits)
    {%Packet{version: version, id: id, children: children}, leftover}
  end
end
|
lib/decoder.ex
| 0.745676 | 0.57517 |
decoder.ex
|
starcoder
|
defmodule XepCache do
  @moduledoc """
  A wrapper around Erlang's depcache, an in-memory caching server.
  depcache bases its caching around ETS but can also switch to using
  the in-process dictionary for maintaining a process-local cache.
  Convenient functions are provided for getting/setting cache
  values, with ttl and cache key dependencies, as well as a memo
  function for caching the result of function executions.
  ## Options
  Most functions take an `opts` argument which holds various options.
  * `:ttl` - the maximum time-to-live of the cached value. When the
  cached value is older than the time, it will be marked as expired.
  * `:deps` - an array of cache keys which the given key depends
  on. This can be used to invalidate an entire tree of keys just by
  flushing one "parent" key on which these all depend.
  * `:server` - the name of the depcache server. By default, the
  `xepcache` application starts a single depcache instance, registered
  under the default name `:depcache`. This is also the default
  argument for this option, so it can be omitted usually.
  """
  use Application

  @default :depcache
  @default_ttl 3600

  @doc """
  Application callback: starts a supervision tree containing a single
  depcache worker registered under the default name.
  """
  def start(_type, _args) do
    import Supervisor.Spec, warn: false

    workers = [worker(:depcache, [@default, [memory_max: 512]])]
    Supervisor.start_link(workers, strategy: :one_for_one, name: XepCache.Supervisor)
  end

  @doc """
  Fetch the key from the cache, return the data or nil if not found.
  """
  def get(key, opts \\ []) do
    key
    |> :depcache.get(server(opts))
    |> return_value()
  end

  @doc """
  Add a key to the depcache with a given value. :ttl option gives the maximum key expiry; :deps option gives an array of cache keys which this key depends on.
  """
  def set(key, value, opts \\ []) do
    key
    |> :depcache.set(value, ttl(opts), deps(opts), server(opts))
    |> return_value()
  end

  @doc """
  Flush all keys from the caches for given server or the default server
  """
  def flush_all(server \\ @default), do: :depcache.flush(server)

  @doc """
  Flush the key and all keys depending on the key
  """
  def flush(key, opts \\ []), do: :depcache.flush(key, server(opts))

  @doc """
  Check if we use a local process dict cache
  """
  def in_process(), do: :depcache.in_process()

  @doc """
  Enable or disable the in-process caching using the process dictionary
  """
  def in_process(flag), do: :depcache.in_process(flag)

  @doc """
  Adds the result of the given function to the depcache. The function is only called when there is a cache miss; otherwise, the cached value is returned.
  """
  def memo(fun, key \\ nil, opts \\ []) do
    cache_key = param(key || opts[:key])
    :depcache.memo(fun, cache_key, ttl(opts), deps(opts), server(opts))
  end

  # Option readers; `||` keeps the original semantics where an explicit nil
  # also falls back to the default.
  defp server(opts), do: opts[:server] || @default
  defp ttl(opts), do: opts[:ttl] || @default_ttl
  defp deps(opts), do: opts[:deps] || []

  # Normalize depcache return shapes for Elixir callers.
  defp return_value(:undefined), do: nil
  defp return_value(:ok), do: :ok
  defp return_value({:ok, value}), do: value

  # depcache uses :undefined where Elixir uses nil.
  defp param(nil), do: :undefined
  defp param(value), do: value
end
|
lib/xepcache.ex
| 0.838151 | 0.468426 |
xepcache.ex
|
starcoder
|
defmodule Gyx.Environments.Pure.Blackjack do
  @moduledoc """
  This is an environment implementation of the game of
  [Blackjack](https://en.wikipedia.org/wiki/Blackjack) as
  described in [Sutton and Barto RL book](http://incompleteideas.net/book/RLbook2018.pdf)
  ***Example 5.1*** cited below.
  
  ***Extract from [Sutton and Barto RL book](http://incompleteideas.net/book/RLbook2018.pdf):***
  The object of the popular casino card game of *blackjack* is to obtain
  cards the sum of whose numerical values is as great as possible without
  exceeding `21`.
  All face cards count as `10`, and an ace can count either
  as `1` or as `11`. We consider the version in which each player competes
  independently against the dealer. The game begins with two cards dealt
  to both dealer and player. One of the dealer's cards is face up
  and the other is face down. If the player has `21` immediately
  (an ace and a 10-card), it is called a natural. He then wins unless
  the dealer also has a natural, in which case the game is a draw. If
  the player does not have a natural, then he can request
  additional cards, one by one (hits), until he either stops (sticks)
  or exceeds `21` (goes bust). If he goes bust, he loses; if he sticks,
  then it becomes the dealer's turn. The dealer hits or sticks according
  to a fixed strategy without choice: he sticks on any sum of 17 or
  greater, and hits otherwise. If the dealer goes bust, then the
  player wins; otherwise, the outcome -win, lose, or draw- is
  determined by whose final sum is closer to `21`.
  Playing blackjack is naturally formulated as an episodic finite MDP. Each game
  of blackjack is an episode. Rewards of `+1`,`-1`, and `0` are given
  for winning, losing, and drawing, respectively. All rewards
  within a game are zero, and we do not discount (`gamma = 1`); therefore
  these terminal rewards are also the returns. The player's actions
  are to hit or to stick. The states depend on the player's cards
  and the dealer's showing card. We assume that cards are dealt from an
  infinite deck (i.e., with replacement) so that there is no advantage
  to keeping track of the cards already dealt. If the player
  holds an ace that he could count as `11` without going bust, then
  the ace is said to be usable. In this case it is always counted as
  11 because counting it as 1 would make the sum `11` or less, in
  which case there is no decision to be made because, obviously,
  the player should always hit. Thus, the player makes decisions
  on the basis of three variables: his current sum (12-21),
  the dealer's one showing card (ace-10), and whether or not he
  holds a usable ace. This makes for a total of `200` states.
  > This implementation must behave as
  [OpenAI Gym Blackjack-v0 implementation](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py).
  """
  alias Gyx.Core.{Env, Exp}
  alias Gyx.Core.Spaces.{Discrete, Tuple}
  use Env
  use GenServer
  require Logger

  defstruct player: [],
            dealer: [],
            player_sum: nil,
            dealer_sum: nil,
            action_space: nil,
            observation_space: nil,
            done: nil

  @type t :: %__MODULE__{
          player: list(),
          dealer: list(),
          action_space: Discrete.t(),
          observation_space: Tuple.t(),
          done: bool()
        }

  # card values (aces are represented as 1; face cards as 10)
  @deck [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]

  @impl true
  def init(_) do
    {:ok,
     %__MODULE__{
       player: draw_hand(),
       dealer: draw_hand(),
       action_space: %Discrete{n: 2},
       observation_space: %Tuple{
         spaces: [%Discrete{n: 32}, %Discrete{n: 11}, %Discrete{n: 2}]
       },
       done: false
     }}
  end

  def start_link(_, opts) do
    Logger.info("Starting Environment: " <> inspect(__MODULE__), ansi_color: :magenta)
    GenServer.start_link(__MODULE__, [], opts)
  end

  @impl true
  def reset(environment) do
    GenServer.call(environment, :reset)
  end

  @doc """
  Returns the environment state with `player_sum`/`dealer_sum` filled in.
  """
  def get_state_abstraction(environment) do
    GenServer.call(environment, :get_state_abstraction)
  end

  def handle_call(:get_state_abstraction, _from, state = %__MODULE__{player: p, dealer: d}) do
    Logger.debug(inspect(state))
    {:reply, %{state | player_sum: Enum.sum(p), dealer_sum: Enum.sum(d)}, state}
  end

  # Action 0 = stick: the dealer plays out its fixed policy and the episode ends.
  def handle_call({:act, action = 0}, _from, state = %__MODULE__{}) do
    next_state = %{state | dealer: get_until(state.dealer)}

    experience = %Exp{
      state: env_state_transformer(state),
      action: action,
      next_state: env_state_transformer(next_state),
      reward: 0,
      done: true,
      info: %{}
    }

    # NOTE(review): the 0.5 natural bonus is added regardless of the round's
    # outcome; the Gym implementation only awards it when the player wins with
    # a natural — confirm whether this is intentional.
    reward = cmp(score(next_state.player), score(next_state.dealer)) + is_natural(state.player)

    case is_bust(next_state.dealer) do
      true -> {:reply, %{experience | reward: 1.0}, next_state}
      false -> {:reply, %{experience | reward: reward}, next_state}
    end
  end

  # Action 1 = hit: the player draws one card; busting ends the episode with -1.
  def handle_call({:act, action = 1}, _from, state = %__MODULE__{}) do
    next_state = %{state | player: [draw_card() | state.player]}

    case is_bust(next_state.player) do
      true ->
        {:reply,
         %Exp{
           state: env_state_transformer(state),
           action: action,
           next_state: env_state_transformer(next_state),
           reward: -1,
           done: true,
           info: %{}
         }, next_state}

      _ ->
        {:reply,
         %Exp{
           state: env_state_transformer(state),
           action: action,
           next_state: env_state_transformer(next_state),
           reward: 0,
           done: false,
           info: %{}
         }, next_state}
    end
  end

  @impl true
  def handle_call(:reset, _from, _state) do
    new_env_state = %__MODULE__{
      player: draw_hand(),
      dealer: draw_hand(),
      action_space: %Discrete{n: 2},
      observation_space: %Tuple{
        spaces: [%Discrete{n: 32}, %Discrete{n: 11}, %Discrete{n: 2}]
      },
      # Previously left at the struct default (nil); set explicitly so a reset
      # environment matches the initial state produced by init/1.
      done: false
    }

    {:reply, %Exp{}, new_env_state}
  end

  def handle_call(:observe, _from, state), do: {:reply, env_state_transformer(state), state}

  # Observation is the pair of raw hand sums.
  defp env_state_transformer(%__MODULE__{player: p, dealer: d}) do
    {Enum.sum(p), Enum.sum(d)}
  end

  defp draw_card(), do: @deck |> Enum.random()

  defp draw_hand(), do: [draw_card(), draw_card()]

  # Dealer fixed policy: hit while the hand total is below `v` (17 by
  # default), stick at `v` or above. The previous implementation drew a card
  # *before* checking the total, so a dealer already holding 17+ was forced to
  # take one extra card — contradicting the documented "sticks on any sum of
  # 17 or greater" rule and inflating dealer busts.
  defp get_until(hand, v \\ 17) do
    if Enum.sum(hand) < v do
      get_until([draw_card() | hand], v)
    else
      hand
    end
  end

  # +1.0 when a beats b, otherwise -1.0 (ties count as a loss for `a`).
  defp cmp(a, b) do
    case a > b do
      true -> 1.0
      _ -> -1.0
    end
  end

  defp is_bust(hand), do: Enum.sum(hand) > 21

  # A busted hand scores 0; otherwise the raw sum.
  defp score(hand) do
    case is_bust(hand) do
      true -> 0
      false -> Enum.sum(hand)
    end
  end

  # A natural is exactly an ace (1) and a 10-card in the initial two cards.
  defp is_natural(hand, plus \\ 0.5) do
    case Enum.sort(hand) == [1, 10] do
      true -> plus
      _ -> 0.0
    end
  end
end
|
lib/environments/pure/blackjack.ex
| 0.906165 | 0.74803 |
blackjack.ex
|
starcoder
|
defmodule Helios.Registry.Distribution.StaticQuorumRing do
  @moduledoc """
  A quorum is the minimum number of nodes that a distributed cluster has to
  obtain in order to be allowed to perform an operation. This can be used to
  enforce consistent operation in a distributed system.
  ## Quorum size
  You must configure this distribution strategy and specify its minimum quorum
  size:
  config :my_app, MyApp.Endpoint,
  registry:[
  distribution_strategy: {
  Helios.Registry.Distribution.StaticQuorumRing,
  :init,
  [5] # default static quorum size is 2
  }
  ]
  It defines the minimum number of nodes that must be connected in the cluster
  to allow process registration and distribution.
  If there are fewer nodes currently available than the quorum size, any calls
  to `Helios.Registry.register_name/6` will return `{:error, :no_node_available}`
  until enough nodes have started.
  You can configure the `:kernel` application to wait for cluster formation
  before starting your application during node start up. The
  `sync_nodes_optional` configuration specifies which nodes to attempt to
  connect to within the `sync_nodes_timeout` window, defined in milliseconds,
  before continuing with startup. There is also a `sync_nodes_mandatory` setting
  which can be used to enforce all nodes are connected within the timeout window
  or else the node terminates.
  config :kernel,
  sync_nodes_optional: [:"[email protected]", :"[email protected]"],
  sync_nodes_timeout: 60_000
  The `sync_nodes_timeout` can be configured as `:infinity` to wait indefinitely
  for all nodes to connect. All involved nodes must have the same value for
  `sync_nodes_timeout`.
  ### Example
  In a 9 node cluster you would configure the `:static_quorum_size` as 5. During
  a network split of 4 and 5 nodes, processes on the side with 5 nodes
  will continue running, whereas processes on the other 4 nodes will be stopped.
  Be aware that in the running 5 node cluster, no more failures can be handled
  because the remaining cluster size would be less than the required 5 node
  minimum. All running processes would be stopped in the case of another single
  node failure.
  """
  @behaviour Helios.Registry.Distribution.Strategy

  alias Helios.Registry.Distribution.StaticQuorumRing

  defstruct [:static_quorum_size, :ring]

  @doc "Creates the strategy state with the given minimum quorum size (default 2) and an empty hash ring."
  def init(static_quorum_size \\ 2) do
    %StaticQuorumRing{static_quorum_size: static_quorum_size, ring: HashRing.new()}
  end

  @doc "Adds a node to the hash ring; the configured quorum size is unchanged."
  def add_node(%StaticQuorumRing{ring: ring} = quorum, node) do
    %StaticQuorumRing{quorum | ring: HashRing.add_node(ring, node)}
  end

  @doc "Adds a weighted node to the hash ring."
  def add_node(%StaticQuorumRing{ring: ring} = quorum, node, weight) do
    %StaticQuorumRing{quorum | ring: HashRing.add_node(ring, node, weight)}
  end

  @doc "Adds several nodes to the hash ring at once."
  def add_nodes(%StaticQuorumRing{ring: ring} = quorum, nodes) do
    %StaticQuorumRing{quorum | ring: HashRing.add_nodes(ring, nodes)}
  end

  @doc "Removes a node from the hash ring."
  def remove_node(%StaticQuorumRing{ring: ring} = quorum, node) do
    %StaticQuorumRing{quorum | ring: HashRing.remove_node(ring, node)}
  end

  @doc """
  Maps a key to a specific node via the current distribution strategy.
  If the available nodes in the cluster are fewer than the minimum node count it returns `:undefined`.
  """
  def key_to_node(%StaticQuorumRing{static_quorum_size: quorum_size, ring: ring}, key) do
    if length(ring.nodes) < quorum_size do
      :undefined
    else
      HashRing.key_to_node(ring, key)
    end
  end
end
|
lib/helios/registry/distribution/static_quorum_ring.ex
| 0.865764 | 0.656493 |
static_quorum_ring.ex
|
starcoder
|
defmodule ExifParser.Tag do
  @moduledoc """
  Tiff Tag parser. Parses the 12 bytes that represent the information contained
  in the TIFF tags.

  | | |
  |-----------|---------|
  | tag_id | 2 bytes |
  | type_id | 2 bytes |
  | tag_count | 4 bytes |
  | tag_vaue | 4 bytes |

  The tag_id if referenced in the lookup table and tag_name field is updated
  with a more human readable atom value.
  The value is updated to the decoded value according to the data type.
  """
  defstruct tag_id: nil,
            tag_name: nil,
            data_type: nil,
            data_count: nil,
            value: nil

  @type t :: %__MODULE__{
          tag_id: non_neg_integer,
          tag_name: atom,
          data_type: Value.data_types(),
          data_count: non_neg_integer,
          value: any
        }

  # When the encoded value is longer than the 4 inline bytes of the tag entry,
  # those 4 bytes hold an offset (from the start of the TIFF data) to the real
  # value; dereference it. Otherwise the inline bytes already are the value.
  defp value_offset_correction(value, tag_length, endian, start_of_tiff)
       when tag_length > 4 do
    value_offset = :binary.decode_unsigned(value, endian)

    <<_::binary-size(value_offset), new_value::binary-size(tag_length), _::binary>> =
      start_of_tiff

    new_value
  end

  defp value_offset_correction(value, _tag_length, _endian, _start_of_tiff) do
    value
  end

  @doc """
  The method parses the a binary buffer that contains a tag.

  + The tag_id, type_id and tag_count are decode by converting binary to integer.
  + The tag_id and tag_type is used to look up the tag_name.
  + The value of the tag is decoded using the tag_type and tag_count.

  The tag_type is set to :tiff by default.
  The result can be either a
  {:ok, Tag} or {:error, String}
  """
  # NOTE(review): the spec below advertises {:ok, t} | {:error, String.t()},
  # but the visible clauses pipe into ExifParser.Tag.Value.decode_tag/2 and
  # the catch-all returns `{}` — confirm the actual return shapes and align
  # the spec with them.
  @spec parse(
          tag_buffer :: binary,
          endian :: :little | :big,
          start_of_tiff :: non_neg_integer,
          tag_type :: ExifParser.Tag.LookUp.tag_type()
        ) :: {:ok, __MODULE__} | {:error, String.t()}
  # Head parameter renamed from `header` to `endian` to match the spec and
  # every clause body (purely cosmetic: callers are unaffected).
  def parse(tag_buffer, endian, start_of_tiff, tag_type \\ :tiff)

  def parse(
        <<tag_id::binary-size(2), type_id::binary-size(2), tag_count::binary-size(4),
          value::binary-size(4)>>,
        endian,
        start_of_tiff,
        tag_type
      ) do
    tag_id = :binary.decode_unsigned(tag_id, endian)

    data_type =
      :binary.decode_unsigned(type_id, endian)
      |> ExifParser.Tag.Value.type_id_to_data_type()

    tag_count = :binary.decode_unsigned(tag_count, endian)
    tag_length = ExifParser.Tag.Value.data_type_to_byte_length(data_type, tag_count)
    value = value_offset_correction(value, tag_length, endian, start_of_tiff)

    %__MODULE__{tag_id: tag_id, data_type: data_type, data_count: tag_count, value: value}
    |> ExifParser.Tag.LookUp.look_up_name(tag_type)
    |> ExifParser.Tag.Value.decode_tag(endian)
    |> parse_sub_ifd(start_of_tiff, endian)
  end

  def parse(_, _, _, _), do: {}

  # Tags whose value is an offset to a nested IFD (exif/gps/interoperability)
  # get that sub-IFD parsed and substituted as the tag value.
  defp parse_sub_ifd(
         %__MODULE__{tag_name: tag_name, value: sub_ifd_offset} = tag,
         start_of_tiff,
         endian
       )
       when tag_name in [:exif, :gps, :interoperability] do
    # Exactly one sub-IFD is expected at the offset.
    [sub_ifd] =
      ExifParser.ImageFileDirectory.parse_ifds(
        endian,
        start_of_tiff,
        sub_ifd_offset,
        tag_name
      )

    %__MODULE__{tag | value: sub_ifd}
  end

  defp parse_sub_ifd(tag, _, _), do: tag
end
|
lib/exif_parser/tag.ex
| 0.70069 | 0.544014 |
tag.ex
|
starcoder
|
defmodule Ash.Flow.Dsl do
  @create %Ash.Dsl.Entity{
    name: :create,
    describe: """
    Declares a step that will call a create action on a resource.
    """,
    examples: [
      """
      create :create_post, MyApp.Post, :create
      """
    ],
    modules: [:resource, :touches_resources],
    target: Ash.Flow.Step.Create,
    args: [:name, :resource, :action],
    schema: Ash.Flow.Step.Create.schema()
  }

  @update %Ash.Dsl.Entity{
    name: :update,
    describe: """
    Declares a step that will call an update action on a resource.
    """,
    examples: [
      """
      update :update_post, MyApp.Post, :update do
        record result(:get_post)
      end
      """
    ],
    modules: [:resource, :touches_resources],
    target: Ash.Flow.Step.Update,
    args: [:name, :resource, :action],
    schema: Ash.Flow.Step.Update.schema()
  }

  @destroy %Ash.Dsl.Entity{
    name: :destroy,
    describe: """
    Declares a step that will call a destroy action on a resource.
    """,
    examples: [
      """
      destroy :destroy_post, MyApp.Post, :destroy
      """
    ],
    modules: [:resource, :touches_resources],
    target: Ash.Flow.Step.Destroy,
    args: [:name, :resource, :action],
    schema: Ash.Flow.Step.Destroy.schema()
  }

  @read %Ash.Dsl.Entity{
    name: :read,
    describe: """
    Declares a step that will call a read action on a resource.
    """,
    examples: [
      # BUGFIX: the example previously showed a destroy call pasted from the
      # @destroy entity; it now demonstrates an actual read step.
      """
      read :get_post, MyApp.Post, :read
      """
    ],
    modules: [:resource, :touches_resources],
    target: Ash.Flow.Step.Read,
    args: [:name, :resource, :action],
    schema: Ash.Flow.Step.Read.schema()
  }

  @run_flow %Ash.Dsl.Entity{
    name: :run_flow,
    describe: """
    Runs another flow as part of the current flow.
    The return value of the flow is the return value of the step.
    """,
    examples: [
      # BUGFIX: the example was missing its closing `end`.
      """
      run_flow :get_org, GetOrgByName do
        input %{
          name: arg(:org_name)
        }
      end
      """
    ],
    modules: [:resource, :touches_resources],
    target: Ash.Flow.Step.RunFlow,
    args: [:name, :flow],
    schema: Ash.Flow.Step.RunFlow.schema()
  }

  @custom %Ash.Dsl.Entity{
    name: :custom,
    describe: """
    Runs a custom step module.
    See `Ash.Flow.Step` for the necessary callbacks and more information.
    """,
    examples: [
      """
      custom :do_custom_thing, MyApp.DoCustomThing do
        input %{...}
      end
      """,
      """
      custom :do_custom_thing, {MyApp.DoCustomThing, opt1: :foo, opt2: :bar} do
        input %{...}
      end
      """
    ],
    modules: [:custom, :touches_resources],
    target: Ash.Flow.Step.Custom,
    args: [:name, :custom],
    schema: Ash.Flow.Step.Custom.schema()
  }

  @argument %Ash.Dsl.Entity{
    name: :argument,
    describe: """
    An argument to be passed into the flow
    """,
    examples: [
      """
      argument :params, :map do
        default %{}
      end
      """,
      """
      argument :retries, :integer do
        allow_nil? false
      end
      """
    ],
    modules: [:type],
    target: Ash.Flow.Argument,
    args: [:name, :type],
    schema: Ash.Flow.Argument.schema()
  }

  @flow %Ash.Dsl.Section{
    name: :flow,
    describe: """
    Details about the flow itself, like description and the successful return type.
    """,
    entities: [
      @argument
    ],
    schema: [
      api: [
        type: {:behaviour, Ash.Api},
        doc: "An api to use by default when calling actions"
      ],
      description: [
        type: :string,
        doc: "A description of the flow"
      ],
      returns: [
        type: :any,
        doc: """
        The step or step who's output to return.
        If given a single step, then the result of the step is returned. If given multiple, then a map of step name to result is returned.
        If nothing is provided, then the last step is returned.
        To rename keys in the map of step names to results, use a keyword list, where the key is the step and the value is what should be in
        the returned map.
        For example:
        `returns :step_name`
        `returns [:step_one, :step_two]`
        `returns [step_one: :one, step_two: :two]`
        """
      ]
    ]
  }

  @step_entities [@create, @update, @destroy, @read, @run_flow, @custom]

  @transaction %Ash.Dsl.Entity{
    name: :transaction,
    describe: """
    Runs a set of steps in a transaction.
    """,
    schema: Ash.Flow.Step.Transaction.schema(),
    target: Ash.Flow.Step.Transaction,
    args: [:name],
    recursive_as: :steps,
    entities: [
      steps: @step_entities
    ],
    modules: [:touches_resources],
    examples: [
      # BUGFIX: the example previously contained `over`/`output` lines pasted
      # from the map example (they are not transaction options) and a bare
      # `record` with no value.
      """
      transaction :create_users do
        create :create_user, User, :create do
          input %{
            first_name: {Faker.Person, :first_name, []},
            last_name: {Faker.Person, :last_name, []}
          }
        end

        update :update_user, User, :update do
          record result(:create_user)
        end
      end
      """
    ]
  }

  @map %Ash.Dsl.Entity{
    name: :map,
    describe: """
    Runs a set of steps for each item in a provided list.
    """,
    schema: Ash.Flow.Step.Map.schema(),
    target: Ash.Flow.Step.Map,
    args: [:name, :over],
    recursive_as: :steps,
    modules: [:touches_resources],
    entities: [
      steps: @step_entities
    ],
    examples: [
      """
      map :create_users do
        over range(1, arg(:count))
        output :create_user

        create :create_user, Org, :create do
          input %{
            first_name: {Faker.Person, :first_name, []},
            last_name: {Faker.Person, :last_name, []}
          }
        end
      end
      """
    ]
  }

  # Allow one level of mutual nesting: a transaction may contain maps and a
  # map may contain transactions (deeper nesting recurses via :recursive_as).
  transaction = %{@transaction | entities: [steps: [@map | @step_entities]]}
  map = %{@map | entities: [steps: [@transaction | @step_entities]]}

  @transaction transaction
  @map map

  @steps %Ash.Dsl.Section{
    name: :steps,
    describe: """
    The steps to run.
    """,
    examples: [
      """
      steps do
        # invokes a create action
        create :create_post, MyApp.Post, :create
      end
      """
    ],
    imports: [Ash.Flow.StepHelpers],
    entities: [@map, @transaction] ++ @step_entities
  }

  @transformers [
    Ash.Flow.Transformers.SetApi,
    Ash.Flow.Transformers.ValidateUniqueNames,
    Ash.Flow.Transformers.SetTypes,
    Ash.Flow.Transformers.ValidateNoEmptySteps
  ]

  @sections [@flow, @steps]

  @moduledoc """
  The built in flow DSL.
  # Table of Contents
  #{Ash.Dsl.Extension.doc_index(@sections)}
  #{Ash.Dsl.Extension.doc(@sections)}
  """

  use Ash.Dsl.Extension,
    sections: @sections,
    transformers: @transformers
end
|
lib/ash/flow/dsl.ex
| 0.762336 | 0.674783 |
dsl.ex
|
starcoder
|
defmodule ExSaga.Stage do
  @moduledoc """
  A single saga stage: a transaction function paired with a compensation
  function, advanced event-by-event through the `ExSaga.Stepper` behaviour.
  """
  use ExSaga.Stepper, compensation_event_name: [:starting, :compensation]
  alias ExSaga.{DryRun, Event, Hook, Retry, State, Stepable, Utils}

  @typedoc """
  Identifier term for a saga execution. Opaque here — semantics are defined
  by callers (TODO confirm).
  """
  @type id :: term

  @typedoc """
  The name of a single stage.
  """
  @type name :: atom

  @typedoc """
  Fully qualified stage name: the parent names with the stage's own name
  appended (see `get_full_name/2`).
  """
  @type full_name :: [name, ...]

  @typedoc """
  The value produced by a successful transaction.
  """
  @type effect :: term

  @typedoc """
  Effects accumulated from completed stages, keyed by stage name.
  """
  @type effects :: %{optional(name) => effect}

  @typedoc """
  A stage term. Opaque here — intended shape not visible in this module.
  """
  @type stage :: term

  @typedoc """
  Allowed return values of a transaction function.
  """
  @type transaction_result ::
          {:ok, effect}
          | {:error, reason :: term}
          | {:abort, reason :: term}

  @typedoc """
  Allowed return values of a compensation function.
  """
  @type compensation_result ::
          :ok
          | :abort
          | {:retry, Retry.retry_opts()}
          | {:continue, effect}

  defstruct transaction: nil,
            compensation: nil,
            state: %State{}

  @type t :: %__MODULE__{
          transaction: (effects_so_far :: effects -> transaction_result),
          compensation:
            (reason :: term, effect_to_compensate :: effect, effects_so_far :: effects -> compensation_result),
          state: State.t()
        }

  @doc """
  Returns the stage's full name: `parent_full_name` with the stage's own
  `state.name` appended.
  """
  @spec get_full_name(t, full_name) :: full_name
  def get_full_name(stage, parent_full_name) do
    %{state: %{name: name}} = stage
    parent_full_name ++ [name]
  end

  @doc """
  Runs the stage's transaction function (possibly as a dry run, per
  `DryRun.from_stepable/3`) against the effects collected so far, and returns
  `event` renamed to `[:completed, :transaction]` with the normalized result
  as its context. Any result other than `{:ok, _}`, `{:error, _}` or
  `{:abort, _}` becomes `{:error, {:unsupported_transaction_result_form, _}}`.
  """
  @spec execute_transaction(t, Event.t(), Stepable.opts()) :: Event.t()
  def execute_transaction(stage, event, opts \\ []) do
    %{state: %{effects_so_far: effects_so_far}} = stage
    opts = DryRun.from_stepable(event, opts, {:ok, nil})

    result =
      case DryRun.maybe_execute(stage.transaction, [effects_so_far], opts) do
        {:ok, result} -> {:ok, result}
        {:error, reason} -> {:error, reason}
        {:abort, reason} -> {:abort, reason}
        otherwise -> {:error, {:unsupported_transaction_result_form, otherwise}}
      end

    Event.update(event,
      name: [:completed, :transaction],
      context: result
    )
  end

  @doc """
  Runs the stage's compensation function (possibly as a dry run) with the
  stored failure reason, the effect to compensate and the effects so far, and
  returns `event` renamed to `[:completed, :compensation]` with the
  normalized result as its context. Unsupported results become
  `{:error, {:unsupported_compensation_result_form, _}}`.
  """
  @spec execute_compensation(t, Event.t(), Stepable.opts()) :: Event.t()
  def execute_compensation(stage, event, opts \\ []) do
    %{state: %{effects_so_far: effects_so_far, reason: reason}} = stage
    effect = Event.get_effect(event, effects_so_far)
    opts = DryRun.from_stepable(event, opts, :ok)

    result =
      case DryRun.maybe_execute(stage.compensation, [reason, effect, effects_so_far], opts) do
        :ok -> :ok
        :abort -> :abort
        {:retry, retry_opts} -> {:retry, retry_opts}
        {:continue, effect} -> {:continue, effect}
        otherwise -> {:error, {:unsupported_compensation_result_form, otherwise}}
      end

    Event.update(event,
      name: [:completed, :compensation],
      context: result
    )
  end

  # All handle_step/3 clauses below only fire once every pending hook has run
  # (hooks_left: []).

  # Start of the transaction phase: execute it and refill the hook queue.
  @impl ExSaga.Stepper
  def handle_step(%{state: %State{hooks_left: []}} = stage, %Event{name: [:starting, :transaction]} = event, opts) do
    event = execute_transaction(stage, event, opts)
    {:continue, event, %{stage | state: %{stage.state | hooks_left: Hook.merge_hooks(stage.state, opts)}}}
  end

  # Transaction finished: on success store the effect under the stage name;
  # on :error/:abort transition to the compensation phase (marking abort?).
  def handle_step(%{state: %State{hooks_left: []}} = stage, %Event{name: [:completed, :transaction]} = event, opts) do
    %{state: %{name: name, effects_so_far: effects_so_far}} = stage

    case event.context do
      {:ok, result} ->
        {:ok, Map.put(effects_so_far, name, result)}

      {status, reason} when status in [:error, :abort] ->
        event =
          Event.update(event,
            name: [:starting, :compensation],
            context: {status, reason, Event.get_effect(event, effects_so_far), effects_so_far}
          )

        stage = if status == :abort, do: %{stage | state: %{stage.state | abort?: true}}, else: stage

        {:continue, event,
         %{stage | state: %{stage.state | hooks_left: Hook.merge_hooks(stage.state, opts), reason: reason}}}
    end
  end

  # Start of the compensation phase: run it and remember the failure reason.
  def handle_step(%{state: %State{hooks_left: []}} = stage, %Event{name: [:starting, :compensation]} = event, opts) do
    {_, reason, _, _} = event.context
    event = execute_compensation(stage, event, opts)

    {:continue, event,
     %{stage | state: %{stage.state | hooks_left: Hook.merge_hooks(stage.state, opts), reason: reason}}}
  end

  # Compensation finished: resolve the stage according to its result —
  # :ok/:abort terminate, {:retry, _} may re-run the transaction,
  # {:continue, effect} substitutes an effect, and {:error, _} escalates to
  # the error handler.
  def handle_step(%{state: %State{hooks_left: []}} = stage, %Event{name: [:completed, :compensation]} = event, opts) do
    %{state: %{name: name, effects_so_far: effects_so_far, reason: reason}} = stage

    case event.context do
      :ok ->
        {:error, reason, effects_so_far}

      :abort ->
        {:abort, reason, effects_so_far}

      {:retry, retry_opts} ->
        # Retry.start_retry returns nil once retries are exhausted.
        case Retry.start_retry(stage.state, event, retry_opts) do
          %Event{} = event ->
            {:continue, event,
             %{stage | state: %{stage.state | hooks_left: Hook.merge_hooks(stage.state, opts), reason: nil}}}

          nil ->
            {:abort, reason, effects_so_far}
        end

      {:continue, effect} ->
        {:ok, Map.put(effects_so_far, name, effect)}

      {:error, reason} ->
        event =
          Event.update(event,
            name: [:starting, :error_handler],
            context: {reason, event, effects_so_far}
          )

        {:continue, event, %{stage | state: %{stage.state | hooks_left: Hook.merge_hooks(stage.state, opts)}}}
    end
  end

  # Fallback: nil signals the step was not handled here (presumably deferred
  # to the Stepper's own handling — confirm against ExSaga.Stepper).
  def handle_step(_stage, _event, _opts) do
    nil
  end

  defimpl Stepable do
    alias ExSaga.Stage

    # Full name of the stage, namespaced under :parent_full_name from opts.
    def get_name(%{state: %{name: name}}, opts) do
      parent_full_name = Keyword.get(opts, :parent_full_name, [])
      parent_full_name ++ [name]
    end

    def get_name(_stepable, _opts) do
      []
    end

    # Entering from a successful previous step: reset state and emit the
    # [:starting, :transaction] event.
    def step_from(stage, {:ok, effects_so_far}, opts) do
      full_name = Stepable.get_name(stage, opts)

      event =
        Event.create(
          id: Keyword.get(opts, :id),
          stage_name: full_name,
          name: [:starting, :transaction],
          context: effects_so_far,
          stage: Stage
        )

      stage = %{stage | state: State.reset(stage.state)}

      {:continue, event,
       %{
         stage
         | state: %{
             stage.state
             | hooks_left: Hook.merge_hooks(stage.state, opts),
               effects_so_far: effects_so_far,
               abort?: false,
               reason: nil
           }
       }}
    end

    # Entering from a failed/aborted step: reset state and emit the
    # [:starting, :compensation] event for this stage's own effect.
    def step_from(stage, {status, reason, effects_so_far}, opts)
        when status in [:error, :abort] do
      abort? = if status == :abort, do: true, else: false
      full_name = Stepable.get_name(stage, opts)
      effect = Utils.get_in(effects_so_far, tl(full_name))

      event =
        Event.create(
          id: Keyword.get(opts, :id),
          stage_name: full_name,
          name: [:starting, :compensation],
          context: {status, reason, effect, effects_so_far},
          stage: Stage
        )

      stage = %{stage | state: State.reset(stage.state)}

      {:continue, event,
       %{
         stage
         | state: %{
             stage.state
             | hooks_left: Hook.merge_hooks(stage.state, opts),
               effects_so_far: effects_so_far,
               abort?: abort?,
               reason: reason
           }
       }}
    end

    def step_from(stage, _result, _opts) do
      # TODO: error handler
      {:continue, nil, stage}
    end

    def step(stage, event, opts) do
      Stage.step(stage, event, opts)
    end
  end
end
|
lib/ex_saga/stage.ex
| 0.871816 | 0.40028 |
stage.ex
|
starcoder
|
defmodule RobotSimulator do
  @moduledoc """
  Simulates a robot on an integer grid: it has a facing direction and an
  `{x, y}` position and obeys "L"/"R"/"A" instruction strings.
  """

  @valid_directions [:north, :east, :south, :west]

  # Counter-clockwise and clockwise neighbours of each direction.
  @left %{north: :west, west: :south, south: :east, east: :north}
  @right %{north: :east, east: :south, south: :west, west: :north}

  @doc """
  Create a Robot Simulator given an initial direction and position.
  Valid directions are: `:north`, `:east`, `:south`, `:west`
  Returns `{:error, "invalid direction"}` or `{:error, "invalid position"}`
  for bad input.
  """
  @spec create(direction :: atom, position :: {integer, integer}) :: any
  def create(direction \\ :north, position \\ {0, 0}) do
    cond do
      direction not in @valid_directions -> {:error, "invalid direction"}
      invalid?(position) -> {:error, "invalid position"}
      true -> %{direction: direction, position: position}
    end
  end

  # A position is valid only when it is a 2-tuple of integers.
  defp invalid?({x, y}) when is_integer(x) and is_integer(y), do: false
  defp invalid?(_), do: true

  @doc """
  Simulate the robot's movement given a string of instructions.
  Valid instructions are: "R" (turn right), "L", (turn left), and "A" (advance)
  Any other character yields `{:error, "invalid instruction"}`.
  """
  @spec simulate(robot :: any, instructions :: String.t()) :: any
  def simulate(robot, "L" <> rest),
    do: simulate(%{robot | direction: @left[robot.direction]}, rest)

  def simulate(robot, "R" <> rest),
    do: simulate(%{robot | direction: @right[robot.direction]}, rest)

  def simulate(robot, "A" <> rest),
    do: simulate(%{robot | position: advance(robot.direction, robot.position)}, rest)

  # All instructions consumed: return the final robot.
  def simulate(robot, ""), do: robot

  def simulate(_, _), do: {:error, "invalid instruction"}

  # Move one cell in the facing direction (:north is +y, :east is +x).
  defp advance(:north, {x, y}), do: {x, y + 1}
  defp advance(:south, {x, y}), do: {x, y - 1}
  defp advance(:east, {x, y}), do: {x + 1, y}
  defp advance(:west, {x, y}), do: {x - 1, y}

  @doc """
  Return the robot's direction.
  Valid directions are: `:north`, `:east`, `:south`, `:west`
  """
  @spec direction(robot :: any) :: atom
  def direction(robot), do: robot.direction

  @doc """
  Return the robot's position.
  """
  @spec position(robot :: any) :: {integer, integer}
  def position(robot), do: robot.position
end
|
robot-simulator/lib/robot_simulator.ex
| 0.904596 | 0.86212 |
robot_simulator.ex
|
starcoder
|
defmodule Adventofcode.Day07TheSumOfItsParts.PartTwo do
  use Adventofcode

  # Simulation state:
  #   second       - clock; starts at -1 so the first tick runs at second 0
  #   workers      - list of {task | nil, seconds_left} pairs
  #   steps        - step names not yet assigned to a worker
  #   completed    - step names finished, in completion order
  #   dependencies - map of step => list of prerequisite steps
  #   delay        - fixed per-task cost added to each step's letter cost
  #   print        - when true, prints a table of the simulation per tick
  @enforce_keys [:steps, :dependencies, :workers, :delay]
  defstruct second: -1,
            workers: [],
            steps: [],
            completed: [],
            dependencies: [],
            delay: 0,
            print: false

  # Entry point: builds the state, runs the simulation to completion, and
  # returns the second at which the last step finished.
  def steps_completion_time(input, options) do
    input
    |> new(options)
    |> print_header
    |> iterate
    |> Map.get(:second)
  end

  # Parses the puzzle input into a list of [prerequisite, step] pairs.
  def parse(input) do
    input
    |> String.trim_trailing("\n")
    |> String.split("\n")
    |> Enum.map(&parse_step/1)
  end

  # Builds the initial state; options: :print (default false),
  # :delay (default 60), :workers (worker count, default 5).
  def new(input, options) do
    requirements = parse(input)
    steps = steps_list(requirements)
    dependencies = build_dependencies(requirements)
    print = Keyword.get(options, :print, false)
    delay = Keyword.get(options, :delay, 60)
    workers = Keyword.get(options, :workers, 5)
    # Each worker starts idle: no task, zero seconds remaining.
    workers = Enum.map(1..workers, fn _ -> {nil, 0} end)

    %__MODULE__{
      workers: workers,
      steps: steps,
      dependencies: dependencies,
      delay: delay,
      print: print
    }
  end

  # Maps every step to the list of steps that must complete before it.
  # Steps with no prerequisites map to [].
  def build_dependencies(requirements) do
    deps = requirements |> steps_list |> Enum.map(&{&1, []}) |> Enum.into(%{})

    Enum.reduce(requirements, deps, fn [a, b], acc ->
      Map.update(acc, b, [a], &[a | &1])
    end)
  end

  # Ticks the clock until no steps remain and every worker is idle.
  def iterate(state) do
    next_state = tick(state)
    workers_done = Enum.all?(next_state.workers, fn {_, time} -> time == 0 end)

    if next_state.steps == [] && workers_done do
      next_state
    else
      iterate(next_state)
    end
  end

  # One simulated second. The order matters: time advances, running tasks
  # burn a second, finished tasks complete, then free workers pick up work.
  def tick(state) do
    state
    |> increment_second
    |> decrement_time_ongoing_tasks
    |> gather_completed_tasks
    |> assign_new_tasks
    |> print
  end

  defp increment_second(state), do: %{state | second: state.second + 1}

  # Idle workers stay at {nil, 0}; busy workers lose one second.
  defp decrement_time_ongoing_tasks(state) do
    workers =
      Enum.map(state.workers, fn
        {nil, 0} -> {nil, 0}
        {task, time_left} -> {task, time_left - 1}
      end)

    %{state | workers: workers}
  end

  # Moves every task whose timer just reached zero into `completed`.
  def gather_completed_tasks(state) do
    {workers, completed} =
      Enum.map_reduce(
        state.workers,
        state.completed,
        &do_gather_completed_tasks/2
      )

    %{state | workers: workers, completed: completed}
  end

  # Hands out at most one ready task per idle worker, removing the assigned
  # tasks from the pending step list.
  def assign_new_tasks(state) do
    # Determine ready tasks
    workers_free = Enum.count(state.workers, &(elem(&1, 1) == 0))
    ready = ready_list(state) |> Enum.take(workers_free)

    {workers, left} =
      Enum.map_reduce(
        state.workers,
        ready,
        &assign_new_tasks(&1, &2, state.delay)
      )

    # Sanity check: every selected ready task must have found a worker.
    unless left == [], do: raise(ArgumentError, message: "left #{inspect(left)}")
    steps = state.steps -- ready
    %{state | workers: workers, steps: steps}
  end

  # Worker has no current task, nothing to do
  defp do_gather_completed_tasks({nil, 0}, done) do
    {{nil, 0}, done}
  end

  # Worker has time left on a task, nothing to do
  defp do_gather_completed_tasks({task, time_left}, done) when time_left > 0 do
    {{task, time_left}, done}
  end

  # Worker has completed task, move to done
  defp do_gather_completed_tasks({task, 0}, done) do
    {{nil, 0}, done ++ [task]}
  end

  # Worker has no current task, so give it a new one
  defp assign_new_tasks({nil, 0}, [new_task | ready], delay) do
    {{new_task, delay + time_needed(new_task)}, ready}
  end

  # Worker already has a task
  defp assign_new_tasks({task, time_left}, ready, _delay) do
    {{task, time_left}, ready}
  end

  # Letter cost: "A" -> 1, "B" -> 2, ... (?@ is the codepoint before ?A).
  def time_needed(task) do
    hd(String.to_charlist(task)) - ?@
  end

  # Sorted, de-duplicated list of every step mentioned in the requirements.
  defp steps_list(requirements) do
    requirements
    |> Enum.flat_map(& &1)
    |> Enum.sort()
    |> Enum.uniq()
  end

  # Alphabetical list of pending steps whose prerequisites are all completed.
  def ready_list(state) do
    state.steps
    |> Enum.filter(&do_ready?(&1, state))
    |> Enum.sort()
  end

  defp do_ready?(step, state) do
    state.dependencies
    |> Map.get(step)
    |> Enum.all?(&(&1 in state.completed))
  end

  # Extracts [prerequisite, step] from lines like
  # "Step A must be finished before step B can begin."
  def parse_step(line) do
    case Regex.scan(~r/^.+ (\w) .+ (\w) .+$/, line) do
      [[_, a, b]] -> [a, b]
    end
  end

  def print_header(%{print: false} = state), do: state

  def print_header(state) do
    workers =
      state.workers
      |> Enum.with_index()
      |> Enum.map_join("", fn {_, index} -> "  Worker #{index + 1}" end)

    IO.puts("\nSecond#{workers}   Done")
    state
  end

  def print(%{print: false} = state), do: state

  # Prints one table row: current second, each worker's task (or "."), and
  # the completed steps so far.
  def print(state) do
    worker_columns =
      Enum.map_join(state.workers, "  ", fn
        {nil, _} ->
          "   .   "

        {task, _} ->
          "   #{task}   "
      end)

    second =
      case state.second do
        s when s < 10 -> "   #{s}  "
        s when s < 100 -> "  #{s}  "
        s when s < 1000 -> " #{s}  "
        s -> "#{s}  "
      end

    IO.puts("#{second} #{worker_columns}  #{Enum.join(state.completed, "")}")
    state
  end
end
|
lib/day_07_the_sum_of_its_parts_part_two.ex
| 0.535098 | 0.533215 |
day_07_the_sum_of_its_parts_part_two.ex
|
starcoder
|
defmodule Ecto.Adapters.Jamdb.Oracle do
  @moduledoc """
  Adapter module for Oracle. `Ecto.Adapters.SQL` callbacks implementation.
  It uses `jamdb_oracle` for communicating to the database.
  ## Features
  * Using prepared statement functionality, the SQL statement you want
  to run is precompiled and stored in a database object, and you can run it
  as many times as required without compiling it every time it is run. If the data in the
  statement changes, you can use bind variables as placeholders for the data and then
  provide literal values at run time.
  * Using bind variables:
  `{"select 1+:1, sysdate, rowid from dual where 1=:1"`, `[1]}`
  * Calling stored procedure:
  `{"begin proc(:1, :2, :3); end;"`, `[1.0, 2.0, 3.0]}`
  * Calling stored function:
  `{"begin :1 := func(:2); end;"`, `[{:out, :varchar}, "one hundred"]}`
  * Using cursor variable:
  `{"begin open :1 for select * from tabl where dat>:2; end;"`, `[:cursor, {2016, 8, 1}]}`
  * Using returning clause:
  `{"insert into tabl values (tablid.nextval, sysdate) return id into :1"`, `[{:out, :number}]}`
  `YourApp.Repo.insert_all(Post,[[id: 100]], [returning: [:created_at], out: [:date]])`
  * Update batching:
  `{:batch, "insert into tabl values (:1, :2, :3)"`, `[[1, 2, 3],[4, 5, 6],[7, 8, 9]]}`
  * Row prefetching:
  `{:fetch, "select * from tabl where id>:1"`, `[1]}`
  `{:fetch, cursor, row_format, last_row}`
  ## Options
  Adapter options split in different categories described
  below. All options can be given via the repository
  configuration:
  config :your_app, YourApp.Repo,
  ...
  ### Connection options
  * `:hostname` - Server hostname (Name or IP address of the database server)
  * `:port` - Server port (Number of the port where the server listens for requests)
  * `:database` - Database (Database service name or SID with colon as prefix)
  * `:username` - Username (Name for the connecting user)
  * `:password` - User password (Password for the connecting user)
  * `:parameters` - Keyword list of connection parameters
  * `:socket_options` - Options to be given to the underlying socket
  * `:timeout` - The default timeout to use on queries, defaults to `15000`
  * `:charset` - Name that is used in multibyte encoding
  ### Pool options
  * `:pool` - The connection pool module, defaults to `DBConnection.ConnectionPool`
  * `:pool_size` - The size of the pool, defaults to `1`
  * `:idle_interval` - The ping interval to validate an idle connection, defaults to `1000`
  ### Connection parameters
  * `:autocommit` - Mode that issued an automatic COMMIT operation
  * `:fetch` - Number of rows to fetch from the server
  * `:sdu` - Size of session data unit
  * `:role` - Mode that is used in an internal logon
  * `:prelim` - Mode that is permitted when the database is down
  ### Output parameters
  Using syntax for keyword lists: `[{:out, :cursor}]`, `[out: :cursor]`
  Oracle types                     | Literal syntax in params
  :------------------------------- | :-----------------------
  `NUMBER`,`FLOAT`,`BINARY_FLOAT`  | `:number`, `:integer`, `:float`, `:decimal`
  `CHAR`, `VARCHAR2`               | `:varchar`, `:char`, `:string`
  `NCHAR`, `NVARCHAR2`             | `:nvarchar`, `:nchar`, `:binary`
  `DATE`                           | `:date`
  `TIMESTAMP`                      | `:timestamp`
  `TIMESTAMP WITH TIME ZONE`       | `:timestamptz`
  `SYS_REFCURSOR`                  | `:cursor`
  ### Primitive types
  The primitive types are:
  Ecto types              | Oracle types                     | Literal syntax in params
  :---------------------- | :------------------------------- | :-----------------------
  `:id`, `:integer`       | `NUMBER (*,0)`                   | 1, 2, 3
  `:float`                | `NUMBER`,`FLOAT`,`BINARY_FLOAT`  | 1.0, 2.0, 3.0
  `:decimal`              | `NUMBER`,`FLOAT`,`BINARY_FLOAT`  | [`Decimal`](https://hexdocs.pm/decimal)
  `:string`, `:binary`    | `CHAR`, `VARCHAR2`, `CLOB`       | "one hundred"
  `:string`, `:binary`    | `NCHAR`, `NVARCHAR2`, `NCLOB`    | "百元", "万円"
  `{:array, :integer}`    | `RAW`, `BLOB`                    | 'E799BE'
  `:naive_datetime`       | `DATE`, `TIMESTAMP`              | [`NaiveDateTime`](https://hexdocs.pm/elixir)
  `:utc_datetime`         | `TIMESTAMP WITH TIME ZONE`       | [`DateTime`](https://hexdocs.pm/elixir)
  #### Examples
  iex> Ecto.Adapters.SQL.query(YourApp.Repo, "select 1+:1, sysdate, rowid from dual where 1=:1 ", [1])
  {:ok, %{num_rows: 1, rows: [[2, ~N[2016-08-01 13:14:15], "AAAACOAABAAAAWJAAA"]]}}
  """

  use Ecto.Adapters.SQL, driver: Jamdb.Oracle, migration_lock: nil

  @behaviour Ecto.Adapter.Storage
  @behaviour Ecto.Adapter.Structure

  # Storage and structure management (mix ecto.create / ecto.dump etc.) is
  # not supported by this adapter; each callback below just reports failure.
  @impl true
  def storage_up(_opts), do: err()
  @impl true
  def storage_down(_opts), do: err()
  @impl true
  def structure_dump(_default, _config), do: err()
  @impl true
  def structure_load(_default, _config), do: err()

  # Oracle DDL statements implicitly commit, so migrations cannot run inside
  # a transaction.
  @impl true
  def supports_ddl_transaction? do
    false
  end

  # NOTE(review): `{:error, false}` is an unusual error reason — callers of
  # Ecto.Adapter.Storage generally expect `{:error, term}` with a meaningful
  # reason; confirm before relying on the `false` payload.
  defp err, do: {:error, false}
end
defmodule Ecto.Adapters.Jamdb.Oracle.Connection do
  @moduledoc false

  @behaviour Ecto.Adapters.SQL.Connection

  @impl true
  def child_spec(opts) do
    DBConnection.child_spec(Jamdb.Oracle, opts)
  end

  @impl true
  def execute(conn, query, params, opts) do
    DBConnection.execute(conn, query!(query, ""), params, opts)
  end

  @impl true
  def prepare_execute(conn, name, query, params, opts) do
    DBConnection.prepare_execute(conn, query!(query, name), params, opts)
  end

  @impl true
  def stream(conn, query, params, opts) do
    DBConnection.stream(conn, query!(query, ""), params, opts)
  end

  @impl true
  def query(conn, query, params, opts) do
    case DBConnection.prepare_execute(conn, query!(query, ""), params, opts) do
      {:ok, _, result} -> {:ok, result}
      # BUGFIX: previously returned the bare `err`, violating the
      # `{:ok, result} | {:error, Exception.t()}` contract of the
      # Ecto.Adapters.SQL.Connection `query/4` callback.
      {:error, err} -> {:error, err}
    end
  end

  # Wraps raw SQL (binary or iodata) in a %Jamdb.Oracle.Query{}; an already
  # built query struct passes through unchanged.
  defp query!(sql, name) when is_binary(sql) or is_list(sql) do
    %Jamdb.Oracle.Query{statement: IO.iodata_to_binary(sql), name: name}
  end

  defp query!(%{} = query, _name) do
    query
  end

  defdelegate all(query), to: Jamdb.Oracle.Query
  defdelegate update_all(query), to: Jamdb.Oracle.Query
  defdelegate delete_all(query), to: Jamdb.Oracle.Query
  defdelegate insert(prefix, table, header, rows, on_conflict, returning), to: Jamdb.Oracle.Query
  defdelegate update(prefix, table, fields, filters, returning), to: Jamdb.Oracle.Query
  defdelegate delete(prefix, table, filters, returning), to: Jamdb.Oracle.Query

  @impl true
  def to_constraints(_err), do: []

  # DDL execution and DDL logs are not supported; raise with the given term.
  @impl true
  def execute_ddl(err), do: error!(err)
  @impl true
  def ddl_logs(err), do: error!(err)

  defp error!(msg) do
    raise DBConnection.ConnectionError, "#{inspect msg}"
  end
end
|
lib/jamdb_oracle_ecto.ex
| 0.764276 | 0.682514 |
jamdb_oracle_ecto.ex
|
starcoder
|
defmodule ElixirConsole.Autocomplete do
  @moduledoc """
  Encapsulates all the logic related with the autocomplete feature
  """

  alias ElixirConsole.Documentation

  # Upper bound used when slicing the text after the caret.
  @max_command_length 10_000

  @doc """
  Get a list of suggestions with all the possible words that could fit in the
  command that is being typed by the user.
  """
  def get_suggestions(value, caret_position, bindings) do
    target_word = word_to_autocomplete(value, caret_position)

    candidates =
      bindings_variable_names(bindings) ++
        elixir_library_names(modules_or_functions_from_docs(target_word))

    filter_suggestions(candidates, target_word)
  end

  # Sorted names of the variables currently bound in the console session.
  defp bindings_variable_names(bindings) do
    bindings
    |> Enum.map(fn {name, _value} -> Atom.to_string(name) end)
    |> Enum.sort()
  end

  # Decides which documentation bucket the word should be matched against:
  # "Module.fun" prefixes -> :functions, lowercase words -> :kernel_functions,
  # anything else -> :modules.
  defp modules_or_functions_from_docs(word) do
    cond do
      word =~ ~r/^[A-Z]\w*\.\w*$/ -> :functions
      word =~ ~r/^[a-z]/ -> :kernel_functions
      true -> :modules
    end
  end

  # Sorted names from the documentation store for the requested bucket.
  defp elixir_library_names(kind) do
    names =
      case kind do
        :functions -> Documentation.get_functions_names()
        :kernel_functions -> Documentation.get_kernel_functions_names()
        :modules -> Documentation.get_modules_names()
      end

    Enum.sort(names)
  end

  # Keep only candidates sharing the typed prefix, capped at ten results.
  defp filter_suggestions(candidates, prefix) do
    candidates
    |> Enum.filter(&String.starts_with?(&1, prefix))
    |> Enum.take(10)
  end

  @doc """
  Returns a modified version of the command input value with an autocompleted
  word. It means that the `autocompleted_word` value is used to replace the
  word that ends in the `caret_position` position of the provided `value`.
  It returns a tuple with the new input command (modified with the
  autocompleted word) and the new caret position (right after the last
  character of the autocompleted word)
  """
  def autocompleted_input(value, caret_position, autocompleted_word) do
    target_word = word_to_autocomplete(value, caret_position)

    new_value = calculate_new_input_value(value, caret_position, target_word, autocompleted_word)
    new_caret = calculate_new_caret_position(caret_position, target_word, autocompleted_word)

    {new_value, new_caret}
  end

  # The (possibly partial) word that ends exactly at the caret.
  defp word_to_autocomplete(value, caret_position) do
    {before_caret, _after_caret} = split_command_for_autocomplete(value, caret_position)

    case before_caret |> String.split() |> List.last() do
      nil -> ""
      word -> word
    end
  end

  # Splits the command into the text before and after the caret.
  defp split_command_for_autocomplete(value, caret_position) do
    before_caret = String.slice(value, 0, caret_position)
    after_caret = String.slice(value, caret_position, @max_command_length)
    {before_caret, after_caret}
  end

  # The caret moves right by however many characters the completion added.
  defp calculate_new_caret_position(caret_position, target_word, autocompleted_word) do
    caret_position + (String.length(autocompleted_word) - String.length(target_word))
  end

  # Replaces the partial word at the end of the pre-caret text with the
  # autocompleted word, then reattaches the post-caret text.
  defp calculate_new_input_value(input_value, caret_position, target_word, autocompleted_word) do
    {before_caret, after_caret} = split_command_for_autocomplete(input_value, caret_position)

    replaced = Regex.replace(~r/\.*#{target_word}$/, before_caret, autocompleted_word)
    replaced <> after_caret
  end
end
|
lib/elixir_console/autocomplete.ex
| 0.681727 | 0.461381 |
autocomplete.ex
|
starcoder
|
defmodule Notifications.Formatters.Utils do
  @moduledoc """
  Utils module for commonly reused functions within the notifications namespace
  """
  use Timex

  # Parses an ISO-8601 extended timestamp, converts it to UTC, and formats it
  # with microsecond precision as e.g. "2016-08-01T13:14:15.000000Z".
  # Empty or nil input yields "".
  @spec format_date_string(nil | String.t) :: String.t
  def format_date_string(date_string) when date_string == "" or date_string == nil, do: ""
  def format_date_string(date_string) do
    utc_timezone = Timezone.get("UTC", Timex.now)

    date_string
    |> Timex.parse!("{ISO:Extended:Z}")
    |> Timezone.convert(utc_timezone)
    |> Timex.format!("%FT%T.%06fZ", :strftime)
  end

  @spec truncate_slack_message(nil | String.t) :: String.t | nil
  @doc """
  Truncate a string to 1500 characters and append '...'
  Note that for edge cases (1500-1502 length) this will give us back a
  string slightly longer than the original because of the '...'
  """
  # Binary pattern keeps the first 1500 bytes; shorter messages fall through
  # to the identity clause unchanged.
  def truncate_slack_message(<<message :: binary-size(1500), _ :: binary>>), do: message <> "..."
  def truncate_slack_message(message), do: message

  # True when the control is critical (impact >= 0.7) AND at least one of its
  # failed_results has status "failed".
  def failed_critical_control?(%Notifications.Profile.Control{} = control) do
    critical_control?(control) &&
      Enum.any?(control.failed_results, &failed_test?(&1))
  end

  # Criticality threshold: impact of 0.7 or higher.
  def critical_control?(%Notifications.Profile.Control{} = control), do: control.impact >= 0.7

  def failed_test?(%{status: "failed"}), do: true
  def failed_test?(_), do: false

  # Slack-style markdown link "<url|title>", or just the title when no URL.
  def maybe_markdown_url("", title), do: title
  def maybe_markdown_url(url, title), do: "<#{url}|#{title}>"

  @doc """
  This will take a container (list, struct, or map) and recursively convert
  any struct values it contains into maps while retaining non-structs as-is.
  Elixir provides Map.from_struct, but it will not make the conversion recursively.
  """
  def to_map(%{} = value), do: to_map(:ignore, value)
  def to_map(value) when is_list(value) do
    to_map(:ignore, value)
  end

  # Notes on the internals:
  # Elixir map comprehension returns a list, not a map. The Enum.map
  # function accepts maps, but also returns a list. We'll dip down into
  # erlang's "map" module: map:map constructs a new map;
  # and for consistency we'll use the erlang list module for handling lists.
  defp to_map(_, []), do: []
  defp to_map(_, list) when is_list(list), do: :lists.map(&to_map(:ignore, &1), list)
  # Structs are unwrapped first, then processed as plain maps below.
  defp to_map(_, %_struct{} = struct), do: to_map(:ignore, Map.from_struct(struct))
  defp to_map(_, %{} = map), do: :maps.map(&to_map(&1, &2), map)
  defp to_map(_, other), do: other
end
|
components/notifications-service/server/lib/formatters/utils.ex
| 0.784484 | 0.401981 |
utils.ex
|
starcoder
|
defmodule LexibombServer.WordList do
  @moduledoc """
  Manages access to the list of valid gameplay words.
  """

  @doc """
  Starts an agent linked to the current process to store a normalized version
  of `word_list`.
  The default word list is based on <NAME>'s
  _[Yet Another Word List](https://github.com/elasticdog/yawl)_ (YAWL) project.
  """
  @spec start_link(MapSet.t) :: Agent.on_start
  def start_link(word_list \\ default_list()) do
    words = Enum.map(word_list, &normalize/1)
    prefixes = prefixes(words)
    Agent.start_link(fn -> {words, prefixes} end, name: __MODULE__)
  end

  @doc """
  Retrieves the agent's state: a `{words, prefixes}` tuple with the
  normalized word list and the set of all valid prefixes.
  """
  @spec get :: {[String.t], MapSet.t}
  def get do
    Agent.get(__MODULE__, &(&1))
  end

  @doc """
  Checks if the word list contains `word` (case-insensitive).
  """
  @spec member?(String.t) :: boolean
  def member?(word) do
    Agent.get(__MODULE__, fn {words, _} ->
      normalize(word) in words
    end)
  end

  @doc """
  Checks if the given `prefix` is a valid proper prefix of some word in the
  word list (case-insensitive).
  """
  @spec prefix?(String.t) :: boolean
  def prefix?(prefix) do
    Agent.get(__MODULE__, fn {_, prefixes} ->
      normalize(prefix) in prefixes
    end)
  end

  @doc """
  Returns the set of all proper prefixes of each word in the given
  `word_list` (the words themselves are excluded; "" is always included).
  ## Examples
      iex> word_list = MapSet.new(["HELLO", "HELP", "HELPER"])
      iex> LexibombServer.WordList.prefixes(word_list)
      #MapSet<["", "H", "HE", "HEL", "HELL", "HELP", "HELPE"]>
  """
  @spec prefixes(Enum.t) :: MapSet.t
  def prefixes(word_list) do
    Enum.reduce(word_list, MapSet.new([""]), fn word, prefixes ->
      word |> do_prefixes |> MapSet.union(prefixes)
    end)
  end

  # BUGFIX: for single-character words the old range `1..0` counted DOWN,
  # wrongly including the word itself (and "") in its own prefix set; words
  # shorter than two bytes have no proper non-empty prefixes.
  @spec do_prefixes(String.t) :: MapSet.t
  defp do_prefixes(word) when byte_size(word) < 2, do: MapSet.new()
  defp do_prefixes(word) do
    for i <- 1..(byte_size(word) - 1), into: %MapSet{} do
      <<prefix::binary-size(i), _::binary>> = word
      prefix
    end
  end

  # Reads the bundled word list, one word per line.
  @spec default_list :: MapSet.t
  defp default_list do
    Application.app_dir(:lexibomb_server, "priv/word.list")
    |> File.stream!
    # BUGFIX: String.rstrip/1 was deprecated and later removed;
    # String.trim_trailing/1 is the supported equivalent.
    |> Stream.map(&String.trim_trailing/1)
    |> MapSet.new
  end

  # Words are stored and compared in upper case.
  @spec normalize(String.t) :: String.t
  defp normalize(word) do
    String.upcase(word)
  end
end
|
apps/lexibomb_server/lib/lexibomb_server/word_list.ex
| 0.864768 | 0.422594 |
word_list.ex
|
starcoder
|
defmodule Day10.Node.Configuration do
  @moduledoc """
  %Configuration{} struct describes where a node should send its
  high and its low value(s) to. Destination is described as a String
  that consists of the node type (bot or output) and its unique identifier.
  """

  @type destination() :: String.t()
  @type t() :: %__MODULE__{
          low_destination: destination(),
          high_destination: destination()
        }

  @enforce_keys [:low_destination, :high_destination]
  defstruct [:low_destination, :high_destination]

  @doc "Builds a configuration routing the low chip to `low_destination` and the high chip to `high_destination`."
  def new(low_destination, high_destination) do
    %__MODULE__{low_destination: low_destination, high_destination: high_destination}
  end
end
defmodule Day10.Node.State do
  @moduledoc """
  `%State{}` describes the current state of the node: its identifier, the
  chips collected so far, and optionally a `%Configuration{}`.
  """

  alias Day10.Node.Configuration

  @type chips() :: list(integer())
  @type t() :: %__MODULE__{
          identifier: String.t(),
          chips: chips(),
          configuration: Configuration.t() | nil
        }

  defstruct identifier: nil, chips: [], configuration: nil

  @doc "Builds a fresh state for `identifier` with no chips collected yet."
  @spec new(String.t(), Configuration.t() | nil) :: t()
  def new(identifier, configuration \\ nil) do
    # chips defaults to [] via defstruct, so only the varying fields are set.
    %__MODULE__{identifier: identifier, configuration: configuration}
  end
end
defmodule Day10.Node do
  @moduledoc """
  GenServer modelling a chip-handling node (a bot or an output bin).

  A node collects chips via `receive/2`; once it holds at least two chips
  and has a `%Configuration{}`, it forwards its lowest chip to the
  configured low destination and its highest chip to the configured high
  destination.
  """

  use GenServer

  alias Day10.Node.{Configuration, State}

  # A node only redistributes once it holds this many chips.
  @min_chip_count 2
  # which chips should trigger the probe
  @probe [17, 61]

  @doc "Child spec keyed by the node's identifier so many nodes can share one supervisor."
  def child_spec(opts) do
    %{
      id: Keyword.get(opts, :identifier),
      start: {__MODULE__, :start_link, [opts]}
    }
  end

  @doc """
  Starts the node from a keyword list of options (`:identifier`, `:type`,
  `:configuration`).

  The single-argument clause matches the MFA emitted by `child_spec/1`;
  previously only `start_link/2` existed, so supervisor starts crashed
  with an undef error. The two-argument form is kept for existing callers
  (its first argument is ignored).
  """
  def start_link(opts) when is_list(opts), do: start_link(nil, opts)

  def start_link(_args, opts) do
    identifier = Keyword.get(opts, :identifier)
    type = Keyword.get(opts, :type)
    configuration = Keyword.get(opts, :configuration)
    state = State.new(identifier, configuration)
    opts = [name: node_name(type, identifier)]
    GenServer.start_link(__MODULE__, state, opts)
  end

  @impl true
  def init(state) do
    {:ok, state}
  end

  @impl true
  def handle_call({:receive, chipnumber}, _from, state) do
    # Store the received chip; struct-update syntax (rather than Map.put/3)
    # preserves the %State{} key guarantees.
    state = %{state | chips: [chipnumber | state.chips]}

    # Run the chip distribution step if the node is eligible to do so.
    # NOTE(review): chips are never removed from state after distribution,
    # matching the original behavior — confirm that is intended.
    if should_distribute_chips?(state) do
      distribute_chips(state)
    end

    {:reply, :ok, state}
  end

  @impl true
  def handle_call(:list, _from, state) do
    {:reply, state.chips, state}
  end

  # Only distribute the chips if the node is configured to do so and we
  # have collected at least @min_chip_count chips.
  defp should_distribute_chips?(state) do
    state.configuration != nil && length(state.chips) >= @min_chip_count
  end

  # Sends the lowest chip to the low destination and the highest chip to
  # the high destination, reporting the comparison when it matches @probe.
  # NOTE(review): the match below assumes exactly two chips are held; a
  # third chip would raise a MatchError (unchanged from the original).
  defp distribute_chips(state) do
    [lowest, highest] = Enum.sort(state.chips)

    %Configuration{
      low_destination: low_destination,
      high_destination: high_destination
    } = state.configuration

    if [lowest, highest] == @probe do
      IO.puts("bot #{state.identifier} is responsible for comparing #{lowest} and #{highest}")
    end

    # redistribute the received chips accordingly
    receive(low_destination, lowest)
    receive(high_destination, highest)
  end

  # Public API

  @doc "Hands `chipnumber` to the node registered as `bot` (synchronous)."
  def receive(bot, chipnumber) do
    GenServer.call(bot, {:receive, chipnumber})
  end

  @doc "Returns the chips currently held by `bot`."
  def list(bot) do
    GenServer.call(bot, :list)
  end

  @doc """
  Builds the registered name for a node from its type and identifier.
  Creates atoms dynamically, which is acceptable only because the inputs
  come from the bounded puzzle input, not from untrusted data.
  """
  def node_name(type, identifier) do
    Module.concat(__MODULE__, "#{type}-#{identifier}")
  end
end
|
advent-of-code-2016/day_10/lib/node.ex
| 0.878014 | 0.509154 |
node.ex
|
starcoder
|
defmodule Timex.Parse.DateTime.Tokenizers.Directive do
  @moduledoc false
  alias Timex.Parse.DateTime.Parsers
  alias Timex.Parse.DateTime.Tokenizers.Directive

  # A directive couples a format token with the parser combinator that
  # consumes it; `weight` is used when ordering directives during parsing.
  defstruct type: :literal,
            value: nil,
            modifiers: [],
            flags: [],
            width: [min: -1, max: nil],
            parser: nil,
            weight: 0

  @type t :: %__MODULE__{}

  @doc """
  Gets a parsing directive for the given token name, where the token name
  is an atom.

  ## Examples

      alias Timex.Parse.DateTime.Tokenizers.Directive
      %Directive{type: :year4} = Directive.get(:year4, "YYYY")
  """
  @spec get(atom, String.t(), [{atom, term}] | []) :: Directive.t()
  def get(type, directive, opts \\ []) do
    min_width = Keyword.get(opts, :min_width, -1)
    width = Keyword.get(opts, :width, min: min_width, max: nil)
    # Width information is folded into the flags handed to the parser.
    flags = Keyword.merge(Keyword.get(opts, :flags, []), width)
    modifiers = Keyword.get(opts, :modifiers, [])
    get(type, directive, flags, modifiers, width)
  end

  # Token types whose parser function in Parsers shares the token's name.
  @simple_types [
    :year4,
    :year2,
    :century,
    :hour24,
    :hour12,
    :zname,
    :zoffs,
    :zoffs_colon,
    :zoffs_sec,
    :iso_date,
    :iso_time,
    :iso_week,
    :iso_weekday,
    :iso_ordinal,
    :ansic,
    :unix,
    :kitchen,
    :slashed,
    :asn1_utc_time,
    :asn1_generalized_time,
    :strftime_iso_clock,
    :strftime_iso_clock_full,
    :strftime_kitchen,
    :strftime_iso_shortdate
  ]

  # Token types whose parser function in Parsers has a different name,
  # mapped as token type => parser function.
  @mapped_types [
    iso_year4: :year4,
    iso_year2: :year2,
    month: :month2,
    mshort: :month_short,
    mfull: :month_full,
    day: :day_of_month,
    oday: :day_of_year,
    iso_weeknum: :week_of_year,
    week_mon: :week_of_year,
    week_sun: :week_of_year_sun,
    wday_mon: :weekday,
    wday_sun: :weekday,
    wdshort: :weekday_short,
    wdfull: :weekday_full,
    min: :minute,
    sec: :second,
    sec_fractional: :second_fractional,
    sec_epoch: :seconds_epoch,
    us: :microseconds,
    ms: :milliseconds,
    am: :ampm,
    AM: :ampm,
    zabbr: :zname,
    iso_8601_extended: :iso8601_extended,
    iso_8601_basic: :iso8601_basic,
    rfc_822: :rfc822,
    rfc_1123: :rfc1123,
    rfc_3339: :rfc3339,
    strftime_iso_date: :iso_date
  ]

  # As @mapped_types, but the parser is invoked with the :zulu flag so the
  # timezone is interpreted as UTC ("Z").
  @mapped_zulu_types [
    iso_8601_extended_z: :iso8601_extended,
    iso_8601_basic_z: :iso8601_basic,
    rfc_822z: :rfc822,
    rfc_1123z: :rfc1123,
    rfc_3339z: :rfc3339,
    asn1_generalized_time_z: :asn1_generalized_time
  ]

  # Compile-time generation: one get/5 clause per simple type, calling the
  # like-named parser function in Parsers with the accumulated flags.
  for type <- @simple_types do
    def get(unquote(type), directive, flags, mods, width) do
      %Directive{
        type: unquote(type),
        value: directive,
        flags: flags,
        modifiers: mods,
        width: width,
        parser: apply(Parsers, unquote(type), [flags])
      }
    end
  end

  # One get/5 clause per mapped type, calling the mapped parser function.
  for {type, parser_fun} <- @mapped_types do
    def get(unquote(type), directive, flags, mods, width) do
      %Directive{
        type: unquote(type),
        value: directive,
        flags: flags,
        modifiers: mods,
        width: width,
        parser: apply(Parsers, unquote(parser_fun), [flags])
      }
    end
  end

  # One get/5 clause per zulu type; {:zulu, true} is prepended to the flags
  # before invoking the mapped parser function.
  for {type, parser_fun} <- @mapped_zulu_types do
    def get(unquote(type), directive, flags, mods, width) do
      %Directive{
        type: unquote(type),
        value: directive,
        flags: flags,
        modifiers: mods,
        width: width,
        parser: apply(Parsers, unquote(parser_fun), [[{:zulu, true} | flags]])
      }
    end
  end

  # ASN.1 GeneralizedTime with an explicit timezone offset ({:zoffs, true}).
  def get(:asn1_generalized_time_tz, directive, flags, mods, width) do
    %Directive{
      type: :asn1_generalized_time_tz,
      value: directive,
      flags: flags,
      modifiers: mods,
      width: width,
      parser: Parsers.asn1_generalized_time([{:zoffs, true} | flags])
    }
  end

  # Catch-all
  # NOTE(review): unlike the clauses above, this returns an {:error, reason}
  # tuple rather than a %Directive{}; callers must handle both shapes.
  def get(type, _directive, _flags, _mods, _width),
    do: {:error, "Unrecognized directive type: #{type}."}
end
|
lib/parse/datetime/tokenizers/directive.ex
| 0.880129 | 0.514644 |
directive.ex
|
starcoder
|
defmodule Militerm.Util.Yaml do
  @moduledoc """
  A simple YAML writer that lets us get the data from the components and display it for editing.
  """

  @doc """
  Renders `data` as YAML iodata; wrap the result in `to_string/1` to get a
  binary.

  Illustrative examples (outputs shown with `\\n` escapes; exact spacing,
  trailing newlines and map key ordering follow the implementation below):

      Yaml.write_to_string("string") |> to_string()
      #=> "string"

      Yaml.write_to_string(["one", "two", "three"]) |> to_string()
      #=> "- one\\n- two\\n- three\\n"

  NOTE(review): the previous version expressed these as doctests with raw
  `\\n` escapes that the heredoc turned into literal newlines, so they could
  never pass as written; they are kept here as plain examples instead.
  """
  def write_to_string(data), do: write_to_string(data, 0)

  # MapSets are rendered as plain YAML sequences.
  def write_to_string(%MapSet{} = mapset, level) do
    write_to_string(MapSet.to_list(mapset), level)
  end

  # Maps: scalar values go on the key's line; nested lists/maps start on the
  # next line, indented one level deeper. Iteration order follows Erlang map
  # ordering, not insertion order.
  def write_to_string(map, level) when is_map(map) do
    map
    |> Enum.map(fn
      {k, map_or_list} when is_list(map_or_list) or is_map(map_or_list) ->
        [
          String.duplicate(" ", level),
          to_string(k),
          ":\n",
          write_to_string(map_or_list, level + 1)
        ]

      {k, v} ->
        [String.duplicate(" ", level), to_string(k), ": ", write_to_string(v, level), "\n"]
    end)
  end

  # Lists: "- item" per element; nested collections start on the next line.
  def write_to_string(list, level) when is_list(list) do
    list
    |> Enum.map(fn
      item when is_list(item) or is_map(item) ->
        [String.duplicate(" ", level), "-\n", write_to_string(item, level + 1)]

      item ->
        [String.duplicate(" ", level), "- ", write_to_string(item, level), "\n"]
    end)
  end

  # Strings: long strings become folded block scalars (">"); strings with
  # YAML-significant characters are double-quoted and escaped; words YAML
  # would read as booleans/null are quoted; everything else passes through.
  def write_to_string(string, level) when is_binary(string) do
    indent = String.duplicate(" ", level + 1)

    cond do
      String.length(string) > 50 ->
        # wrap the string as much as possible and indent by level
        [
          ">"
          | string
            |> wrap(108 - 2 * level)
            |> Enum.map(fn line -> ["\n", indent, line] end)
        ]

      String.contains?(string, ~w(: " ')) ->
        # Quote, escaping backslashes first so quote escapes stay intact.
        [
          ?",
          string
          |> String.replace("\\", "\\\\")
          |> String.replace("\"", "\\\""),
          ?"
        ]

      string in ~w[on off true false yes no nil null] ->
        [?", string, ?"]

      :else ->
        string
    end
  end

  def write_to_string(true, _), do: "true"
  def write_to_string(false, _), do: "false"
  def write_to_string(nil, _), do: "null"
  # Fallback: atoms, numbers, and anything else with a String.Chars impl.
  def write_to_string(other, _), do: to_string(other)

  # Greedy word-wrap: splits `string` on spaces and chunks words into lines
  # of at most `width` characters (a single over-long word still gets its
  # own line). Returns a list of iodata lines.
  def wrap(string, width) do
    string
    |> String.split(" ", trim: true)
    |> Enum.chunk_while(
      [],
      fn
        word, [] ->
          {:cont, [word]}

        word, acc ->
          # acc holds the current line's words in reverse, space-separated.
          if IO.iodata_length([word, " " | acc]) > width do
            {:cont, Enum.reverse(acc), [word]}
          else
            {:cont, [word, " " | acc]}
          end
      end,
      fn
        [] -> {:cont, []}
        acc -> {:cont, Enum.reverse(acc), []}
      end
    )
  end
end
|
lib/militerm/util/yaml.ex
| 0.77552 | 0.576333 |
yaml.ex
|
starcoder
|
defmodule AWS.ApplicationAutoScaling do
  @moduledoc """
  With Application Auto Scaling, you can configure automatic scaling for the
  following resources:

    * Amazon ECS services
    * Amazon EC2 Spot Fleet requests
    * Amazon EMR clusters
    * Amazon AppStream 2.0 fleets
    * Amazon DynamoDB tables and global secondary indexes throughput capacity
    * Amazon Aurora Replicas
    * Amazon SageMaker endpoint variants
    * Custom resources provided by your own applications or services
    * Amazon Comprehend document classification and entity recognizer endpoints
    * AWS Lambda function provisioned concurrency
    * Amazon Keyspaces (for Apache Cassandra) tables
    * Amazon Managed Streaming for Apache Kafka cluster storage

  ## API Summary

  The Application Auto Scaling service API includes three key sets of actions:

    * Register and manage scalable targets - Register AWS or custom
  resources as scalable targets (a resource that Application Auto Scaling can
  scale), set minimum and maximum capacity limits, and retrieve information on
  existing scalable targets.

    * Configure and manage automatic scaling - Define scaling policies
  to dynamically scale your resources in response to CloudWatch alarms, schedule
  one-time or recurring scaling actions, and retrieve your recent scaling activity
  history.

    * Suspend and resume scaling - Temporarily suspend and later resume
  automatic scaling by calling the
  [RegisterScalableTarget](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) API action for any Application Auto Scaling scalable target. You can suspend and
  resume (individually or in combination) scale-out activities that are triggered
  by a scaling policy, scale-in activities that are triggered by a scaling policy,
  and scheduled scaling.

  To learn more about Application Auto Scaling, including information about
  granting IAM users required permissions for Application Auto Scaling actions,
  see the [Application Auto Scaling User
  Guide](https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html).
  """

  # NOTE: this module follows the standard aws-elixir generated-client shape:
  # one thin public wrapper per API action, all delegating to request/4.

  @doc """
  Deletes the specified scaling policy for an Application Auto Scaling scalable
  target.

  Deleting a step scaling policy deletes the underlying alarm action, but does not
  delete the CloudWatch alarm associated with the scaling policy, even if it no
  longer has an associated action.

  For more information, see [Delete a Step Scaling Policy](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html#delete-step-scaling-policy)
  and [Delete a Target Tracking Scaling Policy](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html#delete-target-tracking-policy)
  in the *Application Auto Scaling User Guide*.
  """
  def delete_scaling_policy(client, input, options \\ []) do
    request(client, "DeleteScalingPolicy", input, options)
  end

  @doc """
  Deletes the specified scheduled action for an Application Auto Scaling scalable
  target.

  For more information, see [Delete a Scheduled Action](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html#delete-scheduled-action)
  in the *Application Auto Scaling User Guide*.
  """
  def delete_scheduled_action(client, input, options \\ []) do
    request(client, "DeleteScheduledAction", input, options)
  end

  @doc """
  Deregisters an Application Auto Scaling scalable target when you have finished
  using it.

  To see which resources have been registered, use
  [DescribeScalableTargets](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html).

  Deregistering a scalable target deletes the scaling policies and the scheduled
  actions that are associated with it.
  """
  def deregister_scalable_target(client, input, options \\ []) do
    request(client, "DeregisterScalableTarget", input, options)
  end

  @doc """
  Gets information about the scalable targets in the specified namespace.

  You can filter the results using `ResourceIds` and `ScalableDimension`.
  """
  def describe_scalable_targets(client, input, options \\ []) do
    request(client, "DescribeScalableTargets", input, options)
  end

  @doc """
  Provides descriptive information about the scaling activities in the specified
  namespace from the previous six weeks.

  You can filter the results using `ResourceId` and `ScalableDimension`.
  """
  def describe_scaling_activities(client, input, options \\ []) do
    request(client, "DescribeScalingActivities", input, options)
  end

  @doc """
  Describes the Application Auto Scaling scaling policies for the specified
  service namespace.

  You can filter the results using `ResourceId`, `ScalableDimension`, and
  `PolicyNames`.

  For more information, see [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html)
  and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html)
  in the *Application Auto Scaling User Guide*.
  """
  def describe_scaling_policies(client, input, options \\ []) do
    request(client, "DescribeScalingPolicies", input, options)
  end

  @doc """
  Describes the Application Auto Scaling scheduled actions for the specified
  service namespace.

  You can filter the results using the `ResourceId`, `ScalableDimension`, and
  `ScheduledActionNames` parameters.

  For more information, see [Scheduled Scaling](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html)
  in the *Application Auto Scaling User Guide*.
  """
  def describe_scheduled_actions(client, input, options \\ []) do
    request(client, "DescribeScheduledActions", input, options)
  end

  @doc """
  Creates or updates a scaling policy for an Application Auto Scaling scalable
  target.

  Each scalable target is identified by a service namespace, resource ID, and
  scalable dimension. A scaling policy applies to the scalable target identified
  by those three attributes. You cannot create a scaling policy until you have
  registered the resource as a scalable target.

  Multiple scaling policies can be in force at the same time for the same scalable
  target. You can have one or more target tracking scaling policies, one or more
  step scaling policies, or both. However, there is a chance that multiple
  policies could conflict, instructing the scalable target to scale out or in at
  the same time. Application Auto Scaling gives precedence to the policy that
  provides the largest capacity for both scale out and scale in. For example, if
  one policy increases capacity by 3, another policy increases capacity by 200
  percent, and the current capacity is 10, Application Auto Scaling uses the
  policy with the highest calculated capacity (200% of 10 = 20) and scales out to
  30.

  We recommend caution, however, when using target tracking scaling policies with
  step scaling policies because conflicts between these policies can cause
  undesirable behavior. For example, if the step scaling policy initiates a
  scale-in activity before the target tracking policy is ready to scale in, the
  scale-in activity will not be blocked. After the scale-in activity completes,
  the target tracking policy could instruct the scalable target to scale out
  again.

  For more information, see [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html)
  and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html)
  in the *Application Auto Scaling User Guide*.

  If a scalable target is deregistered, the scalable target is no longer available
  to execute scaling policies. Any scaling policies that were specified for the
  scalable target are deleted.
  """
  def put_scaling_policy(client, input, options \\ []) do
    request(client, "PutScalingPolicy", input, options)
  end

  @doc """
  Creates or updates a scheduled action for an Application Auto Scaling scalable
  target.

  Each scalable target is identified by a service namespace, resource ID, and
  scalable dimension. A scheduled action applies to the scalable target identified
  by those three attributes. You cannot create a scheduled action until you have
  registered the resource as a scalable target.

  When start and end times are specified with a recurring schedule using a cron
  expression or rates, they form the boundaries of when the recurring action
  starts and stops.

  To update a scheduled action, specify the parameters that you want to change. If
  you don't specify start and end times, the old values are deleted.

  For more information, see [Scheduled Scaling](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html)
  in the *Application Auto Scaling User Guide*.

  If a scalable target is deregistered, the scalable target is no longer available
  to run scheduled actions. Any scheduled actions that were specified for the
  scalable target are deleted.
  """
  def put_scheduled_action(client, input, options \\ []) do
    request(client, "PutScheduledAction", input, options)
  end

  @doc """
  Registers or updates a scalable target.

  A scalable target is a resource that Application Auto Scaling can scale out and
  scale in. Scalable targets are uniquely identified by the combination of
  resource ID, scalable dimension, and namespace.

  When you register a new scalable target, you must specify values for minimum and
  maximum capacity. Current capacity will be adjusted within the specified range
  when scaling starts. Application Auto Scaling scaling policies will not scale
  capacity to values that are outside of this range.

  After you register a scalable target, you do not need to register it again to
  use other Application Auto Scaling operations. To see which resources have been
  registered, use
  [DescribeScalableTargets](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). You can also view the scaling policies for a service namespace by using
  [DescribeScalableTargets](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html).
  If you no longer need a scalable target, you can deregister it by using
  [DeregisterScalableTarget](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DeregisterScalableTarget.html).

  To update a scalable target, specify the parameters that you want to change.
  Include the parameters that identify the scalable target: resource ID, scalable
  dimension, and namespace. Any parameters that you don't specify are not changed
  by this update request.
  """
  def register_scalable_target(client, input, options \\ []) do
    request(client, "RegisterScalableTarget", input, options)
  end

  # Builds and signs the AWS JSON-1.1 request for `action`, then POSTs it.
  # Returns {:ok, decoded_body | nil, raw_response} or {:error, reason}.
  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, map() | nil, map()}
          | {:error, term()}
  defp request(client, action, input, options) do
    client = %{client | service: "application-autoscaling"}
    host = build_host("application-autoscaling", client)
    url = build_url(host, client)

    headers = [
      {"Host", host},
      {"Content-Type", "application/x-amz-json-1.1"},
      # The JSON-1.1 target prefix identifies the service's RPC frontend.
      {"X-Amz-Target", "AnyScaleFrontendService.#{action}"}
    ]

    payload = encode!(client, input)
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
    post(client, url, payload, headers, options)
  end

  # Performs the HTTP POST; only a 200 counts as success. An empty body
  # decodes to nil (some actions return no payload).
  defp post(client, url, payload, headers, options) do
    case AWS.Client.request(client, :post, url, payload, headers, options) do
      {:ok, %{status_code: 200, body: body} = response} ->
        body = if body != "", do: decode!(client, body)
        {:ok, body, response}

      {:ok, response} ->
        {:error, {:unexpected_response, response}}

      error = {:error, _reason} -> error
    end
  end

  # Host resolution: a "local" region with an explicit endpoint wins, then
  # plain "local" maps to localhost, else the usual prefix.region.endpoint.
  defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
    endpoint
  end

  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end

  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end

  defp build_url(host, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}/"
  end

  # JSON (de)serialization is delegated to the configured client codec.
  defp encode!(client, payload) do
    AWS.Client.encode!(client, payload, :json)
  end

  defp decode!(client, payload) do
    AWS.Client.decode!(client, payload, :json)
  end
end
|
lib/aws/generated/application_auto_scaling.ex
| 0.936561 | 0.610512 |
application_auto_scaling.ex
|
starcoder
|
defmodule ElixirRigidPhysics.Collision.AABB do
  @moduledoc """
  Module to handle [axis-aligned bounding boxes](https://en.wikipedia.org/wiki/Minimum_bounding_box#Axis-aligned_minimum_bounding_box).

  Handles converting bodies to AABBs, checking overlaps, and so forth.
  """
  require Record
  # An AABB is stored as its min and max corners in {x, y, z} form.
  Record.defrecord(:aabb, min: {0.0, 0.0, 0.0}, max: {0.0, 0.0, 0.0})
  @type aabb :: record(:aabb, min: {number, number, number}, max: {number, number, number})

  require ElixirRigidPhysics.Dynamics.Body, as: Body
  require ElixirRigidPhysics.Geometry.Sphere, as: Sphere
  require ElixirRigidPhysics.Geometry.Capsule, as: Capsule
  require ElixirRigidPhysics.Geometry.Box, as: Box
  require ElixirRigidPhysics.Geometry.Hull, as: Hull

  alias Graphmath.Quatern

  @doc """
  Creates a world-space AABB given a body record.

  ## Examples

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Sphere, as: Sphere
      iex> require ElixirRigidPhysics.Dynamics.Body, as: Body
      iex> s = Sphere.sphere(radius: 3.0)
      iex> b = Body.create(s, position: {2,1,2}, orientation: {1.0, 0.0, 0.0, 0.0})
      iex> AABB.create_world_from_body(b)
      {:aabb, {-1.0, -2.0, -1.0}, {5.0, 4.0, 5.0}}

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Capsule, as: Capsule
      iex> require ElixirRigidPhysics.Dynamics.Body, as: Body
      iex> s = Capsule.capsule(axial_length: 4.0, cap_radius: 1.0)
      iex> b = Body.create(s, position: {3.0, 4.0, 5.0}, orientation: {1.0, 0.0, 0.0, 0.0})
      iex> AABB.create_world_from_body(b)
      {:aabb, {2.0, 1.0, 4.0}, {4.0, 7.0, 6.0}}

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Capsule, as: Capsule
      iex> require ElixirRigidPhysics.Dynamics.Body, as: Body
      iex> s = Capsule.capsule(axial_length: 4.0, cap_radius: 1.0)
      iex> b = Body.create(s, position: {0.0, 0.0, 0.0}, orientation: {1, 0.0, 0.0, 0.0})
      iex> AABB.create_world_from_body(b)
      {:aabb, {-1.0, -3.0, -1.0}, {1.0, 3.0, 1.0}}

      iex> sqrt_half = :math.sqrt(0.5)
      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Capsule, as: Capsule
      iex> require ElixirRigidPhysics.Dynamics.Body, as: Body
      iex> s = Capsule.capsule(axial_length: 4.0, cap_radius: 1.0)
      iex> b = Body.create(s, position: {0.0, 0.0, 0.0}, orientation: {sqrt_half, sqrt_half, 0.0, 0.0})
      iex> {:aabb, min, max} = AABB.create_world_from_body(b)
      iex> Graphmath.Vec3.equal(min, {-1.0, -1.0, -3.0}, 0.000001 ) and Graphmath.Vec3.equal(max, {1.0, 1.0, 3.0}, 0.000001 )
      true

      iex> sqrt_half = :math.sqrt(0.5)
      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Capsule, as: Capsule
      iex> require ElixirRigidPhysics.Dynamics.Body, as: Body
      iex> s = Capsule.capsule(axial_length: 4.0, cap_radius: 1.0)
      iex> b = Body.create(s, position: {1.0, 2.0, 3.0}, orientation: {sqrt_half, sqrt_half, 0.0, 0.0})
      iex> {:aabb, min, max} = AABB.create_world_from_body(b)
      iex> Graphmath.Vec3.equal(min, {0.0, 1.0, 0.0}, 0.000001 ) and Graphmath.Vec3.equal(max, {2.0, 3.0, 6.0}, 0.000001 )
      true

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Box, as: Box
      iex> require ElixirRigidPhysics.Dynamics.Body, as: Body
      iex> s = Box.box(width: 1.0, height: 2.0, depth: 3.0)
      iex> b = Body.create(s, position: {0.0, 0.0, 0.0}, orientation: Graphmath.Quatern.from_axis_angle(:math.pi()/4, {1.0,0.0,0.0}))
      iex> {:aabb, min, max} = AABB.create_world_from_body(b)
      iex> newmin = {-0.5, -1.76777, -1.76777}
      iex> newmax = {0.5, 1.76777, 1.76777}
      iex> Graphmath.Vec3.equal(min, newmin, 0.0001 ) and Graphmath.Vec3.equal(max, newmax, 0.00001 )
      true
  """
  @spec create_world_from_body(Body.body()) :: aabb()
  # Spheres are rotation-invariant, so the world AABB is just the local
  # radius box translated by the body position.
  def create_world_from_body(
        Body.body(shape: Sphere.sphere(radius: r) = _sphere, position: {px, py, pz})
      ) do
    aabb(
      min: {px - r, py - r, pz - r},
      max: {px + r, py + r, pz + r}
    )
  end

  # Sentinel for min/max reduction; large enough to exceed any real extent.
  @near_infinite 1.0e280

  # General case: rotate the local AABB's eight corners into world
  # orientation, take the min/max over them, then translate by position.
  # This is conservative (the rotated AABB's AABB), not the tightest box.
  def create_world_from_body(
        Body.body(shape: shape, position: {px, py, pz}, orientation: orientation)
      ) do
    # get local-space AABB
    aabb(min: {minx, miny, minz} = min, max: {maxx, maxy, maxz} = max) =
      create_local_from_shape(shape)

    # recreate all corners of AABB, using naming from octants ( https://en.wikipedia.org/wiki/Octant_(solid_geometry) )
    # +x, +y, +z
    c1 = max
    # -x, +y, +z
    c2 = {minx, maxy, maxz}
    # -x, -y, +z
    c3 = {minx, miny, maxz}
    # +x, -y, +z
    c4 = {maxx, miny, maxz}
    # +x, +y, -z
    c5 = {maxx, maxy, minz}
    # -x, +y, -z
    c6 = {minx, maxy, minz}
    # -x, -y, -z
    c7 = min
    # +x, -y, -z
    c8 = {maxx, miny, minz}

    # rotate to match world frame
    c1p = Quatern.transform_vector(orientation, c1)
    c2p = Quatern.transform_vector(orientation, c2)
    c3p = Quatern.transform_vector(orientation, c3)
    c4p = Quatern.transform_vector(orientation, c4)
    c5p = Quatern.transform_vector(orientation, c5)
    c6p = Quatern.transform_vector(orientation, c6)
    c7p = Quatern.transform_vector(orientation, c7)
    c8p = Quatern.transform_vector(orientation, c8)

    # calculate new AABB as the componentwise min/max over rotated corners
    {{minxp, minyp, minzp}, {maxxp, maxyp, maxzp}} =
      [c1p, c2p, c3p, c4p, c5p, c6p, c7p, c8p]
      |> Enum.reduce(
        {{@near_infinite, @near_infinite, @near_infinite},
         {-@near_infinite, -@near_infinite, -@near_infinite}},
        fn {x, y, z}, {{minx, miny, minz}, {maxx, maxy, maxz}} ->
          {
            {min(x, minx), min(y, miny), min(z, minz)},
            {max(x, maxx), max(y, maxy), max(z, maxz)}
          }
        end
      )

    # offset by position
    aabb(
      min: {minxp + px, minyp + py, minzp + pz},
      max: {maxxp + px, maxyp + py, maxzp + pz}
    )
  end

  @doc """
  Creates an AABB from a geometry record, in local space centered on origin.

  Width is the length on the x-axis, height is the length on the y-axis, and depth is the length on the z-axis.

  ## Examples

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Box, as: Box
      iex> AABB.create_local_from_shape( Box.box(width: 1.0, height: 2.0, depth: 3.0))
      {:aabb, {-0.5, -1.0, -1.5}, {0.5, 1.0, 1.5}}

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Sphere, as: Sphere
      iex> AABB.create_local_from_shape( Sphere.sphere(radius: 1.0))
      {:aabb, {-1.0, -1.0, -1.0}, {1.0, 1.0, 1.0}}
      iex> AABB.create_local_from_shape( Sphere.sphere(radius: 2.0))
      {:aabb, {-2.0, -2.0, -2.0}, {2.0, 2.0, 2.0}}

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Capsule, as: Capsule
      iex> AABB.create_local_from_shape( Capsule.capsule(axial_length: 4.0, cap_radius: 1.0))
      {:aabb, {-1.0, -3.0, -1.0}, {1.0, 3.0, 1.0}}

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> require ElixirRigidPhysics.Geometry.Hull, as: Hull
      iex> AABB.create_local_from_shape( Hull.create_box(2,3,4))
      {:aabb, {-1.0, -1.5, -2.0}, {1.0, 1.5, 2.0}}
  """
  # NOTE(review): the spec omits Hull.hull() even though a clause below
  # accepts it — worth widening when specs can be touched.
  @spec create_local_from_shape(Box.box() | Sphere.sphere() | Capsule.capsule()) :: aabb()
  def create_local_from_shape(Box.box(width: w, height: h, depth: d)) do
    aabb(
      min: {-w / 2, -h / 2, -d / 2},
      max: {w / 2, h / 2, d / 2}
    )
  end

  def create_local_from_shape(Sphere.sphere(radius: r)) do
    aabb(
      min: {-r, -r, -r},
      max: {r, r, r}
    )
  end

  # Capsule axis runs along y; caps add their radius above and below.
  def create_local_from_shape(Capsule.capsule(axial_length: al, cap_radius: cr)) do
    half_height = cr + al / 2.0

    aabb(
      min: {-cr, -half_height, -cr},
      max: {cr, half_height, cr}
    )
  end

  # Hulls: flatten all face vertex lists and take the componentwise min/max.
  def create_local_from_shape(Hull.hull(faces: faces)) do
    {{minxp, minyp, minzp}, {maxxp, maxyp, maxzp}} =
      faces
      |> Enum.flat_map(fn a -> a end)
      |> Enum.reduce(
        {{@near_infinite, @near_infinite, @near_infinite},
         {-@near_infinite, -@near_infinite, -@near_infinite}},
        fn {x, y, z}, {{minx, miny, minz}, {maxx, maxy, maxz}} ->
          {
            {min(x, minx), min(y, miny), min(z, minz)},
            {max(x, maxx), max(y, maxy), max(z, maxz)}
          }
        end
      )

    aabb(
      min: {minxp, minyp, minzp},
      max: {maxxp, maxyp, maxzp}
    )
  end

  @doc """
  Checks if two AABBs are overlapping.

  ## Examples

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> a = AABB.aabb( min: {0,0,0}, max: {1,1,1} )
      iex> b = AABB.aabb( min: {2,2,2}, max: {3,3,3} )
      iex> AABB.overlaps?(a,b)
      false

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> a = AABB.aabb( min: {0,0,0}, max: {1,1,1} )
      iex> b = AABB.aabb( min: {0,0,2}, max: {1,1,3} )
      iex> AABB.overlaps?(a,b)
      false

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> a = AABB.aabb( min: {0,0,0}, max: {1,1,1} )
      iex> b = AABB.aabb( min: {0,0,0}, max: {1,1,1} )
      iex> AABB.overlaps?(a,b)
      true

      iex> require ElixirRigidPhysics.Collision.AABB, as: AABB
      iex> a = AABB.aabb( min: {0,0,0}, max: {1,1,1} )
      iex> b = AABB.aabb( min: {0.5,0.5,0.5}, max: {1.5,1.5,1.5} )
      iex> AABB.overlaps?(a,b)
      true
  """
  # Separating-axis test per axis: disjoint on any axis means no overlap.
  # Touching faces (max == min) count as overlapping.
  @spec overlaps?(aabb(), aabb()) :: boolean()
  def overlaps?(
        aabb(min: {aminx, aminy, aminz}, max: {amaxx, amaxy, amaxz}),
        aabb(min: {bminx, bminy, bminz}, max: {bmaxx, bmaxy, bmaxz})
      ) do
    cond do
      amaxx < bminx or aminx > bmaxx -> false
      amaxy < bminy or aminy > bmaxy -> false
      amaxz < bminz or aminz > bmaxz -> false
      true -> true
    end
  end
end
|
lib/collision/aabb.ex
| 0.916027 | 0.605595 |
aabb.ex
|
starcoder
|
defmodule Andy.Profiles.Rover.GMDefs.IntentionsOfOther do
  @moduledoc "The GM definition for :intentions_of_other"

  alias Andy.GM.{GenerativeModelDef, Intention, Conjecture, Round}
  import Andy.GM.Utils
  import Andy.Utils, only: [now: 0]
  require Logger

  # Intent names that count as this rover itself moving. Rounds in which the
  # rover moved are excluded before judging the *other* robot's behavior.
  @moves ~w{go_forward go_backward turn_right turn_left turn move panic}a

  @doc "Returns the generative model definition for :intentions_of_other."
  def gm_def() do
    %GenerativeModelDef{
      name: :intentions_of_other,
      conjectures: [
        conjecture(:other_panicking),
        # Fixed: this entry was a duplicate of :other_panicking, so the
        # :other_homing_on_food conjecture (which has a prior and an
        # intention below) was never registered.
        conjecture(:other_homing_on_food)
      ],
      # allow all conjectures to be activated
      contradictions: [],
      priors: %{
        other_panicking: %{about: :other, values: %{is: false}},
        other_homing_on_food: %{about: :other, values: %{is: false}}
      },
      intentions: %{
        say_other_panicking: %Intention{
          intent_name: :say,
          valuator: panicking_opinion_valuator(),
          repeatable: false
        },
        say_other_homing_on_food: %Intention{
          intent_name: :say,
          valuator: homing_on_food_opinion_valuator(),
          repeatable: false
        }
      }
    }
  end

  # Conjectures

  # opinion
  defp conjecture(:other_panicking) do
    %Conjecture{
      name: :other_panicking,
      # Only activate if actively observing the robot
      activator: opinion_activator(:other),
      self_activated: true,
      predictors: [
        no_change_predictor(:observed,
          default: %{
            is: false,
            proximity: :unknown,
            direction: :unknown,
            duration: 0,
            recently_believed_or_tried?: false
          }
        )
      ],
      valuator: other_panicking_belief_valuator(),
      intention_domain: [:say_other_panicking]
    }
  end

  # opinion
  defp conjecture(:other_homing_on_food) do
    %Conjecture{
      name: :other_homing_on_food,
      activator: opinion_activator(:other),
      self_activated: true,
      predictors: [
        no_change_predictor(:observed,
          default: %{
            is: false,
            proximity: :unknown,
            direction: :unknown,
            duration: 0,
            recently_believed_or_tried?: false
          }
        )
      ],
      valuator: other_homing_on_food_belief_valuator(),
      intention_domain: [:say_other_homing_on_food]
    }
  end

  # Conjecture belief valuators

  # Believes the other robot is panicking when, over the last 15 seconds of
  # rounds where this rover did not itself move, the other was observed often
  # enough while repeatedly reversing both proximity and direction.
  defp other_panicking_belief_valuator() do
    fn conjecture_activation, [_round | previous_rounds] ->
      about = conjecture_activation.about

      observations =
        previous_rounds
        |> Round.rounds_since(now() - 15_000)
        |> Round.longest_round_sequence(fn round ->
          not Enum.any?(Round.intent_names(round), &(&1 in @moves))
        end)
        |> perceived_values(about, :observed, matching: %{is: true})

      observation_count = observations |> Enum.count()

      proximity_reversals =
        Enum.map(observations, &Map.get(&1, :proximity, :unknown)) |> reversals()

      direction_reversals =
        Enum.map(observations, &Map.get(&1, :direction, :unknown)) |> reversals()

      panicking? =
        observation_count > 4 and
          proximity_reversals > 3 and
          direction_reversals > 2

      # Fixed: the message used to claim "direction_reversals=... > 3"
      # although the actual threshold tested above is > 2.
      Logger.info(
        "Other panicking is #{panicking?} from observation_count=#{observation_count} > 4, " <>
          "proximity_reversals=#{proximity_reversals} > 3, " <>
          "direction_reversals=#{direction_reversals} > 2"
      )

      %{is: panicking?}
    end
  end

  # Believes the other robot is homing on food when it was observed changing
  # proximity steadily (many changes, at most one reversal) while holding a
  # nearly constant direction.
  defp other_homing_on_food_belief_valuator() do
    fn conjecture_activation, [_round | previous_rounds] ->
      about = conjecture_activation.about

      observations =
        previous_rounds
        |> Round.rounds_since(now() - 15_000)
        |> Round.longest_round_sequence(fn round ->
          not Enum.any?(Round.intent_names(round), &(&1 in @moves))
        end)
        |> perceived_values(about, :observed, matching: %{is: true})

      observation_count = observations |> Enum.count()
      proximities = Enum.map(observations, &Map.get(&1, :proximity, :unknown))
      proximity_changes = proximities |> count_changes()
      proximity_reversals = proximities |> reversals()

      direction_reversals =
        Enum.map(observations, &Map.get(&1, :direction, :unknown)) |> reversals()

      homing? =
        observation_count > 4 and
          proximity_changes > 4 and
          proximity_reversals <= 1 and
          direction_reversals <= 1

      # Fixed a stray ">" typo that used to appear after "<= 1" in this message.
      Logger.info(
        "Other homing is #{homing?} from observation_count=#{observation_count} > 4, " <>
          "proximity_changes=#{proximity_changes} > 4, " <>
          "proximity_reversals=#{proximity_reversals} <= 1, " <>
          "direction_reversals=#{direction_reversals} <= 1"
      )

      {believed_proximity, believed_direction} =
        case observations do
          [] ->
            # Fixed: returning the bare atom :unknown here raised a MatchError
            # on the tuple destructuring above whenever there were no
            # observations.
            {:unknown, :unknown}

          [%{proximity: proximity, direction: direction} | _] ->
            {proximity, direction}
        end

      %{is: homing?, proximity: believed_proximity, direction: believed_direction}
    end
  end

  # Intention valuators

  # Produces something to say when the other robot is believed to be
  # panicking; nil means there is nothing to say.
  defp panicking_opinion_valuator() do
    fn %{is: panicking?} ->
      if panicking?, do: saying("#{Andy.name_of_other()} is freaking out"), else: nil
    end
  end

  # Produces something to say when the other robot is believed to have found
  # food; nil means there is nothing to say.
  defp homing_on_food_opinion_valuator() do
    fn %{is: homing?} ->
      if homing?, do: saying("#{Andy.name_of_other()} has found food"), else: nil
    end
  end
end
|
lib/andy/profiles/rover/gm_defs/intentions_of_other.ex
| 0.829388 | 0.447883 |
intentions_of_other.ex
|
starcoder
|
defmodule Timeularex do
  @moduledoc """
  NOTE: Timeularex is in early stages of development. Use in production
  should be considered with caution.

  Timeularex is an API client for the [Timeular public API](http://developers.timeular.com/public-api/).
  Timeular is a service to improve time-tracking of activities.

  ## Configuration

  The client provides two options for configuration. The first involves the
  typical setting of variables in your `config.exs` file:

      config :timeularex,
        api_key: <<API_KEY>>,
        api_secret: <<API_SECRET>>,
        api_url: "https://api.timeular.com/api/v2"

  Additionally, you can utilize api_key/1 and api_secret/1 functions in the
  `Timeularex.Config` module.

  Your API key and secret can be retrieved from your [account app settings](https://profile.timeular.com/#/app/account)
  """
  use Application

  alias Timeularex.Client
  alias Timeularex.Resources

  @impl true
  def start(_type, _args) do
    children = [
      Timeularex.Client
    ]

    opts = [strategy: :one_for_one, name: Timeularex.Supervisor]
    Supervisor.start_link(children, opts)
  end

  # Not an Application callback. Kept only for backward compatibility with any
  # caller that invoked it; previously it printed a leftover "test" debug
  # message to stdout (and returned :ok, as this clause still does).
  @doc false
  def init(_), do: :ok

  # Timeular API

  @doc """
  List all the integrations associated with the connected account.
  """
  def integrations do
    "/integrations"
    |> Client.request(:get)
  end

  @doc """
  List all activities associated with the connected account.
  """
  def activities do
    "/activities"
    |> Client.request(:get)
  end

  @doc """
  Create a new Activity. The activity should have a name and color. A Name
  doesn’t have to be unique. Optionally, you can also provide an Integration
  to which the activity will belong to. You can obtain list of enabled
  Integrations with integrations/0.
  """
  def create_activity(%Resources.Activity{} = activity) do
    "/activities"
    |> Client.request(:post, activity)
  end

  @doc """
  Update an existing Activity's name and/or color.
  """
  def update_activity(%Resources.Activity{} = activity) do
    "/activities"
    |> Client.request(:patch, activity)
  end

  @doc """
  Archive an Activity. Time tracked with the Activity will be preserved.
  """
  def archive_activity(activity_id) do
    "/activities/#{activity_id}"
    |> Client.request(:delete)
  end

  @doc """
  Assign an Activity to a given device side.
  """
  def assign_side_activity(device_side, activity_id) do
    "/activities/#{activity_id}/device-side/#{device_side}"
    |> Client.request(:post)
  end

  @doc """
  Unassigns an Activity associated with a given device side.
  """
  def unassign_side_activity(device_side, activity_id) do
    "/activities/#{activity_id}/device-side/#{device_side}"
    |> Client.request(:delete)
  end

  @doc """
  List all archived Activities.
  """
  def archived_activities do
    "/archived_activities"
    |> Client.request(:get)
  end

  @doc """
  List all devices.
  """
  def devices do
    "/devices"
    |> Client.request(:get)
  end

  @doc """
  Sets the status of a device to active.
  """
  def activate_device(device_serial) do
    "/devices/#{device_serial}/active"
    |> Client.request(:post)
  end

  @doc """
  Sets the status of a device to inactive.
  """
  def deactivate_device(device_serial) do
    "/devices/#{device_serial}/active"
    |> Client.request(:delete)
  end

  @doc """
  Update the name of a device.
  """
  def update_device_name(device_serial, name) do
    "/devices/#{device_serial}"
    |> Client.request(:patch, %{name: name})
  end

  @doc """
  Remove a device from the list of known devices. Use activate_device/1 to
  make the device active again.
  """
  def remove_device(device_serial) do
    "/devices/#{device_serial}"
    |> Client.request(:delete)
  end

  @doc """
  Disable a device.
  """
  def disable_device(device_serial) do
    "/devices/#{device_serial}/disabled"
    |> Client.request(:post)
  end

  @doc """
  Enable a device.
  """
  def enable_device(device_serial) do
    "/devices/#{device_serial}/disabled"
    |> Client.request(:delete)
  end

  @doc """
  Returns information regarding what is currently being tracked.
  """
  def tracking do
    "/tracking"
    |> Client.request(:get)
  end

  @doc """
  Start tracking a given Activity.
  """
  def start_tracking(activity_id) do
    # Parenthesized the pipe target for consistency with stop_tracking/1.
    current_datetime = DateTime.utc_now() |> timeular_datetime_format()

    "/tracking/#{activity_id}/start"
    |> Client.request(:post, %{startedAt: current_datetime})
  end

  @doc """
  Edit the notes associated an Activity being tracked.
  """
  def edit_tracking_note(activity_id, %Resources.Note{} = note) do
    "/tracking/#{activity_id}"
    |> Client.request(:patch, note)
  end

  @doc """
  Stop tracking a given Activity.
  """
  def stop_tracking(activity_id) do
    current_datetime = DateTime.utc_now() |> timeular_datetime_format()

    "/tracking/#{activity_id}/stop"
    |> Client.request(:post, %{stoppedAt: current_datetime})
  end

  @doc """
  Return all time entries that falls between a given time range.
  """
  def time_entries(stopped_after, started_before) do
    "/time-entries/#{stopped_after}/#{started_before}"
    |> Client.request(:get)
  end

  @doc """
  Return a time entry by ID.
  """
  def time_entry(entry_id) do
    "/time-entries/#{entry_id}"
    |> Client.request(:get)
  end

  @doc """
  Return the report for the time range bounded by the given timestamps
  (see the Timeular public API for the expected timestamp format).
  """
  def reports(start_timestamp, stop_timestamp) do
    "/report/#{start_timestamp}/#{stop_timestamp}"
    |> Client.request(:get)
  end

  @doc """
  List all tags and mentions associated with the connected account.
  """
  def tags_and_mentions do
    "/tags-and-mentions"
    |> Client.request(:get)
  end

  # Helpers

  # Formats a DateTime the way the Timeular API expects: ISO 8601 with
  # millisecond precision and no trailing "Z".
  defp timeular_datetime_format(datetime) do
    datetime
    |> DateTime.truncate(:millisecond)
    |> DateTime.to_iso8601()
    |> String.trim_trailing("Z")
  end
end
|
lib/timeularex.ex
| 0.815747 | 0.61115 |
timeularex.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.