Dataset schema (⌀ marks nullable columns):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 .. 991k |
| ext | string | 2 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 .. 208 |
| max_stars_repo_name | string | length 6 .. 106 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | |
| max_stars_count ⌀ | int64 | 1 .. 33.5k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 .. 208 |
| max_issues_repo_name | string | length 6 .. 106 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | |
| max_issues_count ⌀ | int64 | 1 .. 16.3k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 .. 208 |
| max_forks_repo_name | string | length 6 .. 106 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | |
| max_forks_count ⌀ | int64 | 1 .. 6.91k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 2 .. 991k |
| avg_line_length | float64 | 1 .. 36k |
| max_line_length | int64 | 1 .. 977k |
| alphanum_fraction | float64 | 0 .. 1 |
hexsha: 73273b53693f5cfdd4ac973e666f3ace8295f542 | size: 3,311 | ext: exs | lang: Elixir
max_stars: lib/elixir/test/elixir/gen_server_test.exs @ gsphanikumar/elixir (6ca225da4e016200a462888348ff1c3feb625b78) | licenses: ["Apache-2.0"] | count: 4 | events: 2015-12-22T02:46:39.000Z .. 2016-04-26T06:11:09.000Z
max_issues: lib/elixir/test/elixir/gen_server_test.exs @ alco/elixir (4407170349aa12c58664cab2122374167e827f5e) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: lib/elixir/test/elixir/gen_server_test.exs @ alco/elixir (4407170349aa12c58664cab2122374167e827f5e) | licenses: ["Apache-2.0"] | count: null | events: null
content:
Code.require_file "test_helper.exs", __DIR__
defmodule GenServerTest do
use ExUnit.Case, async: true
defmodule Stack do
use GenServer
def handle_call(:pop, _from, [h|t]) do
{:reply, h, t}
end
def handle_call(request, from, state) do
super(request, from, state)
end
def handle_cast({:push, item}, state) do
{:noreply, [item|state]}
end
def handle_cast(request, state) do
super(request, state)
end
def terminate(_reason, _state) do
# There is a race condition if the agent is
# restarted too fast and it is registered.
try do
self |> Process.info(:registered_name) |> elem(1) |> Process.unregister
rescue
_ -> :ok
end
:ok
end
end
test "start_link/2, call/2 and cast/2" do
{:ok, pid} = GenServer.start_link(Stack, [:hello])
{:links, links} = Process.info(self, :links)
assert pid in links
assert GenServer.call(pid, :pop) == :hello
assert GenServer.cast(pid, {:push, :world}) == :ok
assert GenServer.call(pid, :pop) == :world
assert GenServer.stop(pid) == :ok
assert GenServer.cast({:global, :foo}, {:push, :world}) == :ok
assert GenServer.cast({:via, :foo, :bar}, {:push, :world}) == :ok
assert GenServer.cast(:foo, {:push, :world}) == :ok
end
test "nil name" do
{:ok, pid} = GenServer.start_link(Stack, [:hello], name: nil)
assert Process.info(pid, :registered_name) == {:registered_name, []}
end
test "start/2" do
{:ok, pid} = GenServer.start(Stack, [:hello])
{:links, links} = Process.info(self, :links)
refute pid in links
GenServer.stop(pid)
end
test "abcast/3" do
{:ok, _} = GenServer.start_link(Stack, [], name: :stack)
assert GenServer.abcast(:stack, {:push, :hello}) == :abcast
assert GenServer.call({:stack, node()}, :pop) == :hello
assert GenServer.abcast([node, :foo@bar], :stack, {:push, :world}) == :abcast
assert GenServer.call(:stack, :pop) == :world
GenServer.stop(:stack)
end
test "multi_call/4" do
{:ok, _} = GenServer.start_link(Stack, [:hello, :world], name: :stack)
assert GenServer.multi_call(:stack, :pop) ==
{[{node(), :hello}], []}
assert GenServer.multi_call([node, :foo@bar], :stack, :pop) ==
{[{node, :world}], [:foo@bar]}
GenServer.stop(:stack)
end
test "whereis/1" do
name = :whereis_server
{:ok, pid} = GenServer.start_link(Stack, [], name: name)
assert GenServer.whereis(name) == pid
assert GenServer.whereis({name, node()}) == pid
assert GenServer.whereis({name, :another_node}) == {name, :another_node}
assert GenServer.whereis(pid) == pid
assert GenServer.whereis(:whereis_bad_server) == nil
{:ok, pid} = GenServer.start_link(Stack, [], name: {:global, name})
assert GenServer.whereis({:global, name}) == pid
assert GenServer.whereis({:global, :whereis_bad_server}) == nil
assert GenServer.whereis({:via, :global, name}) == pid
assert GenServer.whereis({:via, :global, :whereis_bad_server}) == nil
end
test "stop/3" do
{:ok, pid} = GenServer.start(Stack, [])
assert GenServer.stop(pid, :normal) == :ok
{:ok, _} = GenServer.start(Stack, [], name: :stack)
assert GenServer.stop(:stack, :normal) == :ok
end
end
avg_line_length: 29.300885 | max_line_length: 81 | alphanum_fraction: 0.618242
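A minimal interactive sketch of the Stack server exercised by the tests above, assuming the module is compiled; the return values follow the test assertions:

```elixir
{:ok, pid} = GenServer.start_link(GenServerTest.Stack, [:hello])
:hello = GenServer.call(pid, :pop)           # pops the initial element
:ok = GenServer.cast(pid, {:push, :world})   # cast always returns :ok
:world = GenServer.call(pid, :pop)
:ok = GenServer.stop(pid)
```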
hexsha: 73276e7cc7e6b7a51ce86fb7078ce3d049c46e35 | size: 906 | ext: exs | lang: Elixir
max_stars: config/dev.exs @ kuma/teslamate (ea175fddb49cc08070182455e0073c3dcfcb3b4c) | licenses: ["MIT"] | count: 1 | events: 2021-11-03T02:08:43.000Z .. 2021-11-03T02:08:43.000Z
max_issues: config/dev.exs @ kuma/teslamate (ea175fddb49cc08070182455e0073c3dcfcb3b4c) | licenses: ["MIT"] | count: 171 | events: 2020-07-08T18:42:57.000Z .. 2022-03-23T00:55:30.000Z
max_forks: config/dev.exs @ kuma/teslamate (ea175fddb49cc08070182455e0073c3dcfcb3b4c) | licenses: ["MIT"] | count: 1 | events: 2021-03-26T15:46:37.000Z .. 2021-03-26T15:46:37.000Z
content:
import Config
config :teslamate, TeslaMateWeb.Endpoint,
debug_errors: true,
code_reloader: true,
check_origin: false,
watchers: [
node: [
"node_modules/webpack/bin/webpack.js",
"--mode",
"development",
"--stats-colors",
"--watch",
"--watch-options-stdin",
cd: Path.expand("../assets", __DIR__)
]
],
live_reload: [
patterns: [
~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$",
~r"priv/gettext/.*(po)$",
~r"lib/teslamate_web/(live|views)/.*(ex)$",
~r"lib/teslamate_web/templates/.*(eex)$",
~r"grafana/dashboards/.*(json)$"
]
]
config :logger, :console, format: "$metadata[$level] $message\n"
config :phoenix, :stacktrace_depth, 20
config :phoenix, :plug_init_mode, :runtime
config :teslamate, TeslaMate.Repo, show_sensitive_data_on_connection_error: true
config :teslamate, disable_token_refresh: true
avg_line_length: 25.885714 | max_line_length: 80 | alphanum_fraction: 0.639073
hexsha: 7327f9775d24fe0cd29b85c84f1b27aad4712f67 | size: 6,576 | ext: ex | lang: Elixir
max_stars: lib/fika/compiler/erl_translate.ex @ fika-lang/fika (15bffc30daed744670bb2c0fba3e674055adac47) | licenses: ["Apache-2.0"] | count: 220 | events: 2020-09-12T18:16:29.000Z .. 2022-03-15T14:39:05.000Z
max_issues: lib/fika/compiler/erl_translate.ex @ fika-lang/fika (15bffc30daed744670bb2c0fba3e674055adac47) | licenses: ["Apache-2.0"] | count: 60 | events: 2020-09-23T14:20:36.000Z .. 2021-03-08T08:55:57.000Z
max_forks: lib/fika/compiler/erl_translate.ex @ fika-lang/fika (15bffc30daed744670bb2c0fba3e674055adac47) | licenses: ["Apache-2.0"] | count: 25 | events: 2020-09-19T09:06:10.000Z .. 2021-08-24T23:48:39.000Z
content:
defmodule Fika.Compiler.ErlTranslate do
def translate(ast, module_name_str, file) do
line = 1
file = String.to_charlist(file)
module_name = erl_module_name(module_name_str)
module_header = [
{:attribute, line, :file, {file, line}},
{:attribute, line, :module, module_name}
]
functions = ast[:function_defs]
{exports, function_declaration} = to_forms(functions)
module_header ++ exports ++ function_declaration
end
def translate_expression(exp) do
translate_exp(exp)
end
def erl_module_name(module_name_str) do
module_name_str
|> String.replace("/", ".")
|> String.to_atom()
end
defp to_forms(functions) do
Enum.reduce(functions, {[], []}, fn function, {exports, decs} ->
{:function, [position: {line, _, _}], {name, args, _type, exps}} = function
arity = length(args)
export = {:attribute, line, :export, [{name, arity}]}
dec = {:function, line, name, arity, [translate_clauses(args, line, exps)]}
{[export | exports], [dec | decs]}
end)
end
defp translate_clauses(args, line, exps) do
{:clause, line, translate_exps(args), [], translate_exps(exps)}
end
defp translate_exps(exps) do
Enum.map(exps, &translate_exp/1)
end
defp translate_exp({:call, {bin_op, {line, _, _}}, [arg1, arg2], _module})
when bin_op in [:+, :-, :*, :/, :<, :>, :>=, :==] do
{:op, line, bin_op, translate_exp(arg1), translate_exp(arg2)}
end
defp translate_exp({:call, {:<=, {line, _, _}}, [arg1, arg2], _module}) do
{:op, line, :"=<", translate_exp(arg1), translate_exp(arg2)}
end
defp translate_exp({:call, {:!=, {line, _, _}}, [arg1, arg2], _module}) do
{:op, line, :"/=", translate_exp(arg1), translate_exp(arg2)}
end
defp translate_exp({:call, {:!, {line, _, _}}, [arg], _module}) do
{:op, line, :not, translate_exp(arg)}
end
defp translate_exp({:call, {:-, {line, _, _}}, [arg], _module}) do
{:op, line, :-, translate_exp(arg)}
end
defp translate_exp({:call, {:|, {line, _, _}}, [arg1, arg2], _module}) do
{:op, line, :or, translate_exp(arg1), translate_exp(arg2)}
end
defp translate_exp({:call, {:&, {line, _, _}}, [arg1, arg2], _module}) do
{:op, line, :and, translate_exp(arg1), translate_exp(arg2)}
end
defp translate_exp({:call, {name, {line, _, _}}, args, nil}) do
{:call, line, {:atom, line, name}, translate_exps(args)}
end
defp translate_exp({:call, {name, {line, _, _}}, args, module}) do
m_f = {:remote, line, {:atom, line, erl_module_name(module)}, {:atom, line, name}}
{:call, line, m_f, translate_exps(args)}
end
# Call function ref using an identifier
defp translate_exp({:call, {identifier, {line, _, _}}, args}) do
{:call, line, translate_exp(identifier), translate_exps(args)}
end
defp translate_exp({:ext_call, {line, _, _}, {m, f, args, _}}) do
m_f = {:remote, line, {:atom, line, m}, {:atom, line, f}}
{:call, line, m_f, translate_exps(args)}
end
defp translate_exp({:integer, {line, _, _}, value}) do
{:integer, line, value}
end
defp translate_exp({:boolean, {line, _, _}, value}) do
{:atom, line, value}
end
defp translate_exp({:atom, {line, _, _}, value}) do
{:atom, line, value}
end
defp translate_exp({{:=, {line, _, _}}, pattern, exp}) do
{:match, line, translate_exp(pattern), translate_exp(exp)}
end
defp translate_exp({:identifier, {line, _, _}, name}) do
{:var, line, name}
end
defp translate_exp({{:identifier, {line, _, _}, name}, {:type, _, _}}) do
{:var, line, name}
end
defp translate_exp({:string, {line, _, _}, [value]}) when is_binary(value) do
string = {:string, line, String.to_charlist(value)}
{:bin, line, [{:bin_element, line, string, :default, :default}]}
end
defp translate_exp({:string, {line, _, _}, str_elements}) do
translated_exps =
str_elements
|> Enum.map(fn
value when is_binary(value) ->
string = {:string, line, String.to_charlist(value)}
{:bin_element, line, string, :default, :default}
exp ->
interpolation = translate_exp(exp)
{:bin_element, line, interpolation, :default, [:binary]}
end)
{:bin, line, translated_exps}
end
defp translate_exp({:list, {line, _, _}, value}) do
do_translate_list(value, line)
end
defp translate_exp({:tuple, {line, _, _}, value}) do
{:tuple, line, translate_exps(value)}
end
defp translate_exp({:record, {line, _, _}, name, k_vs}) do
k_vs =
Enum.map(k_vs, fn {{:identifier, {l, _, _}, k}, v} ->
{:map_field_assoc, l, {:atom, l, k}, translate_exp(v)}
end)
k_vs = add_record_meta(k_vs, name, line)
{:map, line, k_vs}
end
defp translate_exp({:map, {line, _, _}, key_values}) do
key_values =
Enum.map(key_values, fn {{_, {l, _, _}, _} = k, v} ->
{:map_field_assoc, l, translate_exp(k), translate_exp(v)}
end)
{:map, line, key_values}
end
defp translate_exp({:function_ref, {line, _, _}, {module, function, arg_types}}) do
arity = length(arg_types)
f =
if module do
{:function, {:atom, line, erl_module_name(module)}, {:atom, line, function},
{:integer, line, arity}}
else
{:function, function, arity}
end
{:fun, line, f}
end
defp translate_exp({{:if, {line, _, _}}, condition, if_block, else_block}) do
{
:case,
line,
translate_exp(condition),
[
{:clause, line, [{:atom, line, true}], [], translate_exps(if_block)},
{:clause, line, [{:atom, line, false}], [], translate_exps(else_block)}
]
}
end
defp translate_exp({{:case, {line, _, _}}, exp, clauses}) do
{
:case,
line,
translate_exp(exp),
Enum.map(clauses, &translate_case_clause(line, &1))
}
end
defp translate_exp({:anonymous_function, {line, _, _}, args, exps}) do
{:fun, line, {:clauses, [translate_clauses(args, line, exps)]}}
end
defp translate_case_clause(line, [pattern, block]) do
{:clause, line, [translate_exp(pattern)], [], translate_exps(block)}
end
defp add_record_meta(k_vs, name, line) do
name =
if name do
{:atom, 0, String.to_atom(name)}
else
{nil, 0}
end
[{:map_field_assoc, line, {:atom, 0, :__record__}, name} | k_vs]
end
defp do_translate_list([head | rest], line) do
{:cons, line, translate_exp(head), do_translate_list(rest, line)}
end
defp do_translate_list([], line) do
{nil, line}
end
end
avg_line_length: 28.591304 | max_line_length: 86 | alphanum_fraction: 0.604471
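To make the Erlang abstract-format shapes concrete, a hedged sketch of `translate_expression/1` on two node shapes taken from the clauses above; the position triple `{line, _, _}` is filled with placeholder zeros:

```elixir
alias Fika.Compiler.ErlTranslate

# Literal integers map straight to abstract format.
ErlTranslate.translate_expression({:integer, {1, 0, 0}, 42})
#=> {:integer, 1, 42}

# Binary operators become :op tuples (see the bin_op clause above).
ErlTranslate.translate_expression(
  {:call, {:+, {1, 0, 0}}, [{:integer, {1, 0, 0}, 1}, {:integer, {1, 0, 0}, 2}], nil}
)
#=> {:op, 1, :+, {:integer, 1, 1}, {:integer, 1, 2}}
```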
hexsha: 732841904f8347fd49b2a6b317ffd6e2f8102083 | size: 6,930 | ext: ex | lang: Elixir
max_stars: core/handler/websocket_state.ex @ sylph01/antikythera (47a93f3d4c70975f7296725c9bde2ea823867436) | licenses: ["Apache-2.0"] | count: 144 | events: 2018-04-27T07:24:49.000Z .. 2022-03-15T05:19:37.000Z
max_issues: core/handler/websocket_state.ex @ sylph01/antikythera (47a93f3d4c70975f7296725c9bde2ea823867436) | licenses: ["Apache-2.0"] | count: 123 | events: 2018-05-01T02:54:43.000Z .. 2022-01-28T01:30:52.000Z
max_forks: core/handler/websocket_state.ex @ sylph01/antikythera (47a93f3d4c70975f7296725c9bde2ea823867436) | licenses: ["Apache-2.0"] | count: 14 | events: 2018-05-01T02:30:47.000Z .. 2022-02-21T04:38:56.000Z
content:
# Copyright(c) 2015-2021 ACCESS CO., LTD. All rights reserved.
use Croma
defmodule AntikytheraCore.Handler.WebsocketState do
alias Antikythera.{Time, Conn, Context, ErrorReason}
alias Antikythera.Websocket
alias Antikythera.Websocket.{Frame, FrameList}
alias Antikythera.Context.GearEntryPoint
alias AntikytheraCore.Handler.HelperModules
alias AntikytheraCore.GearLog.{Writer, ContextHelper}
alias AntikytheraCore.{MetricsUploader, GearProcess}
use Croma.Struct,
recursive_new?: true,
fields: [
conn: Conn,
ws_module: Croma.Atom,
gear_impl_state: Croma.Any,
helper_modules: HelperModules,
frames_received: Croma.NonNegInteger,
frames_sent: Croma.NonNegInteger,
error_reason: Croma.TypeGen.nilable(ErrorReason)
]
defun make(
conn :: v[Conn.t()],
{ws_module, _action} :: GearEntryPoint.t(),
helper_modules :: v[HelperModules.t()]
) :: t do
%__MODULE__{
conn: conn,
ws_module: ws_module,
gear_impl_state: nil,
helper_modules: helper_modules,
frames_received: 0,
frames_sent: 0,
error_reason: nil
}
end
@type callback_result :: {:ok, t} | {:reply, FrameList.t(), t}
# taken from :cowboy_websocket.terminate_reason/0, which is not exported
@type cowboy_ws_terminate_reason ::
:normal
| :stop
| :timeout
| :remote
| {:remote, :cow_ws.close_code(), binary}
| {:error, :badencoding | :badframe | :closed | atom}
| {:crash, :error | :exit | :throw, any}
defun init(%__MODULE__{conn: conn, ws_module: ws_module} = state) :: callback_result do
GearProcess.set_max_heap_size()
ContextHelper.set(conn)
%Conn{context: %Context{start_time: start_time}} = conn
log_info(state, start_time, "CONNECTED")
run_callback_and_reply(state, 0, fn -> ws_module.init(conn) end)
end
defun handle_client_message(
%__MODULE__{conn: conn, ws_module: ws_module, gear_impl_state: gear_impl_state} = state,
frame :: v[Frame.t()]
) :: callback_result do
run_callback_and_reply(state, 1, fn ->
ws_module.handle_client_message(gear_impl_state, conn, frame)
end)
end
defun handle_server_message(
%__MODULE__{conn: conn, ws_module: ws_module, gear_impl_state: gear_impl_state} = state,
message :: any
) :: callback_result do
run_callback_and_reply(state, 0, fn ->
ws_module.handle_server_message(gear_impl_state, conn, message)
end)
end
defunp run_callback_and_reply(
state :: v[t],
n_received :: v[non_neg_integer],
f :: (() -> callback_result)
) :: callback_result do
try do
{:ok, f.()}
catch
error_kind, reason -> {{error_kind, reason}, System.stacktrace()}
end
|> case do
{:ok, {new_gear_impl_state, frames_to_send}} ->
n_sent = length(frames_to_send)
new_state =
increment_frames_count(
%__MODULE__{state | gear_impl_state: new_gear_impl_state},
n_received,
n_sent
)
case n_sent do
0 -> {:ok, new_state}
_ -> {:reply, frames_to_send, new_state}
end
{error_tuple, stacktrace} ->
log_error(state, Time.now(), ErrorReason.format(error_tuple, stacktrace))
state_with_error_reason = %__MODULE__{state | error_reason: error_tuple}
{:reply, [:close], state_with_error_reason}
end
end
defun terminate(
%__MODULE__{conn: conn, ws_module: ws_module, gear_impl_state: gear_impl_state} = state,
cowboy_terminate_reason :: cowboy_ws_terminate_reason
) :: any do
now = Time.now()
reason = terminate_reason(state, cowboy_terminate_reason)
log_info(state, now, build_disconnected_log_message(state, reason))
try do
ws_module.terminate(gear_impl_state, conn, reason)
catch
:error, error ->
log_error(state, now, ErrorReason.format({:error, error}, System.stacktrace()))
:throw, value ->
log_error(state, now, ErrorReason.format({:throw, value}, System.stacktrace()))
:exit, reason ->
log_error(state, now, ErrorReason.format({:error, reason}, System.stacktrace()))
end
end
defunp terminate_reason(
%__MODULE__{error_reason: error_reason},
cowboy_terminate_reason :: cowboy_ws_terminate_reason
) :: Websocket.terminate_reason() do
case error_reason do
nil ->
case cowboy_terminate_reason do
{:crash, _kind, reason} -> {:error, reason}
other -> other
end
{_kind, reason} ->
{:error, reason}
end
end
defunp build_disconnected_log_message(
%__MODULE__{
conn: %Conn{context: %Context{start_time: start_time}},
frames_received: frames_received,
frames_sent: frames_sent
},
reason :: Websocket.terminate_reason()
) :: String.t() do
"DISCONNECTED connected_at=#{Time.to_iso_timestamp(start_time)} frames_received=#{
frames_received
} frames_sent=#{frames_sent} reason=#{inspect(reason)}"
end
for level <- [:info, :error] do
# credo:disable-for-next-line Credo.Check.Warning.UnsafeToAtom
defunp unquote(:"log_#{level}")(
%__MODULE__{
conn: %Conn{context: %Context{context_id: context_id}},
helper_modules: %HelperModules{logger: logger}
},
time :: v[Time.t()],
message :: v[String.t()]
) :: :ok do
Writer.unquote(level)(logger, time, context_id, "<websocket> " <> message)
end
end
defunp increment_frames_count(
%__MODULE__{frames_received: frames_received, frames_sent: frames_sent} = state,
n_received :: v[non_neg_integer],
n_sent :: v[non_neg_integer]
) :: t do
submit_metrics_if_any(state, n_received, n_sent)
%__MODULE__{
state
| frames_received: frames_received + n_received,
frames_sent: frames_sent + n_sent
}
end
defunp submit_metrics_if_any(
%__MODULE__{
conn: %Conn{context: %Context{executor_pool_id: epool_id}},
helper_modules: %HelperModules{metrics_uploader: uploader}
},
n_received :: v[non_neg_integer],
n_sent :: v[non_neg_integer]
) :: :ok do
case build_metrics(n_received, n_sent) do
[] -> :ok
list -> MetricsUploader.submit(uploader, list, epool_id)
end
end
defp build_metrics(0, 0), do: []
defp build_metrics(0, n_s), do: [{"websocket_frames_sent", :sum, n_s}]
defp build_metrics(n_r, 0), do: [{"websocket_frames_received", :sum, n_r}]
defp build_metrics(n_r, n_s),
do: [{"websocket_frames_received", :sum, n_r}, {"websocket_frames_sent", :sum, n_s}]
end
avg_line_length: 32.535211 | max_line_length: 98 | alphanum_fraction: 0.632756
hexsha: 73286125b0c0ebcbdcb7ec2d391b34607951d4aa | size: 2,728 | ext: ex | lang: Elixir
max_stars: lib/scenic/primitive/text.ex @ bruceme/scenic (bd8a1e63c122c44cc263e1fb5dfab2547ce8ef43) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: lib/scenic/primitive/text.ex @ bruceme/scenic (bd8a1e63c122c44cc263e1fb5dfab2547ce8ef43) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: lib/scenic/primitive/text.ex @ bruceme/scenic (bd8a1e63c122c44cc263e1fb5dfab2547ce8ef43) | licenses: ["Apache-2.0"] | count: null | events: null
content:
#
# Created by Boyd Multerer on 2017-05-06.
# Copyright © 2017-2021 Kry10 Limited. All rights reserved.
#
defmodule Scenic.Primitive.Text do
@moduledoc """
Draw text on the screen.
## Data
`text`
The data for a Text primitive is a bitstring
* `text` - the text to draw
## Styles
This primitive recognizes the following styles
* [`hidden`](Scenic.Primitive.Style.Hidden.html) - show or hide the primitive
* [`fill`](Scenic.Primitive.Style.Fill.html) - fill in the area of the text. Only solid colors!
* [`font`](Scenic.Primitive.Style.Font.html) - name (or key) of font to use
* [`font_size`](Scenic.Primitive.Style.FontSize.html) - point size of the font
* [`font_blur`](Scenic.Primitive.Style.FontBlur.html) - option to blur the characters
* [`text_align`](Scenic.Primitive.Style.TextAlign.html) - alignment of lines of text
* [`text_height`](Scenic.Primitive.Style.TextHeight.html) - spacing between lines of text
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#text/3)
```elixir
graph
|> text( "Some example text", fill: :green, font: :roboto_mono, font_size: 64 )
```
"""
use Scenic.Primitive
alias Scenic.Script
alias Scenic.Primitive
alias Scenic.Primitive.Style
@type t :: String.t()
@type styles_t :: [
:hidden
| :scissor
| :font
| :font_size
| :line_height
| :text_align
| :text_base
| :line_height
]
@styles [
:hidden,
:scissor,
:font,
:font_size,
:line_height,
:text_align,
:text_base,
:line_height
]
@impl Primitive
@spec validate(text :: t()) :: {:ok, t()} | {:error, String.t()}
def validate(text) when is_bitstring(text) do
{:ok, text}
end
def validate(data) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Text specification
Received: #{inspect(data)}
#{IO.ANSI.yellow()}
The data for Text must be a String#{IO.ANSI.default_color()}
"""
}
end
# --------------------------------------------------------
@doc """
Returns a list of styles recognized by this primitive.
"""
@impl Primitive
@spec valid_styles() :: styles_t()
def valid_styles(), do: @styles
# --------------------------------------------------------
# compiling Text is a special case and is handled in Scenic.ViewPort.GraphCompiler
@doc false
@impl Primitive
@spec compile(primitive :: Primitive.t(), styles :: Style.t()) :: Script.t()
def compile(%Primitive{module: __MODULE__}, _styles) do
raise "compiling Text is a special case and is handled in Scenic.ViewPort.GraphCompiler"
end
end
avg_line_length: 26.745098 | max_line_length: 97 | alphanum_fraction: 0.616935
hexsha: 7328aa17bfcdc6861d95e14634fadb2cf861f8e9 | size: 540 | ext: exs | lang: Elixir
max_stars: config/config.exs @ hippware/wocky_queue (45e7121245525763fb4ca90cb22d476a55eaa952) | licenses: ["MIT"] | count: 12 | events: 2019-08-30T19:10:54.000Z .. 2022-03-19T13:53:19.000Z
max_issues: config/config.exs @ hippware/wocky_queue (45e7121245525763fb4ca90cb22d476a55eaa952) | licenses: ["MIT"] | count: 19 | events: 2019-03-06T17:28:11.000Z .. 2020-02-18T17:00:28.000Z
max_forks: config/config.exs @ hippware/wocky_queue (45e7121245525763fb4ca90cb22d476a55eaa952) | licenses: ["MIT"] | count: 2 | events: 2019-12-17T12:51:20.000Z .. 2021-10-14T18:21:02.000Z
content:
use Mix.Config
if Mix.env() == :test do
config :dawdle, start_pollers: true
config :dawdle_db, ecto_repos: [DawdleDB.Repo]
config :dawdle_db, DawdleDB.Repo,
database: {:system, "DAWDLEDB_DB_DATABASE", "dawdle_db_test"},
username: {:system, "DAWDLEDB_DB_USERNAME", "postgres"},
password: {:system, "DAWDLEDB_DB_PASSWORD", "password"},
hostname: {:system, "DAWDLEDB_DB_HOSTNAME", "localhost"},
show_sensitive_data_on_connection_error: true,
pool: Ecto.Adapters.SQL.Sandbox
config :logger, level: :info
end
avg_line_length: 30 | max_line_length: 66 | alphanum_fraction: 0.716667
hexsha: 7328bb72e7a91508eb4316e2bf689154acd1976a | size: 1,560 | ext: ex | lang: Elixir
max_stars: projects/api/lib/margaret_web/schema/followable_types.ex @ strattadb/margaret (dde5d7b42f6d9b4d320069a0117136dae03b13b5) | licenses: ["MIT"] | count: 82 | events: 2017-11-06T01:00:55.000Z .. 2020-12-09T10:35:29.000Z
max_issues: projects/api/lib/margaret_web/schema/followable_types.ex @ dbstratta/margaret (dde5d7b42f6d9b4d320069a0117136dae03b13b5) | licenses: ["MIT"] | count: 98 | events: 2017-11-06T22:57:32.000Z .. 2020-07-03T04:46:39.000Z
max_forks: projects/api/lib/margaret_web/schema/followable_types.ex @ strattadb/margaret (dde5d7b42f6d9b4d320069a0117136dae03b13b5) | licenses: ["MIT"] | count: 10 | events: 2017-11-16T05:31:58.000Z .. 2020-10-29T18:02:35.000Z
content:
defmodule MargaretWeb.Schema.FollowableTypes do
@moduledoc """
The Followable GraphQL interface.
"""
use Absinthe.Schema.Notation
use Absinthe.Relay.Schema.Notation, :modern
alias MargaretWeb.Resolvers
@followable_implementations [
:user,
:publication
]
interface :followable do
field(:id, non_null(:id))
@desc "The followers of the followable."
field(:followers, :follower_connection)
@desc "Returns a boolean indicating whether the viewing user can follow this followable."
field(:viewer_can_follow, non_null(:boolean))
@desc "Returns a boolean indicating whether the viewing user has followed this followable."
field(:viewer_has_followed, non_null(:boolean))
resolve_type(&Resolvers.Nodes.resolve_type/2)
end
object :followable_mutations do
@desc "Follows a followable."
payload field(:follow) do
input do
field(:followable_id, non_null(:id))
end
output do
field(:followable, non_null(:followable))
end
middleware(Absinthe.Relay.Node.ParseIDs, followable_id: @followable_implementations)
resolve(&Resolvers.Followable.resolve_follow/2)
end
@desc "Follows a followable."
payload field(:unfollow) do
input do
field(:followable_id, non_null(:id))
end
output do
field(:followable, non_null(:followable))
end
middleware(Absinthe.Relay.Node.ParseIDs, followable_id: @followable_implementations)
resolve(&Resolvers.Followable.resolve_unfollow/2)
end
end
end
avg_line_length: 25.57377 | max_line_length: 95 | alphanum_fraction: 0.703846
hexsha: 7328d4cddc2c44c91f4eaf93a650865297fe619b | size: 1,847 | ext: ex | lang: Elixir
max_stars: lib/xml_json/aws_api/deserializer.ex @ MathiasWedeken/xml_json (772f110987501e1f0d8065311110069c1f4e0a20) | licenses: ["MIT"] | count: 2 | events: 2020-10-08T06:49:02.000Z .. 2020-10-29T16:24:20.000Z
max_issues: lib/xml_json/aws_api/deserializer.ex @ MathiasWedeken/xml_json (772f110987501e1f0d8065311110069c1f4e0a20) | licenses: ["MIT"] | count: 2 | events: 2020-10-08T12:15:38.000Z .. 2021-06-01T18:15:31.000Z
max_forks: lib/xml_json/aws_api/deserializer.ex @ MathiasWedeken/xml_json (772f110987501e1f0d8065311110069c1f4e0a20) | licenses: ["MIT"] | count: 2 | events: 2020-10-08T11:56:39.000Z .. 2021-06-01T16:42:43.000Z
content:
defmodule XmlJson.AwsApi.Deserializer do
@moduledoc """
AWS implementation for deserialization from a Xml into Map
"""
alias XmlJson.SaxHandler
@spec deserialize(binary(), map()) :: {:ok, map()} | {:error, Saxy.ParseError.t()}
def deserialize(xml, opts) do
case SaxHandler.parse_string(xml) do
{:ok, element} ->
{:ok, %{element.name => walk_element(element, opts)}}
error ->
error
end
end
defp walk_element(element, opts) do
update_children(%{}, element, opts)
|> update_text(element)
|> update_attributes(element, opts)
end
defp update_children(aws, %{children: children}, opts) do
accumulate_children(aws, children, opts)
end
defp update_children(_aws, _no_children, _opts), do: nil
defp update_text(aws, %{text: ""}), do: aws
defp update_text(aws, %{text: "\n"}), do: aws
defp update_text(_aws, %{text: text}) when is_binary(text), do: handle_empty(text)
defp update_text(_aws, %{text: text}), do: text
defp update_text(aws, _empty_element), do: aws
defp update_attributes(aws, _ignored, _opts), do: aws
defp handle_empty(text) do
case String.trim(text) do
"" -> nil
trimmed -> trimmed
end
end
defp accumulate_children(aws, children, %{list_element_names: names} = opts) do
case Enum.reduce(children, aws, &accumulate_child(&1, &2, opts)) do
map when map_size(map) == 1 ->
[{name, value}] = Map.to_list(map)
if name in names do
List.wrap(value)
else
map
end
map ->
map
end
end
defp accumulate_child(element, object, opts) do
walked = walk_element(element, opts)
Map.update(object, element.name, walked, &accumulate_list(&1, walked))
end
defp accumulate_list(value, walked) do
List.wrap(value) ++ [walked]
end
end
avg_line_length: 25.652778 | max_line_length: 84 | alphanum_fraction: 0.644288
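A hedged usage sketch of `deserialize/2` above; the `list_element_names` option is inferred from `accumulate_children/3`, and the exact `SaxHandler` element shape is an assumption based on the pattern matches in this file:

```elixir
xml = "<Response><items><member>1</member><member>2</member></items></Response>"

{:ok, parsed} =
  XmlJson.AwsApi.Deserializer.deserialize(xml, %{list_element_names: ["member"]})

# Repeated <member> children collapse into a plain list:
# parsed == %{"Response" => %{"items" => ["1", "2"]}}
```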
hexsha: 7328f72e8106257b31cde3fef0c6d7902f9b34e7 | size: 239 | ext: exs | lang: Elixir
max_stars: test/controllers/band_controller_test.exs @ retgoat/band-indigo.com (b8a1cf58ec766858cc2319441fa8628ad763529a) | licenses: ["MIT"] | count: null | events: null
max_issues: test/controllers/band_controller_test.exs @ retgoat/band-indigo.com (b8a1cf58ec766858cc2319441fa8628ad763529a) | licenses: ["MIT"] | count: null | events: null
max_forks: test/controllers/band_controller_test.exs @ retgoat/band-indigo.com (b8a1cf58ec766858cc2319441fa8628ad763529a) | licenses: ["MIT"] | count: null | events: null
content:
defmodule BandIndigo.BandControllerTest do
use BandIndigo.ConnCase
test "GET /" do
conn = get conn(), "/band"
assert html_response(conn, 200) =~ "Indigo is an instrumental playing band based in Novosibirsk, Russia."
end
end
avg_line_length: 26.555556 | max_line_length: 109 | alphanum_fraction: 0.723849
hexsha: 7328fe635d36f7459a16deae0386d739bdf90a77 | size: 114 | ext: exs | lang: Elixir
max_stars: test/grexst_test.exs @ maxbeizer/grexst (15151ab138098c68ca77d38d38f1e14f803e04d5) | licenses: ["MIT"] | count: null | events: null
max_issues: test/grexst_test.exs @ maxbeizer/grexst (15151ab138098c68ca77d38d38f1e14f803e04d5) | licenses: ["MIT"] | count: null | events: null
max_forks: test/grexst_test.exs @ maxbeizer/grexst (15151ab138098c68ca77d38d38f1e14f803e04d5) | licenses: ["MIT"] | count: null | events: null
content:
defmodule GrexstTest do
use ExUnit.Case
doctest Grexst
test "the truth" do
assert 1 + 1 == 2
end
end
avg_line_length: 12.666667 | max_line_length: 23 | alphanum_fraction: 0.666667
hexsha: 73291781ba976fb63e76aa4cc9c71fe51f1e3fab | size: 2,000 | ext: ex | lang: Elixir
max_stars: clients/apigee/lib/google_api/apigee/v1/model/google_cloud_apigee_v1_runtime_trace_sampling_config.ex @ pojiro/elixir-google-api (928496a017d3875a1929c6809d9221d79404b910) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-12-20T03:40:53.000Z .. 2021-12-20T03:40:53.000Z
max_issues: clients/apigee/lib/google_api/apigee/v1/model/google_cloud_apigee_v1_runtime_trace_sampling_config.ex @ pojiro/elixir-google-api (928496a017d3875a1929c6809d9221d79404b910) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-08-18T00:11:23.000Z .. 2020-08-18T00:44:16.000Z
max_forks: clients/apigee/lib/google_api/apigee/v1/model/google_cloud_apigee_v1_runtime_trace_sampling_config.ex @ pojiro/elixir-google-api (928496a017d3875a1929c6809d9221d79404b910) | licenses: ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Apigee.V1.Model.GoogleCloudApigeeV1RuntimeTraceSamplingConfig do
@moduledoc """
NEXT ID: 3 RuntimeTraceSamplingConfig represents the detail settings of distributed tracing. Only the fields that are defined in the distributed trace configuration can be overridden using the distribute trace configuration override APIs.
## Attributes
* `sampler` (*type:* `String.t`, *default:* `nil`) - Sampler of distributed tracing. OFF is the default value.
* `samplingRate` (*type:* `number()`, *default:* `nil`) - Field sampling rate. This value is only applicable when using the PROBABILITY sampler. The supported values are > 0 and <= 0.5.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:sampler => String.t() | nil,
:samplingRate => number() | nil
}
field(:sampler)
field(:samplingRate)
end
defimpl Poison.Decoder,
for: GoogleApi.Apigee.V1.Model.GoogleCloudApigeeV1RuntimeTraceSamplingConfig do
def decode(value, options) do
GoogleApi.Apigee.V1.Model.GoogleCloudApigeeV1RuntimeTraceSamplingConfig.decode(value, options)
end
end
defimpl Poison.Encoder,
for: GoogleApi.Apigee.V1.Model.GoogleCloudApigeeV1RuntimeTraceSamplingConfig do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
avg_line_length: 38.461538 | max_line_length: 240 | alphanum_fraction: 0.7515
hexsha: 7329287fa1ac0db9e807493e14b09c9312865610 | size: 1,617 | ext: ex | lang: Elixir
max_stars: apps/idp/src/idp/accounts/account_forms.ex @ lbrty/idp-backend (81d5f10ef6177a1e678b994331c5a09abbdca8d6) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: apps/idp/src/idp/accounts/account_forms.ex @ lbrty/idp-backend (81d5f10ef6177a1e678b994331c5a09abbdca8d6) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: apps/idp/src/idp/accounts/account_forms.ex @ lbrty/idp-backend (81d5f10ef6177a1e678b994331c5a09abbdca8d6) | licenses: ["Apache-2.0"] | count: null | events: null
content:
defmodule Idp.Accounts.AccountForms do
use Idp.Base.Model
use Idp.Base.Query
alias Idp.Accounts.User
alias Idp.Validators
@password_min_length Application.get_env(:idp, :password_min_length)
def base(%User{} = user, attrs) do
fields = ~w(email full_name is_active is_admin)a
user
|> cast(attrs, fields ++ [:is_consultant])
|> validate_required(fields)
|> validate_format(:email, ~r/.*@.*/)
|> unique_constraint(:email)
end
def new(%User{} = user, params) do
user
|> base(params)
|> cast(params, [:password])
|> validate_required([:password])
|> validate_length(:password, min: @password_min_length)
|> put_password_hash()
end
def update(%User{} = user, params), do: user |> base(params)
def change_password(user, params) do
fields = ~w(password password_hash new_password new_password_confirmation)a
user
|> cast(params, fields)
|> validate_required(fields)
|> Validators.check_password(params[:password])
|> Validators.validate_password_confirmation()
|> validate_length(:new_password, min: @password_min_length)
|> update_password()
end
defp update_password(changes) do
case changes.valid? do
true ->
changes
|> put_change(:password, changes.changes[:new_password])
|> put_password_hash()
_ ->
changes
end
end
defp put_password_hash(%{changes: %{password: password}} = changeset) do
changeset
|> put_change(:password_hash, Auth.hash_password(password))
end
defp put_password_hash(%{changes: %{}} = changeset), do: changeset
end
avg_line_length: 25.666667 | max_line_length: 79 | alphanum_fraction: 0.672851
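A hedged sketch of building a registration changeset with `new/2` above; the field values are hypothetical, and validity also depends on the `:password_min_length` config read at the top of the module:

```elixir
alias Idp.Accounts.{AccountForms, User}

changeset =
  AccountForms.new(%User{}, %{
    email: "jane@example.com",     # must match ~r/.*@.*/
    full_name: "Jane Doe",
    is_active: true,
    is_admin: false,
    password: "correct horse battery staple"
  })

changeset.valid?
#=> true, assuming the password meets the configured minimum length
```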
hexsha: 7329304d13164f5911f33a2e84e78167c9d0284b | size: 700 | ext: ex | lang: Elixir
max_stars: lib/andy/platforms/mock_rover/ultrasonic_sensor.ex @ jfcloutier/andy (74b93f734d6f6353356041a603a96ad5aed4b5dc) | licenses: ["MIT"] | count: 7 | events: 2019-05-29T22:55:25.000Z .. 2021-08-22T18:38:29.000Z
max_issues: lib/andy/platforms/mock_rover/ultrasonic_sensor.ex @ jfcloutier/andy (74b93f734d6f6353356041a603a96ad5aed4b5dc) | licenses: ["MIT"] | count: null | events: null
max_forks: lib/andy/platforms/mock_rover/ultrasonic_sensor.ex @ jfcloutier/andy (74b93f734d6f6353356041a603a96ad5aed4b5dc) | licenses: ["MIT"] | count: 1 | events: 2020-01-25T20:46:43.000Z .. 2020-01-25T20:46:43.000Z
content:
defmodule Andy.MockRover.UltrasonicSensor do
@moduledoc "A mock ultrasonic sensor"
@behaviour Andy.Sensing
alias Andy.Device
# actual max is 250 cms
@max_distance 250
def new(port) do
%Device{
mod: __MODULE__,
class: :sensor,
port: port,
path: "/mock/ultrasonic_sensor",
type: :ultrasonic,
mock: true
}
end
### Sensing
def senses(_) do
[:distance]
end
def read(sensor, :distance) do
# TODO - get reading from andy_world
distance_cm(sensor)
end
def sensitivity(_sensor, _sense) do
nil
end
### Private
defp distance_cm(sensor) do
value = Enum.random(0..@max_distance)
{value, sensor}
end
end
avg_line_length: 15.909091 | max_line_length: 44 | alphanum_fraction: 0.637143
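A minimal sketch of the mock sensor's API as defined above; the port value is a placeholder:

```elixir
alias Andy.MockRover.UltrasonicSensor

sensor = UltrasonicSensor.new("in4")
UltrasonicSensor.senses(sensor)
#=> [:distance]

{cm, _sensor} = UltrasonicSensor.read(sensor, :distance)
# cm is a random integer in 0..250 for this mock device
```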
hexsha: 73293d9ec6331e6ab40f8b46614745627cb0c1f2 | size: 115 | ext: ex | lang: Elixir
max_stars: test/support/repo.ex @ skeleton-elixir/skeleton_soft_delete (b8333a6b67bd65a7f02a7c9826418095e66ce5c5) | licenses: ["MIT"] | count: 2 | events: 2020-10-01T22:41:35.000Z .. 2020-10-01T23:01:34.000Z
max_issues: test/support/repo.ex @ skeleton-elixir/skeleton_soft_delete (b8333a6b67bd65a7f02a7c9826418095e66ce5c5) | licenses: ["MIT"] | count: 4 | events: 2020-06-27T20:13:11.000Z .. 2021-02-20T20:04:14.000Z
max_forks: test/support/repo.ex @ skeleton-elixir/skeleton_soft_delete (b8333a6b67bd65a7f02a7c9826418095e66ce5c5) | licenses: ["MIT"] | count: null | events: null
content:
defmodule Skeleton.App.Repo do
use Ecto.Repo, otp_app: :skeleton_soft_delete, adapter: Ecto.Adapters.Postgres
end
avg_line_length: 38.333333 | max_line_length: 80 | alphanum_fraction: 0.817391
hexsha: 73294629f6ed8c277645f5128a30a3743ef859cf | size: 126 | ext: exs | lang: Elixir
max_stars: apps/repositories/test/repositories_test.exs @ dcbartlett/exFACI (ab8ba1c9490dbe22960d4241434452ba0f55f4ef) | licenses: ["MIT"] | count: null | events: null
max_issues: apps/repositories/test/repositories_test.exs @ dcbartlett/exFACI (ab8ba1c9490dbe22960d4241434452ba0f55f4ef) | licenses: ["MIT"] | count: null | events: null
max_forks: apps/repositories/test/repositories_test.exs @ dcbartlett/exFACI (ab8ba1c9490dbe22960d4241434452ba0f55f4ef) | licenses: ["MIT"] | count: null | events: null
content:
defmodule RepositoriesTest do
use ExUnit.Case
doctest Repositories
test "the truth" do
assert 1 + 1 == 2
end
end
avg_line_length: 14 | max_line_length: 29 | alphanum_fraction: 0.698413
hexsha: 73294b635ee205f60ae007e2e5daea2b0b0b801a | size: 95 | ext: exs | lang: Elixir
max_stars: config/config.exs @ floriank/cloudflare_access (a61abce429929237e93b71fb6bd478e07e9c903b) | licenses: ["MIT"] | count: null | events: null
max_issues: config/config.exs @ floriank/cloudflare_access (a61abce429929237e93b71fb6bd478e07e9c903b) | licenses: ["MIT"] | count: null | events: null
max_forks: config/config.exs @ floriank/cloudflare_access (a61abce429929237e93b71fb6bd478e07e9c903b) | licenses: ["MIT"] | count: null | events: null
content:
use Mix.Config
if Mix.env() == :test do
config :cloudflare_access, :domain, "duksis.lv"
end
avg_line_length: 15.833333 | max_line_length: 49 | alphanum_fraction: 0.694737
hexsha: 73296227507dff6243ea177f23fec1e1fc1573ea | size: 2,368 | ext: ex | lang: Elixir
max_stars: clients/private_ca/lib/google_api/private_ca/v1beta1/model/google_api_servicecontrol_v1_log_entry_operation.ex @ mcrumm/elixir-google-api (544f22797cec52b3a23dfb6e39117f0018448610) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: clients/private_ca/lib/google_api/private_ca/v1beta1/model/google_api_servicecontrol_v1_log_entry_operation.ex @ mcrumm/elixir-google-api (544f22797cec52b3a23dfb6e39117f0018448610) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: clients/private_ca/lib/google_api/private_ca/v1beta1/model/google_api_servicecontrol_v1_log_entry_operation.ex @ mcrumm/elixir-google-api (544f22797cec52b3a23dfb6e39117f0018448610) | licenses: ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.PrivateCA.V1beta1.Model.GoogleApiServicecontrolV1LogEntryOperation do
@moduledoc """
Additional information about a potentially long-running operation with which a log entry is associated.
## Attributes
* `first` (*type:* `boolean()`, *default:* `nil`) - Optional. Set this to True if this is the first log entry in the operation.
* `id` (*type:* `String.t`, *default:* `nil`) - Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.
* `last` (*type:* `boolean()`, *default:* `nil`) - Optional. Set this to True if this is the last log entry in the operation.
* `producer` (*type:* `String.t`, *default:* `nil`) - Optional. An arbitrary producer identifier. The combination of `id` and `producer` must be globally unique. Examples for `producer`: `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:first => boolean(),
:id => String.t(),
:last => boolean(),
:producer => String.t()
}
field(:first)
field(:id)
field(:last)
field(:producer)
end
defimpl Poison.Decoder,
for: GoogleApi.PrivateCA.V1beta1.Model.GoogleApiServicecontrolV1LogEntryOperation do
def decode(value, options) do
GoogleApi.PrivateCA.V1beta1.Model.GoogleApiServicecontrolV1LogEntryOperation.decode(
value,
options
)
end
end
defimpl Poison.Encoder,
for: GoogleApi.PrivateCA.V1beta1.Model.GoogleApiServicecontrolV1LogEntryOperation do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
avg_line_length: 38.819672 | max_line_length: 263 | alphanum_fraction: 0.720439
hexsha: 73297066342575eee92df5623312fe1d1ad8ce6e | size: 3,921 | ext: exs | lang: Elixir
max_stars: spec/hamlex/node/element_spec.exs @ marnen/hamlex (38322997b12972f60a16d40e44f81d0c178e2523) | licenses: ["MIT"] | count: null | events: null
max_issues: spec/hamlex/node/element_spec.exs @ marnen/hamlex (38322997b12972f60a16d40e44f81d0c178e2523) | licenses: ["MIT"] | count: null | events: null
max_forks: spec/hamlex/node/element_spec.exs @ marnen/hamlex (38322997b12972f60a16d40e44f81d0c178e2523) | licenses: ["MIT"] | count: null | events: null
content:
defmodule Hamlex.Node.ElementSpec do
use ESpec
import Hamlex.Node.Element
alias Hamlex.Node.Element
describe ".to_html" do
context "name given" do
it "renders a tag of the given name" do
expect(to_html %Element{name: "foo", selectors: [".bar"]}).to eq "<foo class='bar'></foo>"
end
end
context "name not given" do
it "renders a div" do
expect(to_html %Element{selectors: [".bar"]}).to eq "<div class='bar'></div>"
end
end
context "empty tags" do
let :all_html, do: ~w[html4 html5]
let :formats, do: ["xhtml" | all_html]
context "not on the list of void elements" do
let :tag_name, do: "not-a-void-element"
context "no slash" do
it "renders a separate closing tag for all formats" do
for format <- formats do
expect(to_html %Element{name: tag_name}, config: %{format: format}).to eq "<#{tag_name}></#{tag_name}>"
end
end
end
context "trailing slash" do
context "XHTML" do
it "renders a self-closing tag" do
expect(to_html %Element{name: "#{tag_name}/"}, config: %{format: "xhtml"}).to eq "<#{tag_name} />"
end
end
context "HTML 4 and 5" do
it "renders a singleton tag" do
for format <- all_html do
expect(to_html %Element{name: "#{tag_name}/"}, config: %{format: format}).to eq "<#{tag_name}>"
end
end
end
end
end
context "on the list of void elements" do
let :void_elements, do: ~w[area base br col embed hr img input link meta param source track wbr]
context "XHTML" do
it "renders a self-closing tag" do
for tag_name <- void_elements do
expect(to_html %Element{name: tag_name}, config: %{format: "xhtml"}).to eq "<#{tag_name} />"
end
end
end
context "HTML 4 and 5" do
it "renders a singleton tag" do
for tag_name <- void_elements do
for format <- all_html do
expect(to_html %Element{name: tag_name}, config: %{format: format}).to eq "<#{tag_name}>"
end
end
end
end
end
end
context "body" do
context "string" do
it "uses the string as the tag's content" do
expect(to_html %Element{name: "p", body: "string"}).to eq "<p>string</p>"
end
end
end
context "attributes" do
it "renders the attributes in the tag" do
element = %Element{name: "p", selectors: [".class"], attributes: [{"a", {:string, "b"}}, {"c", {:string, "d"}}]}
expect(to_html element).to eq "<p class='class' a='b' c='d'></p>"
end
it "renders atomic attributes as the name alone" do
element = %Element{name: "p", attributes: ["atomic"]}
expect(to_html element).to eq "<p atomic></p>"
end
context "variable attributes" do
it "renders the value from the binding" do
element = %Element{name: "p", attributes: [{"z", {:var, "variable"}}]}
value = "actual value"
expect(to_html element, locals: %{variable: value}).to eq "<p z='#{value}'></p>"
end
end
context "class" do
it "unifies the class attributes with the . selector, in alphabetical order" do
element = %Element{name: "p", selectors: [".b"], attributes: [{"class", {:string, "a c"}}]}
expect(to_html element).to eq "<p class='a b c'></p>"
end
end
context "id" do
it "joins the id attributes to the # selector with underscores" do
element = %Element{name: "p", selectors: ["#z"], attributes: [{"id", {:string, "a"}}]}
expect(to_html element).to eq "<p id='z_a'></p>"
end
end
end
end
end
avg_line_length: 33.512821 | max_line_length: 120 | alphanum_fraction: 0.547564
hexsha: 732976c4b322e3e316cba8d559b2e753adad600b | size: 427 | ext: ex | lang: Elixir
max_stars: lib/hap/characteristics/occupancy_detected.ex @ petermm/hap (550433a78bccd586ab6a7d8bf85765bfae58b13b) | licenses: ["MIT"] | count: 40 | events: 2019-10-26T01:58:42.000Z .. 2022-03-09T18:18:39.000Z
max_issues: lib/hap/characteristics/occupancy_detected.ex @ petermm/hap (550433a78bccd586ab6a7d8bf85765bfae58b13b) | licenses: ["MIT"] | count: 11 | events: 2021-04-02T14:55:02.000Z .. 2021-11-05T13:49:55.000Z
max_forks: lib/hap/characteristics/occupancy_detected.ex @ petermm/hap (550433a78bccd586ab6a7d8bf85765bfae58b13b) | licenses: ["MIT"] | count: 6 | events: 2020-05-18T09:34:14.000Z .. 2021-11-04T11:14:15.000Z
content:
defmodule HAP.Characteristics.OccupancyDetected do
@moduledoc """
Definition of the `public.hap.characteristic.occupancy-detected` characteristic
Valid values:
0: Occupancy is not detected
1: Occupancy is detected
"""
@behaviour HAP.CharacteristicDefinition
def type, do: "71"
def perms, do: ["pr", "ev"]
def format, do: "uint8"
def min_value, do: 0
def max_value, do: 1
def step_value, do: 1
end
avg_line_length: 22.473684 | max_line_length: 81 | alphanum_fraction: 0.711944
hexsha: 73298527838706bd22d0ab0de2ca917cb95c7535 | size: 842 | ext: ex | lang: Elixir
max_stars: lib/asciinema_web/views/layout_view.ex @ potherca-contrib/asciinema-server (c5ac6e45e8f117d4d59c9c33da6b59b448e40f0e) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: lib/asciinema_web/views/layout_view.ex @ potherca-contrib/asciinema-server (c5ac6e45e8f117d4d59c9c33da6b59b448e40f0e) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: lib/asciinema_web/views/layout_view.ex @ potherca-contrib/asciinema-server (c5ac6e45e8f117d4d59c9c33da6b59b448e40f0e) | licenses: ["Apache-2.0"] | count: null | events: null
content:
defmodule AsciinemaWeb.LayoutView do
use AsciinemaWeb, :view
import AsciinemaWeb.UserView, only: [avatar_url: 1]
def page_title(conn) do
case conn.assigns[:page_title] do
nil -> "asciinema - Record and share your terminal sessions, the right way"
title -> title <> " - asciinema" # TODO return safe string here?
end
end
def body_class(conn) do
action = Phoenix.Controller.action_name(conn)
controller =
conn
|> Phoenix.Controller.controller_module
|> Atom.to_string
|> String.replace(~r/(Elixir\.AsciinemaWeb\.)|(Controller)/, "")
|> String.replace(".", "")
|> Inflex.underscore
|> String.replace("_", " ")
|> Inflex.parameterize
"c-#{controller} a-#{action}"
end
def main_class(conn) do
conn.assigns[:main_class] || "container"
end
end
avg_line_length: 26.3125 | max_line_length: 81 | alphanum_fraction: 0.643705
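To make the `body_class/1` pipeline concrete, a hedged example of its output; the controller and action are hypothetical, and `conn` is assumed to come from a routed request:

```elixir
# For a request handled by AsciinemaWeb.UserController with action :show,
# the pipeline strips "Elixir.AsciinemaWeb." and "Controller", then
# underscores and parameterizes what remains:
AsciinemaWeb.LayoutView.body_class(conn)
#=> "c-user a-show"
```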
hexsha: 7329b845deba31bc5c886bc45c62191abcd444f2 | size: 1,323 | ext: exs | lang: Elixir
max_stars: test/chunkr/page_test.exs @ goodpixel/pager (d3ab68add7f7d08af1a80132e2981c802d446b5f) | licenses: ["MIT"] | count: 3 | events: 2021-09-15T13:21:37.000Z .. 2021-10-14T06:07:49.000Z
max_issues: test/chunkr/page_test.exs @ goodpixel/pager (d3ab68add7f7d08af1a80132e2981c802d446b5f) | licenses: ["MIT"] | count: 2 | events: 2021-09-20T16:39:56.000Z .. 2021-10-14T18:38:34.000Z
max_forks: test/chunkr/page_test.exs @ goodpixel/pager (d3ab68add7f7d08af1a80132e2981c802d446b5f) | licenses: ["MIT"] | count: 1 | events: 2021-09-20T16:19:13.000Z .. 2021-09-20T16:19:13.000Z
content:
defmodule Chunkr.PageTest do
use ExUnit.Case, async: true
alias Chunkr.{Cursor, Opts, Page}
doctest Chunkr.Page
defmodule MockRepo do
def aggregate(User, :count) do
1_234_567
end
end
defp fake_page() do
opts = %Opts{repo: MockRepo, planner: SomeModule, query: User, cursor_mod: Cursor.Base64}
%Page{
raw_results: [{[:cursor_val_1], :foo_record}, {[:cursor_val_2], :bar_record}],
has_previous_page: :maybe,
has_next_page: :maybe_not,
start_cursor: "sure",
end_cursor: "hrpmh",
opts: opts
}
end
describe "Chunkr.Page.total_count/1" do
test "queries the total non-paginated count" do
page = fake_page()
assert 1_234_567 = Page.total_count(page)
end
end
describe "Chunkr.Page.records/1" do
test "returns just the records" do
page = fake_page()
assert [:foo_record, :bar_record] = Page.records(page)
end
end
describe "Chunkr.Page.cursors_and_records/1" do
test "returns opaque cursors alongside their corresponding records" do
page = fake_page()
cursor1 = Cursor.encode([:cursor_val_1], Cursor.Base64)
cursor2 = Cursor.encode([:cursor_val_2], Cursor.Base64)
assert [{^cursor1, :foo_record}, {^cursor2, :bar_record}] = Page.cursors_and_records(page)
end
end
end
avg_line_length: 27 | max_line_length: 96 | alphanum_fraction: 0.670446
hexsha: 7329cfc52d489041bd660b925385cf547736241c | size: 5,468 | ext: exs | lang: Elixir
max_stars: .credo.exs @ arana3/reactive-interaction-gateway (793648bcc5b8b05fc53df1f5f97818fb40ca84be) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: .credo.exs @ arana3/reactive-interaction-gateway (793648bcc5b8b05fc53df1f5f97818fb40ca84be) | licenses: ["Apache-2.0"] | count: 132 | events: 2018-11-26T14:00:54.000Z .. 2022-03-11T04:17:54.000Z
max_forks: .credo.exs @ arana3/reactive-interaction-gateway (793648bcc5b8b05fc53df1f5f97818fb40ca84be) | licenses: ["Apache-2.0"] | count: null | events: null
content:
# This file contains the configuration for Credo and you are probably reading
# this after creating it with `mix credo.gen.config`.
#
# If you find anything wrong or unclear in this file, please report an
# issue on GitHub: https://github.com/rrrene/credo/issues
#
%{
#
# You can have as many configs as you like in the `configs:` field.
configs: [
%{
#
# Run any config using `mix credo -C <name>`. If no config name is given
# "default" is used.
name: "default",
#
# These are the files included in the analysis:
files: %{
#
# You can give explicit globs or simply directories.
# In the latter case `**/*.{ex,exs}` will be used.
included: ["lib/", "src/", "web/", "apps/"],
excluded: [~r"/_build/", ~r"/deps/"]
},
#
# If you create your own checks, you must specify the source files for
# them here, so they can be loaded by Credo before running the analysis.
requires: [],
#
# Credo automatically checks for updates, like e.g. Hex does.
# You can disable this behaviour below:
check_for_updates: true,
#
# If you want to enforce a style guide and need a more traditional linting
# experience, you can change `strict` to `true` below:
strict: true,
#
# If you want to use uncolored output by default, you can change `color`
# to `false` below:
color: true,
#
# You can customize the parameters of any check by adding a second element
# to the tuple.
#
# To disable a check put `false` as second element:
#
# {Credo.Check.Design.DuplicatedCode, false}
#
checks: [
{Credo.Check.Consistency.ExceptionNames, false},
{Credo.Check.Consistency.LineEndings},
{Credo.Check.Consistency.MultiAliasImportRequireUse},
{Credo.Check.Consistency.ParameterPatternMatching},
{Credo.Check.Consistency.SpaceAroundOperators},
{Credo.Check.Consistency.SpaceInParentheses},
{Credo.Check.Consistency.TabsOrSpaces},
# For some checks, like AliasUsage, you can only customize the priority
# Priority values are: `low, normal, high, higher`
{Credo.Check.Design.AliasUsage, false},
# For others you can set parameters
# If you don't want the `setup` and `test` macro calls in ExUnit tests
# or the `schema` macro in Ecto schemas to trigger DuplicatedCode, just
# set the `excluded_macros` parameter to `[:schema, :setup, :test]`.
{Credo.Check.Design.DuplicatedCode, excluded_macros: []},
# You can also customize the exit_status of each check.
# If you don't want TODO comments to cause `mix credo` to fail, just
# set this value to 0 (zero).
{Credo.Check.Design.TagTODO, exit_status: 2},
{Credo.Check.Design.TagFIXME},
{Credo.Check.Readability.FunctionNames},
{Credo.Check.Readability.LargeNumbers},
{Credo.Check.Readability.MaxLineLength, priority: :low, max_length: 100},
{Credo.Check.Readability.ModuleAttributeNames},
{Credo.Check.Readability.ModuleDoc},
{Credo.Check.Readability.ModuleNames},
{Credo.Check.Readability.ParenthesesOnZeroArityDefs},
{Credo.Check.Readability.ParenthesesInCondition},
{Credo.Check.Readability.PredicateFunctionNames},
{Credo.Check.Readability.PreferImplicitTry},
{Credo.Check.Readability.RedundantBlankLines},
{Credo.Check.Readability.StringSigils},
{Credo.Check.Readability.TrailingBlankLine},
{Credo.Check.Readability.TrailingWhiteSpace},
{Credo.Check.Readability.VariableNames},
{Credo.Check.Readability.Semicolons},
{Credo.Check.Refactor.DoubleBooleanNegation},
{Credo.Check.Refactor.CondStatements},
{Credo.Check.Refactor.CyclomaticComplexity},
{Credo.Check.Refactor.FunctionArity},
{Credo.Check.Refactor.MatchInCondition},
{Credo.Check.Refactor.NegatedConditionsInUnless},
{Credo.Check.Refactor.NegatedConditionsWithElse},
{Credo.Check.Refactor.Nesting},
{Credo.Check.Refactor.PipeChainStart, false},
{Credo.Check.Refactor.UnlessWithElse},
{Credo.Check.Warning.BoolOperationOnSameValues},
{Credo.Check.Warning.IExPry},
{Credo.Check.Warning.IoInspect},
{Credo.Check.Warning.OperationOnSameValues},
{Credo.Check.Warning.OperationWithConstantResult},
{Credo.Check.Warning.UnusedEnumOperation},
{Credo.Check.Warning.UnusedFileOperation},
{Credo.Check.Warning.UnusedKeywordOperation},
{Credo.Check.Warning.UnusedListOperation},
{Credo.Check.Warning.UnusedPathOperation},
{Credo.Check.Warning.UnusedRegexOperation},
{Credo.Check.Warning.UnusedStringOperation},
{Credo.Check.Warning.UnusedTupleOperation},
# Controversial and experimental checks (opt-in, just remove `, false`)
#
{Credo.Check.Refactor.ABCSize, false},
{Credo.Check.Refactor.AppendSingleItem, false},
{Credo.Check.Refactor.VariableRebinding, false},
{Credo.Check.Warning.MapGetUnsafePass, false},
# Deprecated checks (these will be deleted after a grace period)
{Credo.Check.Readability.Specs, false}
# Custom checks can be created using `mix credo.gen.check`.
#
]
}
]
}
avg_line_length: 42.061538 | max_line_length: 81 | alphanum_fraction: 0.659108
hexsha: 7329d97448a2fc60d5ec2d074ed7ab7bf8ea3785 | size: 2,367 | ext: ex | lang: Elixir
max_stars: lib/hexpm/parallel/process.ex @ findmypast/hexfmp (38a50f5e1057833fd98748faac230bf4b9cc26a3) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: lib/hexpm/parallel/process.ex @ findmypast/hexfmp (38a50f5e1057833fd98748faac230bf4b9cc26a3) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: lib/hexpm/parallel/process.ex @ findmypast/hexfmp (38a50f5e1057833fd98748faac230bf4b9cc26a3) | licenses: ["Apache-2.0"] | count: null | events: null
content:
defmodule Hexpm.Parallel.Process do
use GenServer
require Logger
def reduce(fun, args, acc, reducer, opts) do
{:ok, pid} = GenServer.start_link(__MODULE__, new_state(opts))
try do
GenServer.call(pid, {:reduce, fun, args, reducer, acc}, opts[:timeout])
after
GenServer.stop(pid)
end
end
def handle_call(:stop, _from, state) do
{:stop, :normal, :ok, state}
end
def handle_call({:reduce, fun, args, reducer, acc}, from, state) do
state = %{state |
fun: fun,
args: args,
reducer: reducer,
acc: acc,
from: from,
num_jobs: length(args),
num_finished: 0
}
state = run_tasks(state)
{:noreply, state}
end
def handle_info({ref, message}, state) when is_reference(ref) do
state =
%{state | running: Map.delete(state.running, ref),
num_finished: state.num_finished + 1,
acc: state.reducer.({:ok, message}, state.acc)}
|> run_task
|> maybe_reply
{:noreply, state}
end
def handle_info({:DOWN, ref, _, _proc, reason}, state) do
case Map.fetch(state.running, ref) do
{:ok, arg} ->
Logger.error(["Parallel task failed with reason: `", inspect(reason), "` and args: `", inspect(arg), "`"])
%{state | running: Map.delete(state.running, ref),
num_finished: state.num_finished + 1,
acc: state.reducer.({:error, arg}, state.acc)}
|> run_task
|> maybe_reply
:error ->
{:noreply, state}
end
end
defp maybe_reply(%{num_finished: finished, num_jobs: jobs, acc: acc} = state)
when finished >= jobs do
GenServer.reply(state.from, acc)
state
end
defp maybe_reply(state), do: state
defp run_tasks(state) do
Enum.reduce(1..state.max_jobs, state, fn _ix, state ->
run_task(state)
end)
end
defp run_task(state) do
case state.args do
[arg|args] ->
task = Task.Supervisor.async_nolink(Hexpm.Tasks, fn -> state.fun.(arg) end)
%{state | running: Map.put(state.running, task.ref, arg), args: args}
[] ->
state
end
end
defp new_state(opts) do
%{max_jobs: opts[:parallel],
running: Map.new,
num_jobs: nil,
num_finished: nil,
fun: nil,
args: nil,
reducer: nil,
acc: nil,
from: nil}
end
end
avg_line_length: 25.180851 | max_line_length: 114 | alphanum_fraction: 0.588086
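A hedged usage sketch of `reduce/5`; the `:parallel` and `:timeout` option keys come from `new_state/1` and the `GenServer.call` above, and the `Hexpm.Tasks` task supervisor is assumed to be running:

```elixir
square = fn n -> n * n end

# The reducer sees {:ok, result} or {:error, arg} per the handle_info clauses.
reducer = fn
  {:ok, result}, acc -> [result | acc]
  {:error, _arg}, acc -> acc
end

Hexpm.Parallel.Process.reduce(square, [1, 2, 3, 4], [], reducer,
  parallel: 2,
  timeout: 5_000
)
#=> squared values accumulated in completion order, e.g. [16, 9, 4, 1]
```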
hexsha: 7329e0979d81cdfa230047d3a2e7f74e2c874cbd | size: 2,555 | ext: ex | lang: Elixir
max_stars: clients/document_ai/lib/google_api/document_ai/v1beta3/model/google_cloud_documentai_v1beta2_document_page_visual_element.ex @ mcrumm/elixir-google-api (544f22797cec52b3a23dfb6e39117f0018448610) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: clients/document_ai/lib/google_api/document_ai/v1beta3/model/google_cloud_documentai_v1beta2_document_page_visual_element.ex @ mcrumm/elixir-google-api (544f22797cec52b3a23dfb6e39117f0018448610) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-12-18T09:25:12.000Z .. 2020-12-18T09:25:12.000Z
max_forks: clients/document_ai/lib/google_api/document_ai/v1beta3/model/google_cloud_documentai_v1beta2_document_page_visual_element.ex @ mcrumm/elixir-google-api (544f22797cec52b3a23dfb6e39117f0018448610) | licenses: ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageVisualElement do
@moduledoc """
Detected non-text visual elements e.g. checkbox, signature etc. on the page.
## Attributes
* `detectedLanguages` (*type:* `list(GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageDetectedLanguage.t)`, *default:* `nil`) - A list of detected languages together with confidence.
* `layout` (*type:* `GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageLayout.t`, *default:* `nil`) - Layout for VisualElement.
* `type` (*type:* `String.t`, *default:* `nil`) - Type of the VisualElement.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:detectedLanguages =>
list(
GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageDetectedLanguage.t()
),
:layout =>
GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageLayout.t(),
:type => String.t()
}
field(:detectedLanguages,
as:
GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageDetectedLanguage,
type: :list
)
field(:layout,
as: GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageLayout
)
field(:type)
end
defimpl Poison.Decoder,
for: GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageVisualElement do
def decode(value, options) do
GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageVisualElement.decode(
value,
options
)
end
end
defimpl Poison.Encoder,
for: GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageVisualElement do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
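# A minimal decoding sketch (hedged: the JSON payload is illustrative, not an
# actual API response; it exercises the Poison protocol impls above):
#
#     json = ~s({"type": "checkbox"})
#
#     %{type: type} =
#       Poison.decode!(json,
#         as: %GoogleApi.DocumentAI.V1beta3.Model.GoogleCloudDocumentaiV1beta2DocumentPageVisualElement{}
#       )
#
#     type
#     #=> "checkbox"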
| 36.5 | 212 | 0.748337 |
7329eb25a7e81146018fa7c1aa99ba97bf4d043f
| 1,270 |
exs
|
Elixir
|
mix.exs
|
brndnmtthws/citrine
|
ff815f7ed921abbde658ca4ba7b6aea68b6c0f5f
|
[
"MIT"
] | 8 |
2020-08-06T03:08:31.000Z
|
2021-12-15T08:41:59.000Z
|
mix.exs
|
brndnmtthws/citrine
|
ff815f7ed921abbde658ca4ba7b6aea68b6c0f5f
|
[
"MIT"
] | 1 |
2020-10-18T19:48:09.000Z
|
2020-10-26T16:40:25.000Z
|
mix.exs
|
brndnmtthws/citrine
|
ff815f7ed921abbde658ca4ba7b6aea68b6c0f5f
|
[
"MIT"
] | 2 |
2020-10-23T12:28:21.000Z
|
2021-01-09T20:44:37.000Z
|
defmodule Citrine.MixProject do
use Mix.Project
def project do
[
app: :citrine,
version: "0.1.12",
elixir: "~> 1.10",
start_permanent: Mix.env() == :prod,
deps: deps(),
description: description(),
package: package(),
# Docs
name: "Citrine",
source_url: "https://github.com/brndnmtthws/citrine",
homepage_url: "http://hexdocs.pm/citrine/readme.html",
docs: [
# The main page in the docs
main: "readme",
extras: ["README.md", "LICENSE"]
]
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger],
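      # Unlike extra_applications, included_applications are loaded but not
      # auto-started, so :mnesia can be configured and started by the library.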
included_applications: [:mnesia]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:crontab, "~> 1.1"},
{:local_cluster, "~> 1.1", only: [:test]},
{:temp, "~> 0.4", only: [:test]},
{:ex_doc, "~> 0.22", only: :dev, runtime: false}
]
end
defp description() do
"Elixir library for running cron-based scheduled jobs on your Erlang cluster."
end
defp package() do
[
name: "citrine",
licenses: ["MIT"],
links: %{"GitHub" => "https://github.com/brndnmtthws/citrine"}
]
end
end
| 22.678571 | 82 | 0.56378 |
732a031655fd0e9301b12b30b1c3fd2965ad6ea8
| 377 |
exs
|
Elixir
|
priv/repo/migrations/20151224015910_add_sample_models.exs
|
r-icarus/filtrex
|
609239678226729d3dd5349a97e58762b671a0d9
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20151224015910_add_sample_models.exs
|
r-icarus/filtrex
|
609239678226729d3dd5349a97e58762b671a0d9
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20151224015910_add_sample_models.exs
|
r-icarus/filtrex
|
609239678226729d3dd5349a97e58762b671a0d9
|
[
"MIT"
] | 1 |
2020-08-04T21:11:18.000Z
|
2020-08-04T21:11:18.000Z
|
defmodule Filtrex.Repo.Migrations.AddSampleModels do
use Ecto.Migration
def change do
create table(:sample_models) do
add :title, :string
add :date_column, :date
add :datetime_column, :datetime
add :upvotes, :integer
add :rating, :float
add :comments, :text
      timestamps()
end
end
end
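# Apply with `mix ecto.migrate`; since this migration uses `change/0`, Ecto
# can also reverse it via `mix ecto.rollback`.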
| 22.176471 | 52 | 0.591512 |
732a0cffe050fa56515636fb0a6b97917047180c
| 1,518 |
ex
|
Elixir
|
apps/omg_watcher/lib/eventer.ex
|
SingularityMatrix/elixir-omg
|
7db3fcc3adfa303e30ff7703148cc5110b587d20
|
[
"Apache-2.0"
] | null | null | null |
apps/omg_watcher/lib/eventer.ex
|
SingularityMatrix/elixir-omg
|
7db3fcc3adfa303e30ff7703148cc5110b587d20
|
[
"Apache-2.0"
] | null | null | null |
apps/omg_watcher/lib/eventer.ex
|
SingularityMatrix/elixir-omg
|
7db3fcc3adfa303e30ff7703148cc5110b587d20
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule OMG.Watcher.Eventer do
@moduledoc """
Imperative shell for handling events, which are exposed to the client of the Watcher application.
  Event triggers are processed here: they are transformed into events and
  pushed to Phoenix Channels under their respective topics.
  See `OMG.API.EventerAPI` for the API to this GenServer.
"""
alias OMG.JSONRPC
alias OMG.Watcher.Eventer.Core
alias OMG.Watcher.Web.Endpoint
### Client
def start_link(_args) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
### Server
use GenServer
def init(:ok) do
{:ok, nil}
end
def handle_cast({:emit_events, event_triggers}, state) do
event_triggers
|> Core.pair_events_with_topics()
|> Enum.each(fn {topic, event_name, event} ->
:ok = Endpoint.broadcast!(topic, event_name, JSONRPC.Client.encode(event))
end)
{:noreply, state}
end
end
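# A minimal usage sketch (hedged: `event_triggers` is an illustrative
# placeholder; the message shape mirrors handle_cast/2 above):
#
#     GenServer.cast(OMG.Watcher.Eventer, {:emit_events, event_triggers})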
| 29.192308 | 107 | 0.731884 |
732a23bb0e85a416aaf0493b48329e0d074f0f77
| 1,842 |
ex
|
Elixir
|
web/web.ex
|
ntsai/xadmin
|
82d8be63e69483ff66472481e66f9870face355b
|
[
"MIT"
] | 5 |
2016-08-30T01:23:50.000Z
|
2021-09-22T14:39:00.000Z
|
web/web.ex
|
ntsai/xadmin
|
82d8be63e69483ff66472481e66f9870face355b
|
[
"MIT"
] | null | null | null |
web/web.ex
|
ntsai/xadmin
|
82d8be63e69483ff66472481e66f9870face355b
|
[
"MIT"
] | 1 |
2021-12-10T11:10:55.000Z
|
2021-12-10T11:10:55.000Z
|
defmodule XAdmin.Web do
@moduledoc false
def model do
quote do
use Ecto.Schema
import Ecto
import Ecto.Changeset
import Ecto.Query, only: [from: 1, from: 2]
end
end
def controller do
quote do
use Phoenix.Controller
import Ecto.Model
import Ecto.Query, only: [from: 1, from: 2]
import XAdmin.Router.Helpers
import XAdmin.Utils, only: [admin_path: 0, admin_path: 2, admin_resource_path: 3, admin_association_path: 4]
import XAdmin.Controller
defp set_theme(conn, _) do
assign(conn, :theme, XAdmin.theme)
end
defp set_layout(conn, _) do
layout = Application.get_env(:xadmin, :layout) || "#{conn.assigns.theme.name}.html"
put_layout(conn, layout)
end
end
end
def view do
quote do
require Logger
      file_path =
        __ENV__.file
        |> Path.dirname()
        |> String.split("/views")
        |> hd()
        |> Path.join("templates")
use Phoenix.View, root: file_path
# use Phoenix.View, root: "web/templates"
# Import convenience functions from controllers
import Phoenix.Controller, only: [view_module: 1]
# Use all HTML functionality (forms, tags, etc)
use Phoenix.HTML
import XAdmin.Router.Helpers
#import ExAuth
import XAdmin.ViewHelpers
end
end
def router do
quote do
use Phoenix.Router
end
end
def channel do
quote do
use Phoenix.Channel
# alias Application.get_env(:xadmin, :repo)
# import Application.get_env(:xadmin, :repo)
import Ecto
import Ecto.Query, only: [from: 1, from: 2]
end
end
@doc """
When used, dispatch to the appropriate controller/view/etc.
"""
defmacro __using__(which) when is_atom(which) do
apply(__MODULE__, which, [])
end
end
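# A minimal usage sketch (hedged: the host module name is hypothetical; it
# exercises the `__using__/1` dispatch above):
#
#     defmodule MyApp.Admin.PageController do
#       use XAdmin.Web, :controller
#     end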
| 21.172414 | 114 | 0.629207 |
732a307eb92c664ec3f6fb189cd6d6e5482ea40d
| 972 |
exs
|
Elixir
|
test/plug/payment_guard_test.exs
|
versus-systems/shopifex
|
48b1e7e6b3b5cf31010097ad768325f783168124
|
[
"Apache-2.0"
] | null | null | null |
test/plug/payment_guard_test.exs
|
versus-systems/shopifex
|
48b1e7e6b3b5cf31010097ad768325f783168124
|
[
"Apache-2.0"
] | null | null | null |
test/plug/payment_guard_test.exs
|
versus-systems/shopifex
|
48b1e7e6b3b5cf31010097ad768325f783168124
|
[
"Apache-2.0"
] | null | null | null |
defmodule Shopifex.Plug.PaymentGuardTest do
use ShopifexWeb.ConnCase
setup do
conn = build_conn(:get, "/premium-route?foo=bar&fizz=buzz")
{:ok, conn: conn}
end
setup [:shop_in_session]
test "payment guard blocks pay-walled function and redirects to payment route", %{
conn: conn
} do
conn = Shopifex.Plug.PaymentGuard.call(conn, "block")
assert conn.status == 302
assert Plug.Conn.get_resp_header(conn, "location") == [
"/payment/show-plans?guard=block&redirect_after=%2Fpremium-route%3Ffoo%3Dbar%26fizz%3Dbuzz"
]
end
test "payment guard grants access pay-walled function and places guard payment in session", %{
conn: conn,
shop: shop
} do
Shopifex.Shops.create_grant(%{shop: shop, grants: ["premium_access"]})
conn = Shopifex.Plug.PaymentGuard.call(conn, "premium_access")
assert %ShopifexDummy.Shops.Grant{grants: ["premium_access"]} = conn.private.grant_for_guard
end
end
| 28.588235 | 104 | 0.695473 |
732a500f6a9023c2a3cf62648622f53b58daa1a8
| 70,122 |
exs
|
Elixir
|
test/ecto/changeset_test.exs
|
adbatista/ecto
|
ab63701cf9b3ceab6ddf54c87d549abe24e1248a
|
[
"Apache-2.0"
] | 2 |
2021-02-25T15:51:16.000Z
|
2021-02-25T18:42:35.000Z
|
test/ecto/changeset_test.exs
|
adbatista/ecto
|
ab63701cf9b3ceab6ddf54c87d549abe24e1248a
|
[
"Apache-2.0"
] | 1 |
2021-03-09T16:43:23.000Z
|
2021-03-09T16:43:23.000Z
|
test/ecto/changeset_test.exs
|
adbatista/ecto
|
ab63701cf9b3ceab6ddf54c87d549abe24e1248a
|
[
"Apache-2.0"
] | 1 |
2018-06-18T14:47:58.000Z
|
2018-06-18T14:47:58.000Z
|
defmodule Ecto.ChangesetTest do
use ExUnit.Case, async: true
import Ecto.Changeset
defmodule SocialSource do
use Ecto.Schema
@primary_key false
embedded_schema do
field :origin
field :url
end
def changeset(schema \\ %SocialSource{}, params) do
cast(schema, params, ~w(origin url)a)
end
end
defmodule Category do
use Ecto.Schema
schema "categories" do
field :name, :string
has_many :posts, Ecto.ChangesetTest.Post
end
end
defmodule Comment do
use Ecto.Schema
schema "comments" do
belongs_to :post, Ecto.ChangesetTest.Post
end
end
defmodule Post do
use Ecto.Schema
schema "posts" do
field :token, :integer, primary_key: true
field :title, :string, default: ""
field :body
field :uuid, :binary_id
field :color, :binary
field :decimal, :decimal
field :upvotes, :integer, default: 0
field :topics, {:array, :string}
field :virtual, :string, virtual: true
field :published_at, :naive_datetime
field :source, :map
field :permalink, :string, source: :url
belongs_to :category, Ecto.ChangesetTest.Category, source: :cat_id
has_many :comments, Ecto.ChangesetTest.Comment, on_replace: :delete
has_one :comment, Ecto.ChangesetTest.Comment
end
end
defmodule NoSchemaPost do
defstruct [:title, :upvotes]
end
defp changeset(schema \\ %Post{}, params) do
cast(schema, params, ~w(id token title body upvotes decimal color topics virtual)a)
end
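  # The Ecto.Type stubs below exercise how custom cast error metadata surfaces
  # in changeset errors.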
defmodule CustomError do
use Ecto.Type
def type, do: :any
def cast(_), do: {:error, message: "custom error message", reason: :foobar}
def load(_), do: :error
def dump(_), do: :error
end
defmodule CustomErrorWithType do
use Ecto.Type
def type, do: :any
def cast(_), do: {:error, message: "custom error message", reason: :foobar, type: :some_type}
def load(_), do: :error
def dump(_), do: :error
end
defmodule CustomErrorWithoutMessage do
use Ecto.Type
def type, do: :any
def cast(_), do: {:error, reason: :foobar}
def load(_), do: :error
def dump(_), do: :error
end
defmodule CustomErrorTest do
use Ecto.Schema
schema "custom_error" do
field :custom_error, CustomError
field :custom_error_without_message, CustomErrorWithoutMessage
field :custom_error_with_type, CustomErrorWithType
field :array_custom_error, {:array, CustomError}
field :map_custom_error, {:map, CustomError}
end
end
## cast/4
test "cast/4: with valid string keys" do
params = %{"title" => "hello", "body" => "world"}
struct = %Post{}
changeset = cast(struct, params, ~w(title body)a)
assert changeset.params == params
assert changeset.data == struct
assert changeset.changes == %{title: "hello", body: "world"}
assert changeset.errors == []
assert validations(changeset) == []
assert changeset.required == []
assert changeset.valid?
end
test "cast/4: with valid atom keys" do
params = %{title: "hello", body: "world"}
struct = %Post{}
changeset = cast(struct, params, ~w(title body)a)
assert changeset.params == %{"title" => "hello", "body" => "world"}
assert changeset.data == struct
assert changeset.changes == %{title: "hello", body: "world"}
assert changeset.errors == []
assert validations(changeset) == []
assert changeset.required == []
assert changeset.valid?
end
test "cast/4: with empty values" do
params = %{"title" => "", "body" => nil}
struct = %Post{title: "foo", body: "bar"}
changeset = cast(struct, params, ~w(title body)a)
assert changeset.changes == %{title: "", body: nil}
end
test "cast/4: with custom empty values" do
params = %{"title" => "empty", "body" => nil}
struct = %Post{title: "foo", body: "bar"}
changeset = cast(struct, params, ~w(title body)a, empty_values: ["empty"])
assert changeset.changes == %{title: "", body: nil}
assert changeset.empty_values == [""]
end
test "cast/4: with matching empty values" do
params = %{"title" => "", "body" => nil}
struct = %Post{title: "", body: nil}
changeset = cast(struct, params, ~w(title body)a)
assert changeset.changes == %{}
end
test "cast/4: with data and types" do
data = {%{title: "hello"}, %{title: :string, upvotes: :integer}}
params = %{"title" => "world", "upvotes" => "0"}
changeset = cast(data, params, ~w(title upvotes)a)
assert changeset.params == params
assert changeset.data == %{title: "hello"}
assert changeset.changes == %{title: "world", upvotes: 0}
assert changeset.errors == []
assert changeset.valid?
assert apply_changes(changeset) == %{title: "world", upvotes: 0}
end
test "cast/4: with data struct and types" do
data = {%NoSchemaPost{title: "hello"}, %{title: :string, upvotes: :integer}}
params = %{"title" => "world", "upvotes" => "0"}
changeset = cast(data, params, ~w(title upvotes)a)
assert changeset.params == params
assert changeset.data == %NoSchemaPost{title: "hello"}
assert changeset.changes == %{title: "world", upvotes: 0}
assert changeset.errors == []
assert changeset.valid?
assert apply_changes(changeset) == %NoSchemaPost{title: "world", upvotes: 0}
end
test "cast/4: with dynamic embed" do
data = {
%{
title: "hello"
},
%{
title: :string,
source: {
:embed,
%Ecto.Embedded{
cardinality: :one,
field: :source,
on_cast: &SocialSource.changeset(&1, &2),
on_replace: :raise,
owner: nil,
related: SocialSource,
unique: true
}
}
}
}
params = %{"title" => "world", "source" => %{"origin" => "facebook", "url" => "http://example.com/social"}}
changeset =
data
|> cast(params, ~w(title)a)
|> cast_embed(:source, required: true)
assert changeset.params == params
assert changeset.data == %{title: "hello"}
assert %{title: "world", source: %Ecto.Changeset{}} = changeset.changes
assert changeset.errors == []
assert changeset.valid?
assert apply_changes(changeset) ==
%{title: "world", source: %Ecto.ChangesetTest.SocialSource{origin: "facebook", url: "http://example.com/social"}}
end
test "cast/4: with changeset" do
base_changeset = cast(%Post{title: "valid"}, %{}, ~w(title)a)
|> validate_required(:title)
|> validate_length(:title, min: 3)
|> unique_constraint(:title)
# No changes
changeset = cast(base_changeset, %{}, ~w())
assert changeset.valid?
assert changeset.changes == %{}
assert changeset.required == [:title]
assert length(validations(changeset)) == 1
assert length(constraints(changeset)) == 1
# Value changes
changeset = cast(changeset, %{body: "new body"}, ~w(body)a)
assert changeset.valid?
assert changeset.changes == %{body: "new body"}
assert changeset.required == [:title]
assert length(validations(changeset)) == 1
assert length(constraints(changeset)) == 1
# Nil changes
changeset = cast(changeset, %{body: nil}, ~w(body)a)
assert changeset.valid?
assert changeset.changes == %{body: nil}
assert changeset.required == [:title]
assert length(validations(changeset)) == 1
assert length(constraints(changeset)) == 1
end
test "cast/4: struct with :invalid parameters" do
changeset = cast(%Post{}, :invalid, ~w(title body)a)
assert changeset.data == %Post{}
assert changeset.params == nil
assert changeset.changes == %{}
assert changeset.errors == []
assert validations(changeset) == []
refute changeset.valid?
end
test "cast/4: changeset with :invalid parameters" do
changeset = cast(%Post{}, %{"title" => "sample"}, ~w(title)a)
changeset = cast(changeset, :invalid, ~w(body)a)
assert changeset.data == %Post{}
assert changeset.params == %{"title" => "sample"}
assert changeset.changes == %{title: "sample"}
assert changeset.errors == []
assert validations(changeset) == []
refute changeset.valid?
end
test "cast/4: field is marked as invalid" do
params = %{"body" => :world}
struct = %Post{}
changeset = cast(struct, params, ~w(body)a)
assert changeset.changes == %{}
assert changeset.errors == [body: {"is invalid", [type: :string, validation: :cast]}]
refute changeset.valid?
end
test "cast/4: field has a custom invalid error message" do
params = %{"custom_error" => :error}
struct = %CustomErrorTest{}
changeset = cast(struct, params, ~w(custom_error)a)
assert changeset.errors == [custom_error: {"custom error message", [type: Ecto.ChangesetTest.CustomError, validation: :cast, reason: :foobar]}]
refute changeset.valid?
end
test "cast/4: ignores the :type parameter in custom errors" do
params = %{"custom_error_with_type" => :error}
struct = %CustomErrorTest{}
changeset = cast(struct, params, ~w(custom_error_with_type)a)
assert changeset.errors == [custom_error_with_type: {"custom error message", [type: Ecto.ChangesetTest.CustomErrorWithType, validation: :cast, reason: :foobar]}]
refute changeset.valid?
end
test "cast/4: field has a custom invalid error message without message" do
params = %{"custom_error_without_message" => :error}
struct = %CustomErrorTest{}
changeset = cast(struct, params, ~w(custom_error_without_message)a)
assert changeset.errors == [custom_error_without_message: {"is invalid", [type: Ecto.ChangesetTest.CustomErrorWithoutMessage, validation: :cast, reason: :foobar]}]
refute changeset.valid?
end
test "cast/4: field has a custom invalid error message on an array field" do
params = %{"array_custom_error" => [:error]}
struct = %CustomErrorTest{}
changeset = cast(struct, params, ~w(array_custom_error)a)
assert changeset.errors == [array_custom_error: {"is invalid", [type: {:array, Ecto.ChangesetTest.CustomError}, validation: :cast]}]
refute changeset.valid?
end
test "cast/4: field has a custom invalid error message on a map field" do
params = %{"map_custom_error" => %{foo: :error}}
struct = %CustomErrorTest{}
changeset = cast(struct, params, ~w(map_custom_error)a)
assert changeset.errors == [map_custom_error: {"is invalid", [type: {:map, Ecto.ChangesetTest.CustomError}, validation: :cast]}]
refute changeset.valid?
end
test "cast/4: fails on invalid field" do
assert_raise ArgumentError, ~r"unknown field `:unknown`", fn ->
cast(%Post{}, %{}, ~w(unknown)a)
end
end
test "cast/4: fails on bad arguments" do
assert_raise Ecto.CastError, ~r"expected params to be a :map, got:", fn ->
cast(%Post{}, %Post{}, ~w(unknown)a)
end
assert_raise Ecto.CastError, ~r"expected params to be a :map, got:", fn ->
cast(%Post{}, "foo", ~w(unknown)a)
end
assert_raise Ecto.CastError, ~r"mixed keys", fn ->
cast(%Post{}, %{"title" => "foo", title: "foo"}, ~w())
end
assert_raise FunctionClauseError, fn ->
cast(%Post{}, %{}, %{})
end
assert_raise FunctionClauseError, fn ->
cast(%Post{}, %{"title" => "foo"}, nil)
end
end
test "cast/4: protects against atom injection" do
assert_raise ArgumentError, fn ->
cast(%Post{}, %{}, ~w(surely_never_saw_this_atom_before)a)
end
end
test "cast/4: required field (via validate_required/2) of wrong type is marked as invalid" do
params = %{"body" => :world}
struct = %Post{}
changeset = cast(struct, params, [:body])
|> validate_required([:body])
assert changeset.changes == %{}
assert changeset.errors == [body: {"is invalid", [type: :string, validation: :cast]}]
refute changeset.valid?
end
test "cast/4: does not validate types in data" do
params = %{}
struct = %Post{title: 100, decimal: "string"}
changeset = cast(struct, params, ~w(title decimal)a)
assert changeset.params == %{}
assert changeset.data == struct
assert changeset.changes == %{}
assert changeset.errors == []
assert validations(changeset) == []
assert changeset.required == []
assert changeset.valid?
end
test "cast/4: semantic comparison" do
changeset = cast(%Post{decimal: Decimal.new(1)}, %{decimal: "1.0"}, ~w(decimal)a)
assert changeset.changes == %{}
changeset = cast(%Post{decimal: Decimal.new(1)}, %{decimal: "1.1"}, ~w(decimal)a)
assert changeset.changes == %{decimal: Decimal.new("1.1")}
changeset = cast(%Post{decimal: nil}, %{decimal: nil}, ~w(decimal)a)
assert changeset.changes == %{}
{data, types} = {%{x: [Decimal.new(1)]}, %{x: {:array, :decimal}}}
changeset = cast({data, types}, %{x: [Decimal.new("1.0")]}, ~w(x)a)
assert changeset.changes == %{}
changeset = cast({data, types}, %{x: [Decimal.new("1.1")]}, ~w(x)a)
assert changeset.changes == %{x: [Decimal.new("1.1")]}
changeset = cast({%{x: [nil]}, types}, %{x: [nil]}, ~w(x)a)
assert changeset.changes == %{}
{data, types} = {%{x: %{decimal: nil}}, %{x: {:map, :decimal}}}
changeset = cast({data, types}, data, ~w(x)a)
assert changeset.changes == %{}
end
## Changeset functions
test "merge/2: merges changes" do
cs1 = cast(%Post{}, %{title: "foo"}, ~w(title)a)
cs2 = cast(%Post{}, %{body: "bar"}, ~w(body)a)
assert merge(cs1, cs2).changes == %{body: "bar", title: "foo"}
cs1 = cast(%Post{}, %{title: "foo"}, ~w(title)a)
cs2 = cast(%Post{}, %{title: "bar"}, ~w(title)a)
changeset = merge(cs1, cs2)
assert changeset.valid?
assert changeset.params == %{"title" => "bar"}
assert changeset.changes == %{title: "bar"}
end
test "merge/2: merges errors" do
cs1 = cast(%Post{}, %{}, ~w(title)a) |> validate_required(:title)
cs2 = cast(%Post{}, %{}, ~w(title body)a) |> validate_required([:title, :body])
changeset = merge(cs1, cs2)
refute changeset.valid?
assert changeset.errors ==
[title: {"can't be blank", [validation: :required]}, body: {"can't be blank", [validation: :required]}]
end
test "merge/2: merges validations" do
cs1 = cast(%Post{}, %{title: "Title"}, ~w(title)a)
|> validate_length(:title, min: 1, max: 10)
cs2 = cast(%Post{}, %{body: "Body"}, ~w(body)a)
|> validate_format(:body, ~r/B/)
changeset = merge(cs1, cs2)
assert changeset.valid?
assert length(validations(changeset)) == 2
assert Enum.find(validations(changeset), &match?({:body, {:format, _}}, &1))
assert Enum.find(validations(changeset), &match?({:title, {:length, _}}, &1))
end
test "merge/2: repo opts" do
cs1 = %Post{} |> change() |> Map.put(:repo_opts, [a: 1, b: 2])
cs2 = %Post{} |> change() |> Map.put(:repo_opts, [b: 3, c: 4])
changeset = merge(cs1, cs2)
assert changeset.repo_opts == [a: 1, b: 3, c: 4]
end
test "merge/2: merges constraints" do
cs1 = cast(%Post{}, %{title: "Title"}, ~w(title)a)
|> unique_constraint(:title)
cs2 = cast(%Post{}, %{body: "Body"}, ~w(body)a)
|> unique_constraint(:body)
changeset = merge(cs1, cs2)
assert changeset.valid?
assert length(constraints(changeset)) == 2
end
test "merge/2: merges parameters" do
empty = cast(%Post{}, %{}, ~w(title)a)
cs1 = cast(%Post{}, %{body: "foo"}, ~w(body)a)
cs2 = cast(%Post{}, %{body: "bar"}, ~w(body)a)
assert merge(cs1, cs2).params == %{"body" => "bar"}
assert merge(cs1, empty).params == %{"body" => "foo"}
assert merge(empty, cs2).params == %{"body" => "bar"}
assert merge(empty, empty).params == %{}
end
test "merge/2: gives required fields precedence over optional ones" do
cs1 = cast(%Post{}, %{}, ~w(title)a) |> validate_required(:title)
cs2 = cast(%Post{}, %{}, ~w(title)a)
changeset = merge(cs1, cs2)
assert changeset.required == [:title]
end
test "merge/2: doesn't duplicate required or optional fields" do
cs1 = cast(%Post{}, %{}, ~w(title body)a) |> validate_required([:title, :body])
cs2 = cast(%Post{}, %{}, ~w(body title)a) |> validate_required([:body, :title])
changeset = merge(cs1, cs2)
assert Enum.sort(changeset.required) == [:body, :title]
end
test "merge/2: merges the :repo field when either one is nil" do
changeset = merge(%Ecto.Changeset{repo: :foo}, %Ecto.Changeset{repo: nil})
assert changeset.repo == :foo
changeset = merge(%Ecto.Changeset{repo: nil}, %Ecto.Changeset{repo: :bar})
assert changeset.repo == :bar
end
test "merge/2: merges the :action field when either one is nil" do
changeset = merge(%Ecto.Changeset{action: :insert}, %Ecto.Changeset{repo: nil})
assert changeset.action == :insert
changeset = merge(%Ecto.Changeset{action: nil}, %Ecto.Changeset{action: :update})
assert changeset.action == :update
end
test "merge/2: fails when the :data, :repo or :action field are not equal" do
cs1 = cast(%Post{title: "foo"}, %{}, ~w(title)a)
cs2 = cast(%Post{title: "bar"}, %{}, ~w(title)a)
assert_raise ArgumentError, "different :data when merging changesets", fn ->
merge(cs1, cs2)
end
assert_raise ArgumentError, "different repos (`:foo` and `:bar`) when merging changesets", fn ->
merge(%Ecto.Changeset{repo: :foo}, %Ecto.Changeset{repo: :bar})
end
assert_raise ArgumentError, "different actions (`:insert` and `:update`) when merging changesets", fn ->
merge(%Ecto.Changeset{action: :insert}, %Ecto.Changeset{action: :update})
end
end
test "change/2 with a struct" do
changeset = change(%Post{})
assert changeset.valid?
assert changeset.data == %Post{}
assert changeset.changes == %{}
changeset = change(%Post{body: "bar"}, body: "bar")
assert changeset.valid?
assert changeset.data == %Post{body: "bar"}
assert changeset.changes == %{}
changeset = change(%Post{body: "bar"}, %{body: "bar", title: "foo"})
assert changeset.valid?
assert changeset.data == %Post{body: "bar"}
assert changeset.changes == %{title: "foo"}
changeset = change(%Post{}, body: "bar")
assert changeset.valid?
assert changeset.data == %Post{}
assert changeset.changes == %{body: "bar"}
changeset = change(%Post{}, %{body: "bar"})
assert changeset.valid?
assert changeset.data == %Post{}
assert changeset.changes == %{body: "bar"}
end
test "change/2 with data and types" do
datatypes = {%{title: "hello"}, %{title: :string}}
changeset = change(datatypes)
assert changeset.valid?
assert changeset.data == %{title: "hello"}
assert changeset.changes == %{}
changeset = change(datatypes, title: "world")
assert changeset.valid?
assert changeset.data == %{title: "hello"}
assert changeset.changes == %{title: "world"}
assert apply_changes(changeset) == %{title: "world"}
changeset = change(datatypes, title: "hello")
assert changeset.valid?
assert changeset.data == %{title: "hello"}
assert changeset.changes == %{}
assert apply_changes(changeset) == %{title: "hello"}
end
test "change/2 with a changeset" do
base_changeset = cast(%Post{upvotes: 5}, %{title: "title"}, ~w(title)a)
assert change(base_changeset) == base_changeset
changeset = change(base_changeset, %{body: "body"})
assert changeset.changes == %{title: "title", body: "body"}
changeset = change(base_changeset, %{title: "new title"})
assert changeset.changes == %{title: "new title"}
changeset = change(base_changeset, title: "new title")
assert changeset.changes == %{title: "new title"}
changeset = change(base_changeset, body: nil)
assert changeset.changes == %{title: "title"}
changeset = change(base_changeset, %{upvotes: nil})
assert changeset.changes == %{title: "title", upvotes: nil}
changeset = change(base_changeset, %{upvotes: 5})
assert changeset.changes == %{title: "title"}
changeset = change(base_changeset, %{upvotes: 10})
assert changeset.changes == %{title: "title", upvotes: 10}
changeset = change(base_changeset, %{title: "new title", upvotes: 5})
assert changeset.changes == %{title: "new title"}
end
test "change/2 semantic comparison" do
post = %Post{decimal: Decimal.new("1.0")}
changeset = change(post, decimal: Decimal.new(1))
assert changeset.changes == %{}
end
test "change/2 with unknown field" do
post = %Post{}
assert_raise ArgumentError, ~r"unknown field `:unknown`", fn ->
change(post, unknown: Decimal.new(1))
end
end
test "change/2 with non-atom field" do
post = %Post{}
assert_raise ArgumentError, ~r"must be atoms, got: `\"bad\"`", fn ->
change(post, %{"bad" => 42})
end
end
test "fetch_field/2" do
changeset = changeset(%Post{body: "bar"}, %{"title" => "foo"})
assert fetch_field(changeset, :title) == {:changes, "foo"}
assert fetch_field(changeset, :body) == {:data, "bar"}
assert fetch_field(changeset, :other) == :error
end
test "fetch_field!/2" do
changeset = changeset(%Post{body: "bar"}, %{"title" => "foo"})
assert fetch_field!(changeset, :title) == "foo"
assert fetch_field!(changeset, :body) == "bar"
assert_raise KeyError, ~r/key :other not found in/, fn ->
fetch_field!(changeset, :other)
end
end
test "get_field/3" do
changeset = changeset(%Post{body: "bar"}, %{"title" => "foo"})
assert get_field(changeset, :title) == "foo"
assert get_field(changeset, :body) == "bar"
assert get_field(changeset, :body, "other") == "bar"
assert get_field(changeset, :other) == nil
assert get_field(changeset, :other, "other") == "other"
end
test "get_field/3 with associations" do
post = %Post{comments: [%Comment{}]}
changeset = change(post) |> put_assoc(:comments, [])
assert get_field(changeset, :comments) == []
end
test "fetch_change/2" do
changeset = changeset(%{"title" => "foo", "body" => nil, "upvotes" => nil})
assert fetch_change(changeset, :title) == {:ok, "foo"}
assert fetch_change(changeset, :body) == :error
assert fetch_change(changeset, :upvotes) == {:ok, nil}
end
test "fetch_change!/2" do
changeset = changeset(%{"title" => "foo", "body" => nil, "upvotes" => nil})
assert fetch_change!(changeset, :title) == "foo"
assert_raise KeyError, "key :body not found in: %{title: \"foo\", upvotes: nil}", fn ->
fetch_change!(changeset, :body)
end
assert fetch_change!(changeset, :upvotes) == nil
end
test "get_change/3" do
changeset = changeset(%{"title" => "foo", "body" => nil, "upvotes" => nil})
assert get_change(changeset, :title) == "foo"
assert get_change(changeset, :body) == nil
assert get_change(changeset, :body, "other") == "other"
assert get_change(changeset, :upvotes) == nil
assert get_change(changeset, :upvotes, "other") == nil
end
test "update_change/3" do
changeset =
changeset(%{"title" => "foo"})
|> update_change(:title, & &1 <> "bar")
assert changeset.changes.title == "foobar"
changeset =
changeset(%{"upvotes" => nil})
|> update_change(:upvotes, & &1 || 10)
assert changeset.changes.upvotes == 10
changeset =
changeset(%{})
|> update_change(:title, & &1 || "bar")
assert changeset.changes == %{}
changeset =
changeset(%Post{title: "mytitle"}, %{title: "MyTitle"})
|> update_change(:title, &String.downcase/1)
assert changeset.changes == %{}
end
test "put_change/3 and delete_change/2" do
base_changeset = change(%Post{upvotes: 5})
changeset = put_change(base_changeset, :title, "foo")
assert changeset.changes.title == "foo"
changeset = delete_change(changeset, :title)
assert changeset.changes == %{}
changeset = put_change(base_changeset, :title, "bar")
assert changeset.changes.title == "bar"
changeset = put_change(base_changeset, :body, nil)
assert changeset.changes == %{}
changeset = put_change(base_changeset, :upvotes, 5)
assert changeset.changes == %{}
changeset = put_change(changeset, :upvotes, 10)
assert changeset.changes.upvotes == 10
changeset = put_change(base_changeset, :upvotes, nil)
assert changeset.changes.upvotes == nil
end
test "force_change/3" do
changeset = change(%Post{upvotes: 5})
changeset = force_change(changeset, :title, "foo")
assert changeset.changes.title == "foo"
changeset = force_change(changeset, :title, "bar")
assert changeset.changes.title == "bar"
changeset = force_change(changeset, :upvotes, 5)
assert changeset.changes.upvotes == 5
end
test "apply_changes/1" do
post = %Post{}
category = %Category{name: "bar"}
assert post.title == ""
changeset = post
|> changeset(%{"title" => "foo"})
|> put_assoc(:category, category)
changed_post = apply_changes(changeset)
assert changed_post.__struct__ == post.__struct__
assert changed_post.title == "foo"
assert changed_post.category_id == category.id
end
describe "apply_action/2" do
test "valid changeset" do
post = %Post{}
assert post.title == ""
changeset = changeset(post, %{"title" => "foo"})
assert changeset.valid?
assert {:ok, changed_post} = apply_action(changeset, :update)
assert changed_post.__struct__ == post.__struct__
assert changed_post.title == "foo"
end
test "invalid changeset" do
changeset =
%Post{}
|> changeset(%{"title" => "foo"})
|> validate_length(:title, min: 10)
refute changeset.valid?
changeset_new_action = %Ecto.Changeset{changeset | action: :update}
assert {:error, ^changeset_new_action} = apply_action(changeset, :update)
end
test "invalid action" do
assert_raise ArgumentError, ~r/expected action to be an atom/, fn ->
%Post{}
|> changeset(%{})
|> apply_action("invalid_action")
end
end
end
describe "apply_action!/2" do
test "valid changeset" do
changeset = changeset(%Post{}, %{"title" => "foo"})
post = apply_action!(changeset, :update)
assert post.title == "foo"
end
test "invalid changeset" do
changeset =
%Post{}
|> changeset(%{"title" => "foo"})
|> validate_length(:title, min: 10)
assert_raise Ecto.InvalidChangesetError, fn ->
apply_action!(changeset, :update)
end
end
end
## Validations
test "add_error/3" do
changeset =
changeset(%{})
|> add_error(:foo, "bar")
assert changeset.errors == [foo: {"bar", []}]
changeset =
changeset(%{})
|> add_error(:foo, "bar", additional: "information")
assert changeset.errors == [foo: {"bar", [additional: "information"]}]
end
test "validate_change/3" do
# When valid
changeset =
changeset(%{"title" => "hello"})
|> validate_change(:title, fn :title, "hello" -> [] end)
assert changeset.valid?
assert changeset.errors == []
# When invalid with binary
changeset =
changeset(%{"title" => "hello"})
|> validate_change(:title, fn :title, "hello" -> [title: "oops"] end)
refute changeset.valid?
assert changeset.errors == [title: {"oops", []}]
# When invalid with tuple
changeset =
changeset(%{"title" => "hello"})
|> validate_change(:title, fn :title, "hello" -> [title: {"oops", type: "bar"}] end)
refute changeset.valid?
assert changeset.errors == [title: {"oops", type: "bar"}]
# When missing
changeset =
changeset(%{})
|> validate_change(:title, fn :title, "hello" -> [title: "oops"] end)
assert changeset.valid?
assert changeset.errors == []
# When nil
changeset =
changeset(%{"title" => nil})
|> validate_change(:title, fn :title, "hello" -> [title: "oops"] end)
assert changeset.valid?
assert changeset.errors == []
# When virtual
changeset =
changeset(%{"virtual" => "hello"})
|> validate_change(:virtual, fn :virtual, "hello" -> [] end)
assert changeset.valid?
assert changeset.errors == []
# When unknown field
assert_raise ArgumentError, ~r/unknown field :bad in/, fn ->
changeset(%{"title" => "hello"})
|> validate_change(:bad, fn _, _ -> [] end)
end
end
test "validate_change/4" do
changeset =
changeset(%{"title" => "hello"})
|> validate_change(:title, :oops, fn :title, "hello" -> [title: "oops"] end)
refute changeset.valid?
assert changeset.errors == [title: {"oops", []}]
assert validations(changeset) == [title: :oops]
changeset =
changeset(%{})
|> validate_change(:title, :oops, fn :title, "hello" -> [title: "oops"] end)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [title: :oops]
end
test "validate_required/2" do
# When valid
changeset =
changeset(%{"title" => "hello", "body" => "something"})
|> validate_required(:title)
assert changeset.valid?
assert changeset.errors == []
# When missing
changeset = changeset(%{}) |> validate_required(:title)
refute changeset.valid?
assert changeset.required == [:title]
assert changeset.errors == [title: {"can't be blank", [validation: :required]}]
# When nil
changeset =
changeset(%{title: nil, body: "\n"})
|> validate_required([:title, :body], message: "is blank")
refute changeset.valid?
assert changeset.required == [:title, :body]
assert changeset.changes == %{}
assert changeset.errors == [title: {"is blank", [validation: :required]}, body: {"is blank", [validation: :required]}]
# When :trim option is false
changeset = changeset(%{title: " "}) |> validate_required(:title, trim: false)
assert changeset.valid?
assert changeset.errors == []
changeset = changeset(%{color: <<12, 12, 12>>}) |> validate_required(:color, trim: false)
assert changeset.valid?
assert changeset.errors == []
# When unknown field
assert_raise ArgumentError, ~r/unknown field :bad in/, fn ->
changeset(%{"title" => "hello", "body" => "something"})
|> validate_required(:bad)
end
# When field is not an atom
assert_raise ArgumentError, ~r/expects field names to be atoms, got: `"title"`/, fn ->
changeset(%{"title" => "hello"})
|> validate_required("title")
end
# When field is nil
assert_raise FunctionClauseError, fn ->
changeset(%{"title" => "hello"})
|> validate_required(nil)
end
end
test "validate_format/3" do
changeset =
changeset(%{"title" => "foo@bar"})
|> validate_format(:title, ~r/@/)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [title: {:format, ~r/@/}]
changeset =
changeset(%{"title" => "foobar"})
|> validate_format(:title, ~r/@/)
refute changeset.valid?
assert changeset.errors == [title: {"has invalid format", [validation: :format]}]
assert validations(changeset) == [title: {:format, ~r/@/}]
changeset =
changeset(%{"title" => "foobar"})
|> validate_format(:title, ~r/@/, message: "yada")
assert changeset.errors == [title: {"yada", [validation: :format]}]
end
test "validate_inclusion/3" do
changeset =
changeset(%{"title" => "hello"})
|> validate_inclusion(:title, ~w(hello))
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [title: {:inclusion, ~w(hello)}]
changeset =
changeset(%{"title" => "hello"})
|> validate_inclusion(:title, ~w(world))
refute changeset.valid?
assert changeset.errors == [title: {"is invalid", [validation: :inclusion, enum: ~w(world)]}]
assert validations(changeset) == [title: {:inclusion, ~w(world)}]
changeset =
changeset(%{"title" => "hello"})
|> validate_inclusion(:title, ~w(world), message: "yada")
assert changeset.errors == [title: {"yada", [validation: :inclusion, enum: ~w(world)]}]
end
test "validate_subset/3" do
changeset =
changeset(%{"topics" => ["cat", "dog"]})
|> validate_subset(:topics, ~w(cat dog))
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [topics: {:subset, ~w(cat dog)}]
changeset =
changeset(%{"topics" => ["cat", "laptop"]})
|> validate_subset(:topics, ~w(cat dog))
refute changeset.valid?
assert changeset.errors == [topics: {"has an invalid entry", [validation: :subset, enum: ~w(cat dog)]}]
assert validations(changeset) == [topics: {:subset, ~w(cat dog)}]
changeset =
changeset(%{"topics" => ["laptop"]})
|> validate_subset(:topics, ~w(cat dog), message: "yada")
assert changeset.errors == [topics: {"yada", [validation: :subset, enum: ~w(cat dog)]}]
end
test "validate_exclusion/3" do
changeset =
changeset(%{"title" => "world"})
|> validate_exclusion(:title, ~w(hello))
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [title: {:exclusion, ~w(hello)}]
changeset =
changeset(%{"title" => "world"})
|> validate_exclusion(:title, ~w(world))
refute changeset.valid?
assert changeset.errors == [title: {"is reserved", [validation: :exclusion, enum: ~w(world)]}]
assert validations(changeset) == [title: {:exclusion, ~w(world)}]
changeset =
changeset(%{"title" => "world"})
|> validate_exclusion(:title, ~w(world), message: "yada")
assert changeset.errors == [title: {"yada", [validation: :exclusion, enum: ~w(world)]}]
end
test "validate_length/3 with string" do
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, min: 3, max: 7)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [title: {:length, [min: 3, max: 7]}]
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, min: 5, max: 5)
assert changeset.valid?
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, is: 5)
assert changeset.valid?
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, min: 6)
refute changeset.valid?
assert changeset.errors == [title: {"should be at least %{count} character(s)", count: 6, validation: :length, kind: :min, type: :string}]
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, max: 4)
refute changeset.valid?
assert changeset.errors == [title: {"should be at most %{count} character(s)", count: 4, validation: :length, kind: :max, type: :string}]
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, is: 10)
refute changeset.valid?
assert changeset.errors == [title: {"should be %{count} character(s)", count: 10, validation: :length, kind: :is, type: :string}]
changeset = changeset(%{"title" => "world"}) |> validate_length(:title, is: 10, message: "yada")
assert changeset.errors == [title: {"yada", count: 10, validation: :length, kind: :is, type: :string}]
changeset = changeset(%{"title" => "\u0065\u0301"}) |> validate_length(:title, max: 1)
assert changeset.valid?
changeset = changeset(%{"title" => "\u0065\u0301"}) |> validate_length(:title, max: 1, count: :codepoints)
refute changeset.valid?
assert changeset.errors == [title: {"should be at most %{count} character(s)", count: 1, validation: :length, kind: :max, type: :string}]
end
test "validate_length/3 with binary" do
changeset =
changeset(%{"body" => <<0, 1, 2, 3>>})
|> validate_length(:body, count: :bytes, min: 3, max: 7)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [body: {:length, [count: :bytes, min: 3, max: 7]}]
changeset =
changeset(%{"body" => <<0, 1, 2, 3, 4>>})
|> validate_length(:body, count: :bytes, min: 5, max: 5)
assert changeset.valid?
changeset =
changeset(%{"body" => <<0, 1, 2, 3, 4>>}) |> validate_length(:body, count: :bytes, is: 5)
assert changeset.valid?
changeset =
changeset(%{"body" => <<0, 1, 2, 3, 4>>}) |> validate_length(:body, count: :bytes, min: 6)
refute changeset.valid?
assert changeset.errors == [
body:
{"should be at least %{count} byte(s)", count: 6, validation: :length, kind: :min, type: :binary}
]
changeset =
changeset(%{"body" => <<0, 1, 2, 3, 4>>}) |> validate_length(:body, count: :bytes, max: 4)
refute changeset.valid?
assert changeset.errors == [
body: {"should be at most %{count} byte(s)", count: 4, validation: :length, kind: :max, type: :binary}
]
changeset =
changeset(%{"body" => <<0, 1, 2, 3, 4>>}) |> validate_length(:body, count: :bytes, is: 10)
refute changeset.valid?
assert changeset.errors == [
body: {"should be %{count} byte(s)", count: 10, validation: :length, kind: :is, type: :binary}
]
changeset =
changeset(%{"body" => <<0, 1, 2, 3, 4>>})
|> validate_length(:body, count: :bytes, is: 10, message: "yada")
assert changeset.errors == [body: {"yada", count: 10, validation: :length, kind: :is, type: :binary}]
end
test "validate_length/3 with list" do
changeset = changeset(%{"topics" => ["Politics", "Security", "Economy", "Elections"]}) |> validate_length(:topics, min: 3, max: 7)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [topics: {:length, [min: 3, max: 7]}]
changeset = changeset(%{"topics" => ["Politics", "Security"]}) |> validate_length(:topics, min: 2, max: 2)
assert changeset.valid?
changeset = changeset(%{"topics" => ["Politics", "Security", "Economy"]}) |> validate_length(:topics, is: 3)
assert changeset.valid?
changeset = changeset(%{"topics" => ["Politics", "Security"]}) |> validate_length(:topics, min: 6, foo: true)
refute changeset.valid?
assert changeset.errors == [topics: {"should have at least %{count} item(s)", count: 6, validation: :length, kind: :min, type: :list}]
changeset = changeset(%{"topics" => ["Politics", "Security", "Economy"]}) |> validate_length(:topics, max: 2)
refute changeset.valid?
assert changeset.errors == [topics: {"should have at most %{count} item(s)", count: 2, validation: :length, kind: :max, type: :list}]
changeset = changeset(%{"topics" => ["Politics", "Security"]}) |> validate_length(:topics, is: 10)
refute changeset.valid?
assert changeset.errors == [topics: {"should have %{count} item(s)", count: 10, validation: :length, kind: :is, type: :list}]
changeset = changeset(%{"topics" => ["Politics", "Security"]}) |> validate_length(:topics, is: 10, message: "yada")
assert changeset.errors == [topics: {"yada", count: 10, validation: :length, kind: :is, type: :list}]
end
test "validate_length/3 with associations" do
post = %Post{comments: [%Comment{id: 1}]}
changeset = change(post) |> put_assoc(:comments, []) |> validate_length(:comments, min: 1)
assert changeset.errors == [comments: {"should have at least %{count} item(s)", count: 1, validation: :length, kind: :min, type: :list}]
changeset = change(post) |> put_assoc(:comments, [%Comment{id: 2}, %Comment{id: 3}]) |> validate_length(:comments, max: 2)
assert changeset.valid?
end
test "validate_number/3" do
changeset = changeset(%{"upvotes" => 3})
|> validate_number(:upvotes, greater_than: 0)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [upvotes: {:number, [greater_than: 0]}]
# Single error
changeset = changeset(%{"upvotes" => -1})
|> validate_number(:upvotes, greater_than: 0)
refute changeset.valid?
assert changeset.errors == [upvotes: {"must be greater than %{number}", validation: :number, kind: :greater_than, number: 0}]
assert validations(changeset) == [upvotes: {:number, [greater_than: 0]}]
# Non equality error
changeset = changeset(%{"upvotes" => 1})
|> validate_number(:upvotes, not_equal_to: 1)
refute changeset.valid?
assert changeset.errors == [upvotes: {"must be not equal to %{number}", validation: :number, kind: :not_equal_to, number: 1}]
assert validations(changeset) == [upvotes: {:number, [not_equal_to: 1]}]
# Multiple validations
changeset = changeset(%{"upvotes" => 3})
|> validate_number(:upvotes, greater_than: 0, less_than: 100)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [upvotes: {:number, [greater_than: 0, less_than: 100]}]
# Multiple validations with multiple errors
changeset = changeset(%{"upvotes" => 3})
|> validate_number(:upvotes, greater_than: 100, less_than: 0)
refute changeset.valid?
assert changeset.errors == [upvotes: {"must be greater than %{number}", validation: :number, kind: :greater_than, number: 100}]
# Multiple validations with custom message errors
changeset = changeset(%{"upvotes" => 3})
|> validate_number(:upvotes, greater_than: 100, less_than: 0, message: "yada")
assert changeset.errors == [upvotes: {"yada", validation: :number, kind: :greater_than, number: 100}]
end
test "validate_number/3 with decimal" do
changeset = changeset(%{"decimal" => Decimal.new(1)})
|> validate_number(:decimal, greater_than: Decimal.new(-3))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(-3)})
|> validate_number(:decimal, less_than: Decimal.new(1))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(-1)})
|> validate_number(:decimal, equal_to: Decimal.new(-1))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(0)})
|> validate_number(:decimal, not_equal_to: Decimal.new(-1))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(-3)})
|> validate_number(:decimal, less_than_or_equal_to: Decimal.new(-1))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(-3)})
|> validate_number(:decimal, less_than_or_equal_to: Decimal.new(-3))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(-1)})
|> validate_number(:decimal, greater_than_or_equal_to: Decimal.new("-1.5"))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new("1.5")})
|> validate_number(:decimal, greater_than_or_equal_to: Decimal.new("1.5"))
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new("4.9")})
|> validate_number(:decimal, greater_than_or_equal_to: 4.9)
assert changeset.valid?
changeset = changeset(%{"decimal" => Decimal.new(5)})
|> validate_number(:decimal, less_than: 4)
refute changeset.valid?
end
test "validate_number/3 with bad options" do
assert_raise ArgumentError, ~r"unknown option :min given to validate_number/3", fn ->
validate_number(changeset(%{"upvotes" => 1}), :upvotes, min: Decimal.new("1.5"))
end
end
test "validate_confirmation/3" do
changeset = changeset(%{"title" => "title", "title_confirmation" => "title"})
|> validate_confirmation(:title)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [{:title, {:confirmation, []}}]
changeset = changeset(%{"title" => "title"})
|> validate_confirmation(:title)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [{:title, {:confirmation, []}}]
changeset = changeset(%{"title" => "title"})
|> validate_confirmation(:title, required: false)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [{:title, {:confirmation, [required: false]}}]
changeset = changeset(%{"title" => "title"})
|> validate_confirmation(:title, required: true)
refute changeset.valid?
assert changeset.errors == [title_confirmation: {"can't be blank", [validation: :required]}]
assert validations(changeset) == [{:title, {:confirmation, [required: true]}}]
changeset = changeset(%{"title" => "title", "title_confirmation" => nil})
|> validate_confirmation(:title)
refute changeset.valid?
assert changeset.errors == [title_confirmation: {"does not match confirmation", [validation: :confirmation]}]
assert validations(changeset) == [{:title, {:confirmation, []}}]
changeset = changeset(%{"title" => "title", "title_confirmation" => "not title"})
|> validate_confirmation(:title)
refute changeset.valid?
assert changeset.errors == [title_confirmation: {"does not match confirmation", [validation: :confirmation]}]
assert validations(changeset) == [{:title, {:confirmation, []}}]
changeset = changeset(%{"title" => "title", "title_confirmation" => "not title"})
|> validate_confirmation(:title, message: "doesn't match field below")
refute changeset.valid?
assert changeset.errors == [title_confirmation: {"doesn't match field below", [validation: :confirmation]}]
assert validations(changeset) == [{:title, {:confirmation, [message: "doesn't match field below"]}}]
# Skip when no parameter
changeset = changeset(%{"title" => "title"})
|> validate_confirmation(:title, message: "password doesn't match")
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [{:title, {:confirmation, [message: "password doesn't match"]}}]
# With casting
changeset = changeset(%{"upvotes" => "1", "upvotes_confirmation" => "1"})
|> validate_confirmation(:upvotes)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [{:upvotes, {:confirmation, []}}]
# With blank change
changeset = changeset(%{"password" => "", "password_confirmation" => "password"})
|> validate_confirmation(:password)
refute changeset.valid?
assert changeset.errors == [password_confirmation: {"does not match confirmation", [validation: :confirmation]}]
# With missing change
changeset = changeset(%{"password_confirmation" => "password"})
|> validate_confirmation(:password)
refute changeset.valid?
assert changeset.errors == [password_confirmation: {"does not match confirmation", [validation: :confirmation]}]
# invalid params
changeset = changeset(:invalid)
|> validate_confirmation(:password)
refute changeset.valid?
assert changeset.errors == []
assert validations(changeset) == []
end
test "validate_acceptance/3" do
# accepted
changeset = changeset(%{"terms_of_service" => "true"})
|> validate_acceptance(:terms_of_service)
assert changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [terms_of_service: {:acceptance, []}]
# not accepted
changeset = changeset(%{"terms_of_service" => "false"})
|> validate_acceptance(:terms_of_service)
refute changeset.valid?
assert changeset.errors == [terms_of_service: {"must be accepted", [validation: :acceptance]}]
assert validations(changeset) == [terms_of_service: {:acceptance, []}]
changeset = changeset(%{"terms_of_service" => "other"})
|> validate_acceptance(:terms_of_service)
refute changeset.valid?
assert changeset.errors == [terms_of_service: {"must be accepted", [validation: :acceptance]}]
assert validations(changeset) == [terms_of_service: {:acceptance, []}]
# empty params
changeset = changeset(%{})
|> validate_acceptance(:terms_of_service)
refute changeset.valid?
assert changeset.errors == [terms_of_service: {"must be accepted", [validation: :acceptance]}]
assert validations(changeset) == [terms_of_service: {:acceptance, []}]
# invalid params
changeset = changeset(:invalid)
|> validate_acceptance(:terms_of_service)
refute changeset.valid?
assert changeset.errors == []
assert validations(changeset) == [terms_of_service: {:acceptance, []}]
# custom message
changeset = changeset(%{})
|> validate_acceptance(:terms_of_service, message: "must be abided")
refute changeset.valid?
assert changeset.errors == [terms_of_service: {"must be abided", [validation: :acceptance]}]
assert validations(changeset) == [terms_of_service: {:acceptance, [message: "must be abided"]}]
end
alias Ecto.TestRepo
describe "unsafe_validate_unique/4" do
setup do
dup_result = {1, [true]}
no_dup_result = {0, []}
base_changeset = changeset(%Post{}, %{"title" => "Hello World", "body" => "hi"})
[dup_result: dup_result, no_dup_result: no_dup_result, base_changeset: base_changeset]
end
defmodule MockRepo do
@moduledoc """
Allows tests to verify or refute that a query was run.
"""
def one(query, opts \\ []) do
send(self(), [__MODULE__, function: :one, query: query, opts: opts])
end
end
test "validates the uniqueness of a single field", context do
Process.put(:test_repo_all_results, context.dup_result)
changeset = unsafe_validate_unique(context.base_changeset, :title, TestRepo)
assert changeset.errors ==
[title: {"has already been taken", validation: :unsafe_unique, fields: [:title]}]
Process.put(:test_repo_all_results, context.no_dup_result)
changeset = unsafe_validate_unique(context.base_changeset, :title, TestRepo)
assert changeset.valid?
end
test "validates the uniqueness of a combination of fields", context do
Process.put(:test_repo_all_results, context.dup_result)
changeset = unsafe_validate_unique(context.base_changeset, [:title, :body], TestRepo)
assert changeset.errors ==
[
title:
{"has already been taken", validation: :unsafe_unique, fields: [:title, :body]}
]
Process.put(:test_repo_all_results, context.no_dup_result)
changeset = unsafe_validate_unique(context.base_changeset, [:title, :body], TestRepo)
assert changeset.valid?
end
test "does not validate uniqueness if there is any prior error on a field", context do
Process.put(:test_repo_all_results, context.dup_result)
changeset =
context.base_changeset
|> validate_length(:title, max: 3)
|> unsafe_validate_unique(:title, TestRepo)
refute changeset.valid?
assert changeset.errors == [title: {"should be at most %{count} character(s)", [count: 3, validation: :length, kind: :max, type: :string]}]
end
test "does not validate uniqueness if there is any prior error on a combination of fields", context do
Process.put(:test_repo_all_results, context.dup_result)
changeset =
context.base_changeset
|> validate_length(:title, max: 3)
|> unsafe_validate_unique([:title, :body], TestRepo)
refute changeset.valid?
assert changeset.errors == [title: {"should be at most %{count} character(s)", [count: 3, validation: :length, kind: :max, type: :string]}]
end
test "allows setting a custom error message", context do
Process.put(:test_repo_all_results, context.dup_result)
changeset =
unsafe_validate_unique(context.base_changeset, [:title], TestRepo, message: "is taken")
assert changeset.errors ==
[title: {"is taken", validation: :unsafe_unique, fields: [:title]}]
end
test "allows setting a custom error key", context do
Process.put(:test_repo_all_results, context.dup_result)
changeset =
unsafe_validate_unique(context.base_changeset, [:title], TestRepo, message: "is taken", error_key: :foo)
assert changeset.errors ==
[foo: {"is taken", validation: :unsafe_unique, fields: [:title]}]
end
test "accepts a prefix option" do
body_change = changeset(%Post{title: "Hello World", body: "hi"}, %{body: "ho"})
unsafe_validate_unique(body_change, :body, MockRepo, prefix: "my_prefix")
assert_receive [MockRepo, function: :one, query: %Ecto.Query{prefix: "my_prefix"}, opts: []]
end
test "accepts repo options" do
body_change = changeset(%Post{title: "Hello World", body: "hi"}, %{body: "ho"})
unsafe_validate_unique(body_change, :body, MockRepo, repo_opts: [tenant_id: 1])
assert_receive [MockRepo, function: :one, query: %Ecto.Query{}, opts: [tenant_id: 1]]
end
test "only queries the db when necessary" do
body_change = changeset(%Post{title: "Hello World", body: "hi"}, %{body: "ho"})
unsafe_validate_unique(body_change, :body, MockRepo)
assert_receive [MockRepo, function: :one, query: %Ecto.Query{}, opts: []]
unsafe_validate_unique(body_change, [:body, :title], MockRepo)
assert_receive [MockRepo, function: :one, query: %Ecto.Query{}, opts: []]
unsafe_validate_unique(body_change, :title, MockRepo)
# no overlap between changed fields and those required to be unique
refute_receive [MockRepo, function: :one, query: %Ecto.Query{}, opts: []]
end
end
## Locks
defp prepared_changes(changeset) do
Enum.reduce(changeset.prepare, changeset, & &1.(&2)).changes
end
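  # optimistic_lock/3 does not bump the lock column directly; it registers a
  # prepare callback on the changeset. Applying the callbacks in order, as
  # above, yields the changes the repo would actually persist.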
test "optimistic_lock/3 with changeset with default incremeter" do
changeset = changeset(%{}) |> optimistic_lock(:upvotes)
assert changeset.filters == %{upvotes: 0}
assert changeset.changes == %{}
assert prepared_changes(changeset) == %{upvotes: 1}
changeset = changeset(%Post{upvotes: 2}, %{upvotes: 1}) |> optimistic_lock(:upvotes)
assert changeset.filters == %{upvotes: 1}
assert changeset.changes == %{upvotes: 1}
assert prepared_changes(changeset) == %{upvotes: 2}
    # Assert that the default incrementer rolls the lock over to 1 when the
    # current value is equal to or greater than 2_147_483_647
changeset = changeset(%Post{upvotes: 2_147_483_647}, %{}) |> optimistic_lock(:upvotes)
assert changeset.filters == %{upvotes: 2_147_483_647}
assert changeset.changes == %{}
assert prepared_changes(changeset) == %{upvotes: 1}
changeset = changeset(%Post{upvotes: 3_147_483_647}, %{}) |> optimistic_lock(:upvotes)
assert changeset.filters == %{upvotes: 3_147_483_647}
assert changeset.changes == %{}
assert prepared_changes(changeset) == %{upvotes: 1}
changeset = changeset(%Post{upvotes: 2_147_483_647}, %{upvotes: 2_147_483_648}) |> optimistic_lock(:upvotes)
assert changeset.filters == %{upvotes: 2_147_483_648}
assert changeset.changes == %{upvotes: 2_147_483_648}
assert prepared_changes(changeset) == %{upvotes: 1}
end
test "optimistic_lock/3 with struct" do
changeset = %Post{} |> optimistic_lock(:upvotes)
assert changeset.filters == %{upvotes: 0}
assert changeset.changes == %{}
assert prepared_changes(changeset) == %{upvotes: 1}
end
test "optimistic_lock/3 with custom incrementer" do
changeset = %Post{} |> optimistic_lock(:upvotes, &(&1 - 1))
assert changeset.filters == %{upvotes: 0}
assert changeset.changes == %{}
assert prepared_changes(changeset) == %{upvotes: -1}
end
## Constraints
test "check_constraint/3" do
changeset = change(%Post{}) |> check_constraint(:title, name: :title_must_be_short)
assert constraints(changeset) ==
[%{type: :check, field: :title, constraint: "title_must_be_short", match: :exact,
error_message: "is invalid", error_type: :check}]
changeset = change(%Post{}) |> check_constraint(:title, name: :title_must_be_short, message: "cannot be more than 15 characters")
assert constraints(changeset) ==
[%{type: :check, field: :title, constraint: "title_must_be_short", match: :exact,
error_message: "cannot be more than 15 characters", error_type: :check}]
assert_raise ArgumentError, ~r/invalid match type: :invalid/, fn ->
change(%Post{}) |> check_constraint(:title, name: :whatever, match: :invalid, message: "match is invalid")
end
assert_raise ArgumentError, ~r/supply the name/, fn ->
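      # The :name option is validated before the changeset is used, which is
      # presumably why no changeset needs to be piped in here.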
check_constraint(:title, message: "cannot be more than 15 characters")
end
end
test "unique_constraint/3" do
changeset = change(%Post{}) |> unique_constraint(:title)
assert constraints(changeset) ==
[%{type: :unique, field: :title, constraint: "posts_title_index", match: :exact,
error_message: "has already been taken", error_type: :unique}]
changeset = change(%Post{}) |> unique_constraint(:title, name: :whatever, message: "is taken")
assert constraints(changeset) ==
[%{type: :unique, field: :title, constraint: "whatever", match: :exact, error_message: "is taken", error_type: :unique}]
changeset = change(%Post{}) |> unique_constraint(:title, name: :whatever, match: :suffix, message: "is taken")
assert constraints(changeset) ==
[%{type: :unique, field: :title, constraint: "whatever", match: :suffix, error_message: "is taken", error_type: :unique}]
changeset = change(%Post{}) |> unique_constraint(:title, name: :whatever, match: :prefix, message: "is taken")
assert constraints(changeset) ==
[%{type: :unique, field: :title, constraint: "whatever", match: :prefix, error_message: "is taken", error_type: :unique}]
assert_raise ArgumentError, ~r/invalid match type: :invalid/, fn ->
change(%Post{}) |> unique_constraint(:title, name: :whatever, match: :invalid, message: "is taken")
end
end
test "unique_constraint/3 on field with :source" do
changeset = change(%Post{}) |> unique_constraint(:permalink)
assert constraints(changeset) ==
[%{type: :unique, field: :permalink, constraint: "posts_url_index", match: :exact,
error_message: "has already been taken", error_type: :unique}]
changeset = change(%Post{}) |> unique_constraint(:permalink, name: :whatever, message: "is taken")
assert constraints(changeset) ==
[%{type: :unique, field: :permalink, constraint: "whatever", match: :exact, error_message: "is taken", error_type: :unique}]
changeset = change(%Post{}) |> unique_constraint(:permalink, name: :whatever, match: :suffix, message: "is taken")
assert constraints(changeset) ==
[%{type: :unique, field: :permalink, constraint: "whatever", match: :suffix, error_message: "is taken", error_type: :unique}]
assert_raise ArgumentError, ~r/invalid match type: :invalid/, fn ->
change(%Post{}) |> unique_constraint(:permalink, name: :whatever, match: :invalid, message: "is taken")
end
end
test "unique_constraint/3 with multiple fields" do
changeset = change(%Post{}) |> unique_constraint([:permalink, :color])
assert constraints(changeset) ==
[%{type: :unique, field: :permalink, constraint: "posts_url_color_index", match: :exact,
error_message: "has already been taken", error_type: :unique}]
end
test "foreign_key_constraint/3" do
changeset = change(%Comment{}) |> foreign_key_constraint(:post_id)
assert constraints(changeset) ==
[%{type: :foreign_key, field: :post_id, constraint: "comments_post_id_fkey", match: :exact,
error_message: "does not exist", error_type: :foreign}]
changeset = change(%Comment{}) |> foreign_key_constraint(:post_id, name: :whatever, message: "is not available")
assert constraints(changeset) ==
[%{type: :foreign_key, field: :post_id, constraint: "whatever", match: :exact, error_message: "is not available", error_type: :foreign}]
end
test "foreign_key_constraint/3 on field with :source" do
changeset = change(%Post{}) |> foreign_key_constraint(:permalink)
assert constraints(changeset) ==
[%{type: :foreign_key, field: :permalink, constraint: "posts_url_fkey", match: :exact,
error_message: "does not exist", error_type: :foreign}]
changeset = change(%Post{}) |> foreign_key_constraint(:permalink, name: :whatever, message: "is not available")
assert constraints(changeset) ==
[%{type: :foreign_key, field: :permalink, constraint: "whatever", match: :exact, error_message: "is not available", error_type: :foreign}]
end
test "assoc_constraint/3" do
changeset = change(%Comment{}) |> assoc_constraint(:post)
assert constraints(changeset) ==
[%{type: :foreign_key, field: :post, constraint: "comments_post_id_fkey", match: :exact,
error_message: "does not exist", error_type: :assoc}]
changeset = change(%Comment{}) |> assoc_constraint(:post, name: :whatever, message: "is not available")
assert constraints(changeset) ==
[%{type: :foreign_key, field: :post, constraint: "whatever", match: :exact, error_message: "is not available", error_type: :assoc}]
end
test "assoc_constraint/3 on field with :source" do
changeset = change(%Post{}) |> assoc_constraint(:category)
assert constraints(changeset) ==
[%{type: :foreign_key, field: :category, constraint: "posts_category_id_fkey", match: :exact,
error_message: "does not exist", error_type: :assoc}]
changeset = change(%Post{}) |> assoc_constraint(:category, name: :whatever, message: "is not available")
assert constraints(changeset) ==
[%{type: :foreign_key, field: :category, constraint: "whatever", match: :exact, error_message: "is not available", error_type: :assoc}]
end
test "assoc_constraint/3 with errors" do
message = ~r"cannot add constraint to changeset because association `unknown` does not exist. Did you mean one of `category`, `comment`, `comments`?"
assert_raise ArgumentError, message, fn ->
change(%Post{}) |> assoc_constraint(:unknown)
end
message = ~r"assoc_constraint can only be added to belongs to associations"
assert_raise ArgumentError, message, fn ->
change(%Post{}) |> assoc_constraint(:comments)
end
end
test "no_assoc_constraint/3 with has_many" do
changeset = change(%Post{}) |> no_assoc_constraint(:comments)
assert constraints(changeset) ==
[%{type: :foreign_key, field: :comments, constraint: "comments_post_id_fkey", match: :exact,
error_message: "are still associated with this entry", error_type: :no_assoc}]
changeset = change(%Post{}) |> no_assoc_constraint(:comments, name: :whatever, message: "exists")
assert constraints(changeset) ==
[%{type: :foreign_key, field: :comments, constraint: "whatever", match: :exact,
error_message: "exists", error_type: :no_assoc}]
end
test "no_assoc_constraint/3 with has_one" do
changeset = change(%Post{}) |> no_assoc_constraint(:comment)
assert constraints(changeset) ==
[%{type: :foreign_key, field: :comment, constraint: "comments_post_id_fkey", match: :exact,
error_message: "is still associated with this entry", error_type: :no_assoc}]
changeset = change(%Post{}) |> no_assoc_constraint(:comment, name: :whatever, message: "exists")
assert constraints(changeset) ==
[%{type: :foreign_key, field: :comment, constraint: "whatever", match: :exact,
error_message: "exists", error_type: :no_assoc}]
end
test "no_assoc_constraint/3 with errors" do
message = ~r"cannot add constraint to changeset because association `unknown` does not exist"
assert_raise ArgumentError, message, fn ->
change(%Post{}) |> no_assoc_constraint(:unknown)
end
message = ~r"no_assoc_constraint can only be added to has one/many associations"
assert_raise ArgumentError, message, fn ->
change(%Comment{}) |> no_assoc_constraint(:post)
end
end
test "exclusion_constraint/3" do
changeset = change(%Post{}) |> exclusion_constraint(:title)
assert constraints(changeset) ==
[%{type: :exclusion, field: :title, constraint: "posts_title_exclusion", match: :exact,
error_message: "violates an exclusion constraint", error_type: :exclusion}]
changeset = change(%Post{}) |> exclusion_constraint(:title, name: :whatever, message: "is invalid")
assert constraints(changeset) ==
[%{type: :exclusion, field: :title, constraint: "whatever", match: :exact,
error_message: "is invalid", error_type: :exclusion}]
assert_raise ArgumentError, ~r/invalid match type: :invalid/, fn ->
change(%Post{}) |> exclusion_constraint(:title, name: :whatever, match: :invalid, message: "match is invalid")
end
end
## traverse_errors
test "traverses changeset errors" do
changeset =
changeset(%{"title" => "title", "body" => "hi", "upvotes" => :bad})
|> validate_length(:body, min: 3)
|> validate_format(:body, ~r/888/)
|> add_error(:title, "is taken", name: "your title")
errors = traverse_errors(changeset, fn
{"is invalid", [type: type, validation: :cast]} ->
"expected to be #{inspect(type)}"
{"is taken", keys} ->
String.upcase("#{keys[:name]} is taken")
{msg, keys} ->
msg
|> String.replace("%{count}", to_string(keys[:count]))
|> String.upcase()
end)
assert errors == %{
body: ["HAS INVALID FORMAT", "SHOULD BE AT LEAST 3 CHARACTER(S)"],
title: ["YOUR TITLE IS TAKEN"],
upvotes: ["expected to be :integer"],
}
end
test "traverses changeset errors with field" do
changeset =
changeset(%{"title" => "title", "body" => "hi", "upvotes" => :bad})
|> validate_length(:body, min: 3)
|> validate_format(:body, ~r/888/)
|> validate_inclusion(:body, ["hola", "bonjour", "hallo"])
|> add_error(:title, "is taken", name: "your title")
errors = traverse_errors(changeset, fn
%Ecto.Changeset{}, field, {_, [type: type, validation: :cast]} ->
"expected #{field} to be #{inspect(type)}"
%Ecto.Changeset{}, field, {_, [name: "your title"]} ->
"value in #{field} is taken"
|> String.upcase()
%Ecto.Changeset{}, field, {_, [count: 3, validation: :length, kind: :min, type: :string] = keys} ->
"should be at least #{keys[:count]} character(s) in field #{field}"
|> String.upcase()
%Ecto.Changeset{validations: validations}, field, {_, [validation: :format]} ->
validation = Keyword.get_values(validations, field)
"field #{field} should match format #{inspect validation[:format]}"
%Ecto.Changeset{validations: validations}, field, {_, [validation: :inclusion, enum: _]} ->
validation = Keyword.get_values(validations, field)
values = Enum.join(validation[:inclusion], ", ")
"#{field} value should be in #{values}"
end)
assert errors == %{
body: ["body value should be in hola, bonjour, hallo",
"field body should match format ~r/888/",
"SHOULD BE AT LEAST 3 CHARACTER(S) IN FIELD BODY"],
title: ["VALUE IN TITLE IS TAKEN"],
upvotes: ["expected upvotes to be :integer"],
}
end
## inspect
defmodule RedactedSchema do
use Ecto.Schema
schema "redacted_schema" do
field :password, :string, redact: true
field :username, :string
field :display_name, :string, redact: false
field :virtual_pass, :string, redact: true, virtual: true
end
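    # Fields marked redact: true render as "**redacted**" when the changeset
    # is inspected, as the tests below demonstrate.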
end
describe "inspect" do
test "reveals relevant data" do
assert inspect(%Ecto.Changeset{}) ==
"#Ecto.Changeset<action: nil, changes: %{}, errors: [], data: nil, valid?: false>"
assert inspect(changeset(%{"title" => "title", "body" => "hi"})) ==
"#Ecto.Changeset<action: nil, changes: %{body: \"hi\", title: \"title\"}, " <>
"errors: [], data: #Ecto.ChangesetTest.Post<>, valid?: true>"
data = {%NoSchemaPost{title: "hello"}, %{title: :string, upvotes: :integer}}
params = %{"title" => "world", "upvotes" => "0"}
assert inspect(cast(data, params, ~w(title upvotes)a)) ==
"#Ecto.Changeset<action: nil, changes: %{title: \"world\", upvotes: 0}, " <>
"errors: [], data: #Ecto.ChangesetTest.NoSchemaPost<>, valid?: true>"
end
test "redacts fields marked redact: true" do
changeset = Ecto.Changeset.cast(%RedactedSchema{}, %{password: "hunter2"}, [:password])
refute inspect(changeset) =~ "hunter2"
assert inspect(changeset) =~ "**redacted**"
end
test "redacts virtual fields marked redact: true" do
changeset = Ecto.Changeset.cast(%RedactedSchema{}, %{virtual_pass: "hunter2"}, [:virtual_pass])
refute inspect(changeset) =~ "hunter2"
assert inspect(changeset) =~ "**redacted**"
end
test "doesn't redact fields without redacted (defaults to false)" do
changeset = Ecto.Changeset.cast(%RedactedSchema{}, %{username: "hunter2"}, [:username])
assert inspect(changeset) =~ "hunter2"
refute inspect(changeset) =~ "**redacted**"
end
test "doesn't redact fields marked redact: false" do
changeset = Ecto.Changeset.cast(%RedactedSchema{}, %{display_name: "hunter2"}, [:display_name])
assert inspect(changeset) =~ "hunter2"
refute inspect(changeset) =~ "**redacted**"
end
end
end
| 38.297105 | 167 | 0.634665 |
732a5a3aa7fa16bc6b6baef8bfc8f738598e86bc | 411 | ex | Elixir | lib/nkn_client/ws/supervisor.ex | termoose/nkn_client | f7b7ea2401d16ea6eeddb90317bd4231cc2f9c83 | ["MIT"] | 2 | 2019-02-14T19:13:55.000Z | 2020-01-21T14:18:30.000Z | lib/nkn_client/ws/supervisor.ex | termoose/nkn_client | f7b7ea2401d16ea6eeddb90317bd4231cc2f9c83 | ["MIT"] | 2 | 2020-02-03T10:51:19.000Z | 2020-04-12T10:13:23.000Z | lib/nkn_client/ws/supervisor.ex | termoose/nkn_client | f7b7ea2401d16ea6eeddb90317bd4231cc2f9c83 | ["MIT"] | 1 | 2020-04-11T06:38:23.000Z | 2020-04-11T06:38:23.000Z |
defmodule NknClient.WS.Supervisor do
use Supervisor
alias NknClient.WS
alias NknClient.RPC
def start_link do
Supervisor.start_link(__MODULE__, :ok, name: __MODULE__)
end
def init(:ok) do
children = [
{WS.Client, "ws://#{RPC.get_address()}"},
{WS.MessageSink, []},
{WS.NodeInfo, :ok},
{WS, :ok}
]
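    # :one_for_all restarts every child whenever one of them crashes;
    # presumably the WS client and its consumers share connection state
    # and must come back up together.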
Supervisor.init(children, strategy: :one_for_all)
end
end
| 19.571429 | 60 | 0.635036 |
732aa10eabeadd506af34bf4a86b8ad4e5119dcf | 10,582 | exs | Elixir | lib/elixir/test/elixir/module/types/infer_test.exs | DmitryKakurin/elixir | a5df6a5a830d4cff8b7c8da54342b66cab999e0f | ["Apache-2.0"] | 1 | 2020-01-14T18:44:56.000Z | 2020-01-14T18:44:56.000Z | lib/elixir/test/elixir/module/types/infer_test.exs | DmitryKakurin/elixir | a5df6a5a830d4cff8b7c8da54342b66cab999e0f | ["Apache-2.0"] | null | null | null | lib/elixir/test/elixir/module/types/infer_test.exs | DmitryKakurin/elixir | a5df6a5a830d4cff8b7c8da54342b66cab999e0f | ["Apache-2.0"] | 1 | 2018-01-09T20:10:59.000Z | 2018-01-09T20:10:59.000Z |
Code.require_file("../../test_helper.exs", __DIR__)
defmodule Module.Types.InferTest do
use ExUnit.Case, async: true
import Module.Types.Infer
alias Module.Types
defp unify_lift(left, right, context \\ new_context()) do
unify(left, right, new_stack(), context)
|> lift_result()
end
defp new_context() do
Types.head_context("types_test.ex", TypesTest, {:test, 0})
end
defp new_stack() do
%{
Types.head_stack()
| expr_stack: [{:foo, [], nil}]
}
end
defp unify(left, right, context) do
unify(left, right, new_stack(), context)
end
defp lift_result({:ok, type, context}) do
{:ok, Types.lift_type(type, context)}
end
defp lift_result({:error, {Types, reason, location}}) do
{:error, {reason, location}}
end
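  # Lifting replaces type variables with whatever the context resolved them
  # to, so the assertions below can match on concrete types.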
describe "unify/3" do
test "literal" do
assert unify_lift({:atom, :foo}, {:atom, :foo}) == {:ok, {:atom, :foo}}
assert {:error, {{:unable_unify, {:atom, :foo}, {:atom, :bar}, _, _}, _}} =
unify_lift({:atom, :foo}, {:atom, :bar})
end
test "type" do
assert unify_lift(:integer, :integer) == {:ok, :integer}
assert unify_lift(:binary, :binary) == {:ok, :binary}
assert unify_lift(:atom, :atom) == {:ok, :atom}
assert unify_lift(:boolean, :boolean) == {:ok, :boolean}
assert {:error, {{:unable_unify, :integer, :boolean, _, _}, _}} =
unify_lift(:integer, :boolean)
end
test "subtype" do
assert unify_lift(:boolean, :atom) == {:ok, :boolean}
assert unify_lift(:atom, :boolean) == {:ok, :boolean}
assert unify_lift(:boolean, {:atom, true}) == {:ok, {:atom, true}}
assert unify_lift({:atom, true}, :boolean) == {:ok, {:atom, true}}
assert unify_lift(:atom, {:atom, true}) == {:ok, {:atom, true}}
assert unify_lift({:atom, true}, :atom) == {:ok, {:atom, true}}
end
test "tuple" do
assert unify_lift({:tuple, []}, {:tuple, []}) == {:ok, {:tuple, []}}
assert unify_lift({:tuple, [:integer]}, {:tuple, [:integer]}) == {:ok, {:tuple, [:integer]}}
assert unify_lift({:tuple, [:boolean]}, {:tuple, [:atom]}) == {:ok, {:tuple, [:boolean]}}
assert {:error, {{:unable_unify, {:tuple, [:integer]}, {:tuple, []}, _, _}, _}} =
unify_lift({:tuple, [:integer]}, {:tuple, []})
assert {:error, {{:unable_unify, :integer, :atom, _, _}, _}} =
unify_lift({:tuple, [:integer]}, {:tuple, [:atom]})
end
test "list" do
assert unify_lift({:list, :integer}, {:list, :integer}) == {:ok, {:list, :integer}}
assert {:error, {{:unable_unify, :atom, :integer, _, _}, _}} =
unify_lift({:list, :atom}, {:list, :integer})
end
test "map" do
assert unify_lift({:map, []}, {:map, []}) == {:ok, {:map, []}}
assert unify_lift({:map, [{:integer, :atom}]}, {:map, []}) ==
{:ok, {:map, [{:integer, :atom}]}}
assert unify_lift({:map, []}, {:map, [{:integer, :atom}]}) ==
{:ok, {:map, [{:integer, :atom}]}}
assert unify_lift({:map, [{:integer, :atom}]}, {:map, [{:integer, :atom}]}) ==
{:ok, {:map, [{:integer, :atom}]}}
assert unify_lift({:map, [{:integer, :atom}]}, {:map, [{:atom, :integer}]}) ==
{:ok, {:map, [{:integer, :atom}, {:atom, :integer}]}}
assert unify_lift(
{:map, [{{:atom, :foo}, :boolean}]},
{:map, [{{:atom, :foo}, :atom}]}
) ==
{:ok, {:map, [{{:atom, :foo}, :boolean}]}}
assert {:error, {{:unable_unify, :integer, :atom, _, _}, _}} =
unify_lift(
{:map, [{{:atom, :foo}, :integer}]},
{:map, [{{:atom, :foo}, :atom}]}
)
end
test "union" do
assert unify_lift({:union, []}, {:union, []}) == {:ok, {:union, []}}
assert unify_lift({:union, [:integer]}, {:union, [:integer]}) == {:ok, {:union, [:integer]}}
assert unify_lift({:union, [:integer, :atom]}, {:union, [:integer, :atom]}) ==
{:ok, {:union, [:integer, :atom]}}
assert unify_lift({:union, [:integer, :atom]}, {:union, [:atom, :integer]}) ==
{:ok, {:union, [:integer, :atom]}}
assert unify_lift({:union, [:atom]}, {:union, [:boolean]}) == {:ok, {:union, [:boolean]}}
assert unify_lift({:union, [:boolean]}, {:union, [:atom]}) == {:ok, {:union, [:boolean]}}
assert {:error, {{:unable_unify, {:union, [:integer]}, {:union, [:atom]}, _, _}, _}} =
unify_lift({:union, [:integer]}, {:union, [:atom]})
end
test "dynamic" do
assert unify_lift({:atom, :foo}, :dynamic) == {:ok, {:atom, :foo}}
assert unify_lift(:dynamic, {:atom, :foo}) == {:ok, {:atom, :foo}}
assert unify_lift(:integer, :dynamic) == {:ok, :integer}
assert unify_lift(:dynamic, :integer) == {:ok, :integer}
end
test "vars" do
assert {{:var, 0}, var_context} = new_var({:foo, [version: 0], nil}, new_context())
assert {{:var, 1}, var_context} = new_var({:bar, [version: 1], nil}, var_context)
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert Types.lift_type({:var, 0}, context) == :integer
assert {:ok, {:var, 0}, context} = unify(:integer, {:var, 0}, var_context)
assert Types.lift_type({:var, 0}, context) == :integer
assert {:ok, {:var, _}, context} = unify({:var, 0}, {:var, 1}, var_context)
assert {:var, _} = Types.lift_type({:var, 0}, context)
assert {:var, _} = Types.lift_type({:var, 1}, context)
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, :integer, context)
assert {:ok, {:var, _}, context} = unify({:var, 0}, {:var, 1}, context)
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, :integer, context)
assert {:ok, {:var, _}, context} = unify({:var, 1}, {:var, 0}, context)
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, :binary, context)
assert {:error, {{:unable_unify, :integer, :binary, _, _}, _}} =
unify_lift({:var, 0}, {:var, 1}, context)
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, :binary, context)
assert {:error, {{:unable_unify, :binary, :integer, _, _}, _}} =
unify_lift({:var, 1}, {:var, 0}, context)
end
test "vars inside tuples" do
assert {{:var, 0}, var_context} = new_var({:foo, [version: 0], nil}, new_context())
assert {{:var, 1}, var_context} = new_var({:bar, [version: 1], nil}, var_context)
assert {:ok, {:tuple, [{:var, 0}]}, context} =
unify({:tuple, [{:var, 0}]}, {:tuple, [:integer]}, var_context)
assert Types.lift_type({:var, 0}, context) == :integer
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, :integer, context)
assert {:ok, {:tuple, [{:var, _}]}, context} =
unify({:tuple, [{:var, 0}]}, {:tuple, [{:var, 1}]}, context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, {:tuple, [{:var, 0}]}, var_context)
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, context)
assert Types.lift_type({:var, 1}, context) == {:tuple, [:integer]}
assert {:ok, {:var, 0}, context} = unify({:var, 0}, :integer, var_context)
assert {:ok, {:var, 1}, context} = unify({:var, 1}, :binary, context)
assert {:error, {{:unable_unify, :integer, :binary, _, _}, _}} =
unify_lift({:tuple, [{:var, 0}]}, {:tuple, [{:var, 1}]}, context)
end
# TODO: Vars inside unions
test "recursive type" do
assert {{:var, 0}, var_context} = new_var({:foo, [version: 0], nil}, new_context())
assert {{:var, 1}, var_context} = new_var({:bar, [version: 1], nil}, var_context)
assert {{:var, 2}, var_context} = new_var({:baz, [version: 2], nil}, var_context)
assert {:ok, {:var, _}, context} = unify({:var, 0}, {:var, 1}, var_context)
assert {:ok, {:var, _}, context} = unify({:var, 1}, {:var, 0}, context)
assert {:ok, {:var, _}, context} = unify({:var, 0}, {:var, 1}, var_context)
assert {:ok, {:var, _}, context} = unify({:var, 1}, {:var, 2}, context)
assert {:ok, {:var, _}, context} = unify({:var, 2}, {:var, 0}, context)
assert {:ok, {:var, _}, context} = unify({:var, 0}, {:var, 1}, var_context)
assert {:error, {{:unable_unify, {:var, 1}, {:tuple, [{:var, 0}]}, _, _}, _}} =
unify_lift({:var, 1}, {:tuple, [{:var, 0}]}, context)
assert {:ok, {:var, _}, context} = unify({:var, 0}, {:var, 1}, var_context)
assert {:ok, {:var, _}, context} = unify({:var, 1}, {:var, 2}, context)
assert {:error, {{:unable_unify, {:var, 2}, {:tuple, [{:var, 0}]}, _, _}, _}} =
unify_lift({:var, 2}, {:tuple, [{:var, 0}]}, context)
end
end
test "subtype?/3" do
assert subtype?({:atom, :foo}, :atom, new_context())
assert subtype?({:atom, true}, :boolean, new_context())
assert subtype?({:atom, true}, :atom, new_context())
assert subtype?(:boolean, :atom, new_context())
refute subtype?(:integer, :binary, new_context())
refute subtype?(:atom, {:atom, :foo}, new_context())
refute subtype?(:boolean, {:atom, true}, new_context())
refute subtype?(:atom, {:atom, true}, new_context())
refute subtype?(:atom, :boolean, new_context())
end
test "to_union/2" do
assert to_union([:atom], new_context()) == :atom
assert to_union([:integer, :integer], new_context()) == :integer
assert to_union([:boolean, :atom], new_context()) == :atom
assert to_union([{:atom, :foo}, :boolean, :atom], new_context()) == :atom
assert to_union([:binary, :atom], new_context()) == {:union, [:binary, :atom]}
assert to_union([:atom, :binary, :atom], new_context()) == {:union, [:atom, :binary]}
assert to_union([{:atom, :foo}, :binary, :atom], new_context()) ==
{:union, [:binary, :atom]}
assert {{:var, 0}, var_context} = new_var({:foo, [version: 0], nil}, new_context())
assert to_union([{:var, 0}], var_context) == {:var, 0}
assert to_union([{:tuple, [:integer]}, {:tuple, [:integer]}], new_context()) ==
{:tuple, [:integer]}
end
end
| 41.661417 | 98 | 0.534871 |
732aac3feb9283c89a4b41d3750abf4d468e6bbb | 645 | exs | Elixir | test/live_sup_web/live/welcome_live_test.exs | livesup-dev/livesup | eaf9ffc78d3043bd9e3408f0f4df26ed16eb8446 | ["Apache-2.0", "MIT"] | null | null | null | test/live_sup_web/live/welcome_live_test.exs | livesup-dev/livesup | eaf9ffc78d3043bd9e3408f0f4df26ed16eb8446 | ["Apache-2.0", "MIT"] | 3 | 2022-02-23T15:51:48.000Z | 2022-03-14T22:52:43.000Z | test/live_sup_web/live/welcome_live_test.exs | livesup-dev/livesup | eaf9ffc78d3043bd9e3408f0f4df26ed16eb8446 | ["Apache-2.0", "MIT"] | null | null | null |
defmodule LiveSupWeb.Test.Live.WelcomeLive do
use LiveSupWeb.ConnCase
import Phoenix.LiveViewTest
import LiveSup.Test.TeamsFixtures
defp create_teams(_) do
team_a = team_fixture(%{name: "Team A"})
team_b = team_fixture(%{name: "Team B"})
%{team_a: team_a, team_b: team_b}
end
describe "Index" do
@describetag :welcome
setup [:register_and_log_in_user, :create_teams]
test "lists all teams", %{conn: conn, team_a: team_a, team_b: team_b} do
{:ok, _index_live, html} = live(conn, Routes.welcome_path(conn, :teams))
assert html =~ team_a.name
assert html =~ team_b.name
end
end
end
| 24.807692 | 78 | 0.685271 |
732ae43a19aa485807716aa18bc76d836b415dd6 | 727 | ex | Elixir | apps/note_it/lib/note_it/application.ex | sushilman/note-it | c47edafb4272b9f01d53f8566f5ed7858f2d355c | ["Apache-2.0"] | null | null | null | apps/note_it/lib/note_it/application.ex | sushilman/note-it | c47edafb4272b9f01d53f8566f5ed7858f2d355c | ["Apache-2.0"] | null | null | null | apps/note_it/lib/note_it/application.ex | sushilman/note-it | c47edafb4272b9f01d53f8566f5ed7858f2d355c | ["Apache-2.0"] | null | null | null |
defmodule NoteIt.Application do
# See http://elixir-lang.org/docs/stable/elixir/Application.html
# for more information on OTP Applications
@moduledoc false
use Application
def start(_type, _args) do
import Supervisor.Spec, warn: false
# Define workers and child supervisors to be supervised
children = [
# Starts a worker by calling: NoteIt.Worker.start_link(arg1, arg2, arg3)
# worker(NoteIt.Worker, [arg1, arg2, arg3]),
worker(NoteIt.Repo, [])
]
# See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
# for other strategies and supported options
opts = [strategy: :one_for_one, name: NoteIt.Supervisor]
Supervisor.start_link(children, opts)
end
end
| 30.291667 | 78 | 0.708391 |
732af1e33cd7c1579fb8219bc0ea3fb63069c229 | 22,128 | exs | Elixir | test/sanbase_web/graphql/billing/timeframe_access_restrictions/api_product_access_test.exs | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | ["MIT"] | 81 | 2017-11-20T01:20:22.000Z | 2022-03-05T12:04:25.000Z | test/sanbase_web/graphql/billing/timeframe_access_restrictions/api_product_access_test.exs | rmoorman/sanbase2 | 226784ab43a24219e7332c49156b198d09a6dd85 | ["MIT"] | 359 | 2017-10-15T14:40:53.000Z | 2022-01-25T13:34:20.000Z | test/sanbase_web/graphql/billing/timeframe_access_restrictions/api_product_access_test.exs | rmoorman/sanbase2 | 226784ab43a24219e7332c49156b198d09a6dd85 | ["MIT"] | 16 | 2017-11-19T13:57:40.000Z | 2022-02-07T08:13:02.000Z |
defmodule Sanbase.Billing.ApiProductAccessTest do
use SanbaseWeb.ConnCase, async: false
import Sanbase.Factory
import Sanbase.TestHelpers
import SanbaseWeb.Graphql.TestHelpers
import Mock
alias Sanbase.Accounts.Apikey
alias Sanbase.Metric
alias Sanbase.Signal
@product "SANAPI"
setup_all_with_mocks([
{Sanbase.Price, [], [timeseries_data: fn _, _, _, _ -> price_resp() end]},
{Sanbase.Metric, [:passthrough], [timeseries_data: fn _, _, _, _, _, _ -> metric_resp() end]},
{Sanbase.Signal, [:passthrough], [timeseries_data: fn _, _, _, _, _, _ -> signal_resp() end]}
]) do
[]
end
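  # The mocks above stub out every timeseries source, so the tests below only
  # assert whether a call was let through the subscription gating (and with
  # which time range), not what data came back.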
setup do
user = insert(:user)
project = insert(:random_erc20_project)
{:ok, apikey} = Apikey.generate_apikey(user)
conn = setup_apikey_auth(build_conn(), apikey)
[user: user, conn: conn, project: project]
end
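  # from_to/2 comes from Sanbase.TestHelpers; judging by its uses below, it
  # presumably returns a {from, to} datetime pair the given numbers of days in
  # the past, e.g. from_to(2500, 0) spans roughly the last 6.8 years.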
describe "SanAPI product, No subscription" do
test "can access FREE metrics for all time", context do
{from, to} = from_to(2500, 0)
metric = get_free_timeseries_element(context.next_integer.(), @product, :metric)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access FREE queries for all time", context do
{from, to} = from_to(2500, 0)
slug = context.project.slug
query = history_price_query(slug, from, to)
result = execute_query(context.conn, query, "historyPrice")
assert_called(Sanbase.Price.timeseries_data(slug, from, to, :_))
assert result != nil
end
test "can access FREE signals for all time", context do
{from, to} = from_to(2500, 0)
signal = get_free_timeseries_element(context.next_integer.(), @product, :signal)
slug = context.project.slug
query = signal_query(signal, slug, from, to)
result = execute_query(context.conn, query, "getSignal")
assert_called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
assert result != nil
end
test "cannot access RESTRICTED metrics for over 2 years", context do
{from, to} = from_to(2 * 365 + 1, 31)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :free)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, :_, :_, :_, :_))
refute called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "cannot access RESTRICTED queries for over 2 years", context do
{from, to} = from_to(2 * 365 + 1, 31)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
refute called(Metric.timeseries_data("network_growth", :_, from, to, :_, :_))
assert result != nil
end
test "cannot access RESTRICTED queries last 30 days", context do
{from, to} = from_to(31, 29)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
refute called(Metric.timeseries_data("network_growth", :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics within 2 years and 30 days interval", context do
{from, to} = from_to(2 * 365, 31)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :free)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED queries within 2 years and 30 days interval", context do
{from, to} = from_to(2 * 365, 31)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, :_, :_, :_, :_))
assert result != nil
end
end
describe "SanAPI product, user with BASIC plan" do
setup context do
insert(:subscription_essential, user: context.user)
:ok
end
test "can access FREE metrics for all time", context do
{from, to} = from_to(2500, 0)
metric = get_free_timeseries_element(context.next_integer.(), @product, :metric)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access FREE queries for all time", context do
{from, to} = from_to(2500, 0)
slug = context.project.slug
query = history_price_query(slug, from, to)
result = execute_query(context.conn, query, "historyPrice")
assert_called(Sanbase.Price.timeseries_data(slug, from, to, :_))
assert result != nil
end
test "can access FREE signals for all time", context do
{from, to} = from_to(2500, 0)
signal = get_free_timeseries_element(context.next_integer.(), @product, :signal)
slug = context.project.slug
query = signal_query(signal, slug, from, to)
result = execute_query(context.conn, query, "getSignal")
assert_called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics for less than 2 years", context do
{from, to} = from_to(2 * 365 - 1, 2 * 365 - 2)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :basic)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED queries for less than 2 years", context do
{from, to} = from_to(2 * 365 - 1, 2 * 365 - 2)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, :_, :_, :_, :_))
assert result != nil
end
test "cannot access RESTRICTED queries for more than 2 years", context do
{from, to} = from_to(2 * 365 + 1, 2 * 365 - 1)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
refute called(Metric.timeseries_data("network_growth", :_, from, to, :_, :_))
assert result != nil
end
test "cannot access RESTRICTED metrics for more than 2 years", context do
{from, to} = from_to(2 * 365 + 1, 2 * 365 - 1)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :basic)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, :_, :_, :_, :_))
refute called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "cannot access RESTRICTED metrics for more than 2 years - both params outside allowed",
context do
{from, to} = from_to(2 * 365 - 10, 2 * 365 - 2)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :basic)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query_with_error(context.conn, query, "getMetric")
refute called(Metric.timeseries_data(metric, :_, :_, :_, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics realtime", context do
{from, to} = from_to(10, 0)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :basic)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED queries realtime", context do
{from, to} = from_to(10, 0)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, :_, :_, :_, :_))
assert result != nil
end
test "can't access metric with min plan PRO", context do
{from, to} = from_to(2 * 365 - 1, 2 * 365 - 2)
metric = "withdrawal_balance"
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
error_message = execute_query_with_error(context.conn, query, "getMetric")
refute called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert error_message ==
"""
The metric #{metric} is not accessible with the currently used
Sanapi Basic subscription. Please upgrade to Sanapi Pro subscription.
If you have a subscription for one product but attempt to fetch data using
another product, this error will still be shown. The data on Sanbase cannot
be fetched with a Sanapi subscription and vice versa.
"""
end
# test "can't access signal with min plan PRO", context do
# {from, to} = from_to(2 * 365 - 1, 2 * 365 - 2)
# signal = restricted_signal_for_plan(context.next_integer.(), @product, :pro)
# slug = context.project.slug
# query = signal_query(signal, slug, from, to)
# error_message = execute_query_with_error(context.conn, query, "getSignal")
# refute called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
# assert error_message ==
# """
# The signal #{signal} is not accessible with the currently used
# Sanapi Basic subscription. Please upgrade to Sanapi Pro subscription.
# If you have a subscription for one product but attempt to fetch data using
# another product, this error will still be shown. The data on Sanbase cannot
# be fetched with a Sanapi subscription and vice versa.
# """
# end
test "some metrics can be accessed only with free timeframe", context do
{from, to} = from_to(89, 2)
metric = "active_deposits"
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, :_, :_, :_, :_))
assert result != nil
end
end
describe "SanAPI product, user with PRO plan" do
setup context do
insert(:subscription_pro, user: context.user)
:ok
end
test "can access FREE metrics for all time", context do
{from, to} = from_to(2500, 0)
metric = get_free_timeseries_element(context.next_integer.(), @product, :metric)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access FREE queries for all time", context do
{from, to} = from_to(2500, 0)
slug = context.project.slug
query = history_price_query(slug, from, to)
result = execute_query(context.conn, query, "historyPrice")
assert_called(Sanbase.Price.timeseries_data(slug, from, to, :_))
assert result != nil
end
test "can access FREE signals for all time", context do
{from, to} = from_to(2500, 0)
signal = get_free_timeseries_element(context.next_integer.(), @product, :signal)
slug = context.project.slug
query = signal_query(signal, slug, from, to)
result = execute_query(context.conn, query, "getSignal")
assert_called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics for less than 7 years", context do
{from, to} = from_to(7 * 365 - 1, 7 * 365 - 2)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :pro)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
# test "can access RESTRICTED signals for less than 7 years", context do
# {from, to} = from_to(7 * 365 - 1, 7 * 365 - 2)
# signal = restricted_signal_for_plan(context.next_integer.(), @product, :pro)
# slug = context.project.slug
# query = signal_query(signal, slug, from, to)
# result = execute_query(context.conn, query, "getSignal")
# assert called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
# assert result != nil
# end
test "can access RESTRICTED queries for less than 7 years", context do
{from, to} = from_to(7 * 365 - 1, 7 * 365 - 2)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics for over 7 years", context do
{from, to} = from_to(7 * 365 + 1, 7 * 365 - 1)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :pro)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
# test "can access RESTRICTED signals for over 7 years", context do
# {from, to} = from_to(7 * 365 + 1, 7 * 365 - 1)
# signal = restricted_signal_for_plan(context.next_integer.(), @product, :pro)
# slug = context.project.slug
# query = signal_query(signal, slug, from, to)
# result = execute_query(context.conn, query, "getSignal")
# assert_called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
# assert result != nil
# end
test "can access RESTRICTED queries for more than 7 years", context do
{from, to} = from_to(7 * 365 + 1, 7 * 365 - 1)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics realtime", context do
{from, to} = from_to(10, 0)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :pro)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED queries realtime", context do
{from, to} = from_to(10, 0)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, from, to, :_, :_))
assert result != nil
end
# test "can access RESTRICTED signals realtime", context do
# {from, to} = from_to(10, 0)
# signal = restricted_signal_for_plan(context.next_integer.(), @product, :pro)
# slug = context.project.slug
# query = signal_query(signal, slug, from, to)
# result = execute_query(context.conn, query, "getSignal")
# assert called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
# assert result != nil
# end
test "can access metric with min plan PRO", context do
{from, to} = from_to(7 * 365 + 1, 7 * 365 - 1)
metric = "mvrv_long_short_diff_usd"
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
end
describe "SanAPI product, user with CUSTOM plan" do
setup context do
insert(:subscription_custom, user: context.user)
:ok
end
test "can access FREE metrics for all time", context do
{from, to} = from_to(2500, 0)
metric = get_free_timeseries_element(context.next_integer.(), @product, :metric)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access FREE queries for all time", context do
{from, to} = from_to(2500, 0)
slug = context.project.slug
query = history_price_query(slug, from, to)
result = execute_query(context.conn, query, "historyPrice")
assert_called(Sanbase.Price.timeseries_data(slug, from, to, :_))
assert result != nil
end
test "can access FREE signals for all time", context do
{from, to} = from_to(2500, 0)
signal = get_free_timeseries_element(context.next_integer.(), @product, :signal)
slug = context.project.slug
query = signal_query(signal, slug, from, to)
result = execute_query(context.conn, query, "getSignal")
assert_called(Signal.timeseries_data(signal, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED metrics for all time & realtime", context do
{from, to} = from_to(2500, 0)
metric = v2_restricted_metric_for_plan(context.next_integer.(), @product, :custom)
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
test "can access RESTRICTED queries for all time & realtime", context do
{from, to} = from_to(2500, 0)
query = network_growth_query(context.project.slug, from, to)
result = execute_query(context.conn, query, "networkGrowth")
assert_called(Metric.timeseries_data("network_growth", :_, :_, :_, :_, :_))
assert result != nil
end
test "can access holders distributions for all time & realtime", context do
{from, to} = from_to(2500, 0)
metric = "holders_distribution_0.01_to_0.1"
slug = context.project.slug
selector = %{slug: slug}
query = metric_query(metric, selector, from, to)
result = execute_query(context.conn, query, "getMetric")
assert_called(Metric.timeseries_data(metric, :_, from, to, :_, :_))
assert result != nil
end
end
# Private functions
defp metric_query(metric, selector, from, to) do
selector = extend_selector_with_required_fields(metric, selector)
"""
{
getMetric(metric: "#{metric}") {
timeseriesData(
selector: #{map_to_input_object_str(selector)}
from: "#{from}"
to: "#{to}"
interval: "30d"
includeIncompleteData: true){
datetime
value
}
}
}
"""
end
defp signal_query(signal, slug, from, to) do
"""
{
getSignal(signal: "#{signal}") {
timeseriesData(
slug: "#{slug}"
from: "#{from}"
to: "#{to}"
interval: "30d"){
datetime
value
}
}
}
"""
end
defp network_growth_query(slug, from, to) do
"""
{
networkGrowth(slug: "#{slug}", from: "#{from}", to: "#{to}", interval: "1d"){
datetime
newAddresses
}
}
"""
end
defp history_price_query(slug, from, to) do
"""
{
historyPrice(slug: "#{slug}", from: "#{from}", to: "#{to}", interval: "30d"){
datetime
priceUsd
}
}
"""
end
defp metric_resp() do
{:ok,
[
%{value: 10.0, datetime: ~U[2019-01-01 00:00:00Z]},
%{value: 20.0, datetime: ~U[2019-01-02 00:00:00Z]}
]}
end
defp signal_resp() do
{:ok,
[
%{value: 5.0, datetime: ~U[2020-01-01 00:00:00Z]},
%{value: 10.0, datetime: ~U[2020-01-02 00:00:00Z]}
]}
end
defp price_resp() do
{:ok,
[
%{
datetime: ~U[2019-01-01 00:00:00Z],
price_usd: 10,
price_btc: 0.1,
marketcap: 10_000,
marketcap_usd: 10_000,
volume: 500,
volume_usd: 500
},
%{
datetime: ~U[2019-01-02 00:00:00Z],
price_usd: 20,
price_btc: 0.2,
marketcap: 20_000,
marketcap_usd: 20_000,
volume: 2500,
volume_usd: 2500
}
]}
end
end
| 36.941569 | 98 | 0.632683 |
732b0f7e4f67d03aca6d0168e0119b95cb4d8701 | 1,653 | ex | Elixir | clients/service_management/lib/google_api/service_management/v1/model/get_iam_policy_request.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | ["Apache-2.0"] | null | null | null | clients/service_management/lib/google_api/service_management/v1/model/get_iam_policy_request.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | ["Apache-2.0"] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/service_management/lib/google_api/service_management/v1/model/get_iam_policy_request.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | ["Apache-2.0"] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.ServiceManagement.V1.Model.GetIamPolicyRequest do
@moduledoc """
Request message for `GetIamPolicy` method.
## Attributes
* `options` (*type:* `GoogleApi.ServiceManagement.V1.Model.GetPolicyOptions.t`, *default:* `nil`) - OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:options => GoogleApi.ServiceManagement.V1.Model.GetPolicyOptions.t()
}
field(:options, as: GoogleApi.ServiceManagement.V1.Model.GetPolicyOptions)
end
defimpl Poison.Decoder, for: GoogleApi.ServiceManagement.V1.Model.GetIamPolicyRequest do
def decode(value, options) do
GoogleApi.ServiceManagement.V1.Model.GetIamPolicyRequest.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.ServiceManagement.V1.Model.GetIamPolicyRequest do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 35.170213 | 183 | 0.760436 |
732b10df97ee353e04a3a8f6c74f280733106ca3 | 512 | ex | Elixir | lib/dnsimple/whois_privacy_renewal.ex | patmaddox/dnsimple-elixir | 99e7d178493e7900c3a89dd4d99d81597ed58020 | ["MIT"] | null | null | null | lib/dnsimple/whois_privacy_renewal.ex | patmaddox/dnsimple-elixir | 99e7d178493e7900c3a89dd4d99d81597ed58020 | ["MIT"] | 14 | 2021-02-19T07:09:55.000Z | 2022-02-24T12:33:37.000Z | lib/dnsimple/whois_privacy_renewal.ex | littleairmada/dnsimple-elixir | a1b71a9c84d1a440f86199b8f48754e1c88ca19f | ["MIT"] | null | null | null |
defmodule Dnsimple.WhoisPrivacyRenewal do
@moduledoc """
Represents the whois privacy renewal of a domain.
See:
- https://developer.dnsimple.com/v2/registrar/whois-privacy/
"""
@type t :: %__MODULE__{
id: integer,
domain_id: integer,
whois_privacy_id: integer,
state: String.t,
enabled: boolean,
expires_on: String.t,
created_at: String.t,
updated_at: String.t,
}
defstruct ~w(id domain_id whois_privacy_id state enabled expires_on created_at updated_at)a
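  # Illustrative value (all field values are made up):
  #
  #     %Dnsimple.WhoisPrivacyRenewal{id: 1, domain_id: 42, whois_privacy_id: 7,
  #       state: "new", enabled: true, expires_on: "2022-01-01",
  #       created_at: "2021-12-01T12:00:00Z", updated_at: "2021-12-01T12:00:00Z"}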
end
| 22.26087 | 93 | 0.707031 |
732b188c79a4ba4991c86b2bd053f172e6f618a4 | 985 | exs | Elixir | projects/FizzBuzz/fizzbuzz_recursive_optimized.exs | Maultasche/LwmElixirProjects | 4b962230c9b5b3cf6cc8b34ef2161ca6fde4412c | ["MIT"] | 38 | 2018-12-31T10:51:42.000Z | 2022-03-25T18:18:10.000Z | projects/FizzBuzz/fizzbuzz_recursive_optimized.exs | Maultasche/LwmElixirProjects | 4b962230c9b5b3cf6cc8b34ef2161ca6fde4412c | ["MIT"] | null | null | null | projects/FizzBuzz/fizzbuzz_recursive_optimized.exs | Maultasche/LwmElixirProjects | 4b962230c9b5b3cf6cc8b34ef2161ca6fde4412c | ["MIT"] | 6 | 2019-08-19T03:21:36.000Z | 2021-07-16T09:34:49.000Z |
defmodule FizzBuzz do
def perform_count(total_count) do
total_count
|> count([])
|> print()
end
  # The base case: gets called when the count is 0
defp count(0, output) do
output
end
  # Gets called when the count is divisible by both 3 and 5
defp count(count, output) when rem(count, 3) == 0 and rem(count, 5) == 0 do
count(count - 1, ["fizzbuzz" | output])
end
  # Gets called when the count is divisible by 3
defp count(count, output) when rem(count, 3) == 0 do
count(count - 1, ["fizz" | output])
end
  # Gets called when the count is divisible by 5
defp count(count, output) when rem(count, 5) == 0 do
count(count - 1, ["buzz" | output])
end
  # Gets called when the count is divisible by neither 3 nor 5
defp count(count, output) do
count(count - 1, [Integer.to_string(count) | output])
end
  # Prints out the contents of a list
defp print([head | tail]) do
IO.puts(head)
print(tail)
end
  # Base case: an empty list, so nothing is left to print
defp print([]) do
end
end
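# A minimal usage sketch, e.g. when the script is run with
# `elixir fizzbuzz_recursive_optimized.exs`:
#
#     FizzBuzz.perform_count(15)
#     # prints 1, 2, fizz, 4, buzz, ..., 14, fizzbuzz (one item per line)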
| 22.906977 | 76 | 0.669036 |
732b35abc8eede1f98b478d7ebe134e31537e54e | 1,206 | exs | Elixir | mix.exs | lucaong/elixir_mlx90640 | 3f85856c2c799588e86bc753190e8fb16ae78e77 | ["Apache-2.0"] | 9 | 2018-11-03T21:36:18.000Z | 2020-11-19T06:04:25.000Z | mix.exs | lucaong/elixir_mlx90640 | 3f85856c2c799588e86bc753190e8fb16ae78e77 | ["Apache-2.0"] | null | null | null | mix.exs | lucaong/elixir_mlx90640 | 3f85856c2c799588e86bc753190e8fb16ae78e77 | ["Apache-2.0"] | null | null | null |
defmodule Mlx90640.MixProject do
use Mix.Project
def project do
[
app: :elixir_mlx90640,
version: "0.1.8",
elixir: "~> 1.6",
start_permanent: Mix.env() == :prod,
compilers: [:elixir_make] ++ Mix.compilers,
aliases: aliases(),
package: package(),
source_url: "https://github.com/lucaong/elixir_mlx90640",
deps: deps(),
docs: [
main: "Mlx90640",
extras: ["README.md"]
]
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
defp aliases do
[clean: ["clean", "clean.make"]]
end
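  # "mix clean" is aliased to also run "clean.make", presumably so artifacts
  # built by the Makefile are removed along with the compiled BEAM files.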
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:elixir_make, "~> 0.4", runtime: false},
{:ex_doc, "~> 0.19", only: :dev, runtime: false}
]
end
defp package do
[
description: "An Elixir library to interface with the MLX90640 Far Infrared Thermal Sensor Array",
files: ["lib", "LICENSE", "mix.exs", "README.md", "src/*.cpp", "src/*.h", "src/linux/i2c-dev.h", "Makefile"],
maintainers: ["Luca Ongaro"],
licenses: ["Apache-2.0"],
links: %{}
]
end
end
| 23.647059 | 115 | 0.573798 |
732b475401306b2924eff3c4ee354d26117f826f | 2,046 | ex | Elixir | clients/content/lib/google_api/content/v2/model/order_refund.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | ["Apache-2.0"] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/content/lib/google_api/content/v2/model/order_refund.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | ["Apache-2.0"] | null | null | null | clients/content/lib/google_api/content/v2/model/order_refund.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Content.V2.Model.OrderRefund do
@moduledoc """
## Attributes
* `actor` (*type:* `String.t`, *default:* `nil`) - The actor that created the refund.
* `amount` (*type:* `GoogleApi.Content.V2.Model.Price.t`, *default:* `nil`) - The amount that is refunded.
* `creationDate` (*type:* `String.t`, *default:* `nil`) - Date on which the item has been created, in ISO 8601 format.
* `reason` (*type:* `String.t`, *default:* `nil`) - The reason for the refund.
* `reasonText` (*type:* `String.t`, *default:* `nil`) - The explanation of the reason.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:actor => String.t(),
:amount => GoogleApi.Content.V2.Model.Price.t(),
:creationDate => String.t(),
:reason => String.t(),
:reasonText => String.t()
}
field(:actor)
field(:amount, as: GoogleApi.Content.V2.Model.Price)
field(:creationDate)
field(:reason)
field(:reasonText)
end
defimpl Poison.Decoder, for: GoogleApi.Content.V2.Model.OrderRefund do
def decode(value, options) do
GoogleApi.Content.V2.Model.OrderRefund.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Content.V2.Model.OrderRefund do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 34.677966 | 122 | 0.689638 |
732b7645727c4729f4d83673a6b6521d87c2e90b
| 678 |
ex
|
Elixir
|
Microsoft.Azure.Management.EventHub/lib/microsoft/azure/management/event_hub/model/arm_disaster_recovery.ex
|
chgeuer/ex_microsoft_azure_management
|
99cd9f7f2ff1fdbe69ca5bac55b6e2af91ba3603
|
[
"Apache-2.0"
] | 4 |
2018-09-29T03:43:15.000Z
|
2021-04-01T18:30:46.000Z
|
Microsoft.Azure.Management.EventHub/lib/microsoft/azure/management/event_hub/model/arm_disaster_recovery.ex
|
chgeuer/ex_microsoft_azure_management
|
99cd9f7f2ff1fdbe69ca5bac55b6e2af91ba3603
|
[
"Apache-2.0"
] | null | null | null |
Microsoft.Azure.Management.EventHub/lib/microsoft/azure/management/event_hub/model/arm_disaster_recovery.ex
|
chgeuer/ex_microsoft_azure_management
|
99cd9f7f2ff1fdbe69ca5bac55b6e2af91ba3603
|
[
"Apache-2.0"
] | null | null | null |
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule Microsoft.Azure.Management.EventHub.Model.ArmDisasterRecovery do
@moduledoc """
Single item in List or Get Alias(Disaster Recovery configuration) operation
"""
@derive [Poison.Encoder]
defstruct [
:"id",
:"name",
:"type"
]
@type t :: %__MODULE__{
:"id" => String.t,
:"name" => String.t,
:"type" => String.t
}
end
defimpl Poison.Decoder, for: Microsoft.Azure.Management.EventHub.Model.ArmDisasterRecovery do
def decode(value, _options) do
value
end
end
| 22.6 | 93 | 0.69174 |
732bd33ce1e0a503451673a12e53501943c287fd
| 454 |
ex
|
Elixir
|
lib/kwtool/crawlers/upload_parser.ex
|
byhbt/kwtool
|
8958a160066e3e4c61806202af2563541f2261e3
|
[
"MIT"
] | 5 |
2021-12-14T08:18:24.000Z
|
2022-03-29T10:02:48.000Z
|
lib/kwtool/crawlers/upload_parser.ex
|
byhbt/kwtool
|
8958a160066e3e4c61806202af2563541f2261e3
|
[
"MIT"
] | 32 |
2021-03-21T16:32:18.000Z
|
2022-03-23T08:00:37.000Z
|
lib/kwtool/crawlers/upload_parser.ex
|
byhbt/kwtool
|
8958a160066e3e4c61806202af2563541f2261e3
|
[
"MIT"
] | 1 |
2021-06-03T17:22:16.000Z
|
2021-06-03T17:22:16.000Z
|
defmodule Kwtool.Crawlers.UploadParser do
alias NimbleCSV.RFC4180, as: CSV
def parse(%Plug.Upload{content_type: "text/csv"} = keyword_file) do
keyword_list =
keyword_file.path
|> File.stream!()
|> CSV.parse_stream(skip_headers: false)
|> Enum.to_list()
if keyword_list != [] do
{:ok, keyword_list}
else
{:error, :file_is_empty}
end
end
def parse(_), do: {:error, :file_is_invalid}
end
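# A hedged usage sketch for the parser above; the %Plug.Upload{} fields are
# hypothetical, and a CSV file is assumed to exist at the given path:
#
#     upload = %Plug.Upload{
#       path: "/tmp/keywords.csv",
#       filename: "keywords.csv",
#       content_type: "text/csv"
#     }
#
#     case Kwtool.Crawlers.UploadParser.parse(upload) do
#       {:ok, keywords} -> keywords
#       {:error, reason} -> reason
#     end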
| 22.7 | 69 | 0.647577 |
732bdda7d6082f4bffdd50d52aaf45d49d851bc9
| 953 |
ex
|
Elixir
|
lib/graphql/resolvers/machine_translation.ex
|
isshindev/accent
|
ae4c13139b0a0dfd64ff536b94c940a4e2862150
|
[
"BSD-3-Clause"
] | 806 |
2018-04-07T20:40:33.000Z
|
2022-03-30T01:39:57.000Z
|
lib/graphql/resolvers/machine_translation.ex
|
isshindev/accent
|
ae4c13139b0a0dfd64ff536b94c940a4e2862150
|
[
"BSD-3-Clause"
] | 194 |
2018-04-07T13:49:37.000Z
|
2022-03-30T19:58:45.000Z
|
lib/graphql/resolvers/machine_translation.ex
|
isshindev/accent
|
ae4c13139b0a0dfd64ff536b94c940a4e2862150
|
[
"BSD-3-Clause"
] | 89 |
2018-04-09T13:55:49.000Z
|
2022-03-24T07:09:31.000Z
|
defmodule Accent.GraphQL.Resolvers.MachineTranslation do
require Ecto.Query
alias Ecto.Query
alias Accent.Scopes.Revision, as: RevisionScope
alias Accent.{
Language,
MachineTranslations,
Project,
Repo,
Revision
}
@spec translate_text(Project.t(), %{text: String.t(), source_language_slug: String.t(), target_language_slug: String.t()}, GraphQLContext.t()) :: {:ok, any()}
def translate_text(project, args, _info) do
source_language = slug_language(project.id, args.source_language_slug)
target_language = slug_language(project.id, args.target_language_slug)
{:ok, MachineTranslations.translate_text(args.text, source_language, target_language)}
end
defp slug_language(project_id, slug) do
revision =
Revision
|> RevisionScope.from_project(project_id)
|> Query.where(slug: ^slug)
|> Repo.one()
language = Repo.get_by(Language, slug: slug)
revision || language
end
end
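# A hedged sketch of invoking the resolver above outside of GraphQL;
# `project` is assumed to be a loaded %Accent.Project{} and the slugs are
# hypothetical:
#
#     {:ok, translated} =
#       Accent.GraphQL.Resolvers.MachineTranslation.translate_text(
#         project,
#         %{text: "Hello", source_language_slug: "en", target_language_slug: "fr"},
#         nil
#       )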
| 27.228571 | 151 | 0.718783 |
732c0703b9200417833344b1175e644543a5fff1
| 17,909 |
ex
|
Elixir
|
lib/elixir/lib/record.ex
|
joearms/elixir
|
9a0f8107bd8bbd089acb96fe0041d61a05e88a9b
|
[
"Apache-2.0"
] | 4 |
2016-04-05T05:51:36.000Z
|
2019-10-31T06:46:35.000Z
|
lib/elixir/lib/record.ex
|
joearms/elixir
|
9a0f8107bd8bbd089acb96fe0041d61a05e88a9b
|
[
"Apache-2.0"
] | null | null | null |
lib/elixir/lib/record.ex
|
joearms/elixir
|
9a0f8107bd8bbd089acb96fe0041d61a05e88a9b
|
[
"Apache-2.0"
] | 5 |
2015-02-01T06:01:19.000Z
|
2019-08-29T09:02:35.000Z
|
defmodule Record do
@moduledoc """
Functions to define Elixir records
"""
@doc """
Extract record information from an Erlang file and
return the fields as a list of tuples.
## Examples
defrecord FileInfo, Record.extract(:file_info, from_lib: "kernel/include/file.hrl")
"""
def extract(name, opts) do
Record.Extractor.retrieve(name, opts)
end
@doc """
Main entry point for records definition. It defines a module
with the given `name` and the fields specified in `values`.
This is invoked directly by `Kernel.defrecord`, so check it
for more information and documentation.
"""
def defrecord(name, values, opts) do
block = Keyword.get(opts, :do, nil)
quote do
unquoted_values = unquote(values)
defmodule unquote(name) do
@moduledoc false
import Record.DSL
@record_fields []
@record_types []
# Reassign values to inner scope to
# avoid conflicts in nested records
values = unquoted_values
Record.deffunctions(values, __ENV__)
value = unquote(block)
Record.deftypes(values, @record_types, __ENV__)
value
end
end
end
@doc """
Import public record definition as a set of private macros (as defined by defrecordp/2)
## Usage
Record.import Record.Module, as: macro_name
## Example
defmodule Test do
Record.import File.Stat, as: :file_stat
def size(file_stat(size: size)), do: size
end
"""
defmacro import(module, as: name) do
quote do
Record.defmacros(unquote(name), unquote(module).__record__(:fields), __ENV__, unquote(module))
end
end
@doc """
Main entry point for private records definition. It defines
a set of macros with the given `name` and the fields specified
in `values`. This is invoked directly by `Kernel.defrecordp`,
so check it for more information and documentation.
"""
def defrecordp(name, fields) do
quote do
Record.defmacros(unquote(name), unquote(fields), __ENV__)
end
end
@doc """
Defines record functions skipping the module definition.
This is called directly by `defrecord`. It expects the record
values, a set of options and the module environment.
## Examples
defmodule CustomRecord do
Record.deffunctions [:name, :age], __ENV__
Record.deftypes [:name, :age], [name: :binary, age: :integer], __ENV__
end
"""
def deffunctions(values, env) do
values = lc value inlist values, do: convert_value(value)
escaped = Macro.escape(values)
contents = [
reflection(escaped),
initializer(escaped),
indexes(escaped),
conversions(values),
record_optimizable(),
updater(values),
accessors(values, 1),
switch_recorder()
]
contents = [quote(do: @record_fields unquote(escaped))|contents]
# Special case for bootstrapping purposes
if env == Macro.Env do
Module.eval_quoted(env, contents, [], [])
else
Module.eval_quoted(env.module, contents, [], env.location)
end
end
@doc """
Defines types and specs for the record.
"""
def deftypes(values, types, env) do
types = types || []
values = lc value inlist values do
{ name, default } = convert_value(value)
{ name, default, find_spec(types, name) }
end
contents = [
core_specs(values),
accessor_specs(values, 1, [])
]
if env == Macro.Env do
Module.eval_quoted(env, contents, [], [])
else
Module.eval_quoted(env.module, contents, [], env.location)
end
end
@doc """
Defines macros for manipulating records. This is called
directly by `defrecordp`. It expects the macro name, the
record values and the environment.
## Examples
defmodule CustomRecord do
Record.defmacros :user, [:name, :age], __ENV__
end
"""
def defmacros(name, values, env, tag // nil) do
escaped = lc value inlist values do
{ key, value } = convert_value(value)
{ key, Macro.escape(value) }
end
contents = quote do
defmacrop unquote(name)() do
Record.access(unquote(tag) || __MODULE__, unquote(escaped), [], __CALLER__)
end
defmacrop unquote(name)(record) when is_tuple(record) do
Record.to_keywords(unquote(tag) || __MODULE__, unquote(escaped), record)
end
defmacrop unquote(name)(args) do
Record.access(unquote(tag) || __MODULE__, unquote(escaped), args, __CALLER__)
end
defmacrop unquote(name)(record, key) when is_atom(key) do
Record.get(unquote(tag) || __MODULE__, unquote(escaped), record, key)
end
defmacrop unquote(name)(record, args) do
Record.dispatch(unquote(tag) || __MODULE__, unquote(escaped), record, args, __CALLER__)
end
end
Module.eval_quoted(env.module, contents, [], env.location)
end
## Callbacks
# Store all optimizable fields in the record as well
@doc false
defmacro __before_compile__(_) do
quote do
def __record__(:optimizable), do: @record_optimizable
end
end
# Store fields that can be optimized and those that cannot be
# optimized because they are overridden
@doc false
def __on_definition__(env, kind, name, args, _guards, _body) do
tuple = { name, length(args) }
module = env.module
functions = Module.get_attribute(module, :record_optimizable)
functions =
if kind in [:def] and Module.get_attribute(module, :record_optimized) do
[tuple|functions]
else
List.delete(functions, tuple)
end
Module.put_attribute(module, :record_optimizable, functions)
end
# Implements the access macro used by records.
# It returns a quoted expression that defines
# a record or a match in case the record is
# inside a match.
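# For example, given a hypothetical private record `user` with fields
# [name: nil, age: 0], `user(name: "anna")` expands to `{ Tag, "anna", 0 }`
# in ordinary code, and to `{ Tag, "anna", _ }` when it appears inside a
# match (where `Tag` is the record's tag atom).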
@doc false
def access(atom, fields, keyword, caller) do
unless is_keyword(keyword) do
raise "expected contents inside brackets to be a Keyword"
end
in_match = caller.in_match?
has_underscore_value = Keyword.has_key?(keyword, :_)
underscore_value = Keyword.get(keyword, :_, { :_, [], nil })
keyword = Keyword.delete keyword, :_
iterator = fn({field, default}, each_keyword) ->
new_fields =
case Keyword.has_key?(each_keyword, field) do
true -> Keyword.get(each_keyword, field)
false ->
case in_match or has_underscore_value do
true -> underscore_value
false -> Macro.escape(default)
end
end
{ new_fields, Keyword.delete(each_keyword, field) }
end
{ match, remaining } = :lists.mapfoldl(iterator, keyword, fields)
case remaining do
[] ->
quote do: { unquote_splicing([atom|match]) }
_ ->
keys = lc { key, _ } inlist remaining, do: key
raise "record #{inspect atom} does not have the keys: #{inspect keys}"
end
end
# Dispatch the call to either update or to_list depending on the args given.
@doc false
def dispatch(atom, fields, record, args, caller) do
if is_keyword(args) do
update(atom, fields, record, args, caller)
else
to_list(atom, fields, record, args)
end
end
# Implements the update macro defined by defmacros.
# It returns a quoted expression that represents
# the access given by the keywords.
@doc false
defp update(atom, fields, var, keyword, caller) do
unless is_keyword(keyword) do
raise "expected contents inside brackets to be a Keyword"
end
if caller.in_match? do
raise "cannot invoke update style macro inside match context"
end
Enum.reduce keyword, var, fn({ key, value }, acc) ->
index = find_index(fields, key, 0)
if index do
quote do
:erlang.setelement(unquote(index + 2), unquote(acc), unquote(value))
end
else
raise "record #{inspect atom} does not have the key: #{inspect key}"
end
end
end
# Implements the get macro defined by defmacros.
# It returns a quoted expression that represents
# getting the value of a given field.
@doc false
def get(atom, fields, var, key) do
index = find_index(fields, key, 0)
if index do
quote do
:erlang.element(unquote(index + 2), unquote(var))
end
else
raise "record #{inspect atom} does not have the key: #{inspect key}"
end
end
# Implements to_keywords macro defined by defmacros.
# It returns a quoted expression that represents
# converting record to keywords list.
@doc false
def to_keywords(_atom, fields, record) do
Enum.map fields,
fn { key, _default } ->
index = find_index(fields, key, 0)
quote do
{ unquote(key), :erlang.element(unquote(index + 2), unquote(record)) }
end
end
end
# Implements to_list macro defined by defmacros.
# It returns a quoted expression that represents
# extracting given fields from record.
@doc false
defp to_list(atom, fields, record, keys) do
Enum.map keys,
fn(key) ->
index = find_index(fields, key, 0)
if index do
quote do: :erlang.element(unquote(index + 2), unquote(record))
else
raise "record #{inspect atom} does not have the key: #{inspect key}"
end
end
end
## Function generation
# Define __record__/1 and __record__/2 as reflection functions
# that returns the record names and fields.
#
# Note that fields are *not* keywords. They are in the same
# order as given as parameter and reflects the order of the
# fields in the tuple.
#
# ## Examples
#
# defrecord FileInfo, atime: nil, mtime: nil
#
# FileInfo.__record__(:name) #=> FileInfo
# FileInfo.__record__(:fields) #=> [atime: nil, mtime: nil]
#
defp reflection(values) do
quote do
@doc false
def __record__(kind, _), do: __record__(kind)
@doc false
def __record__(:name), do: __MODULE__
def __record__(:fields), do: unquote(values)
end
end
# Define initializer methods. For a declaration like:
#
# defrecord FileInfo, atime: nil, mtime: nil
#
# It will define these methods (plus a clause that re-tags an existing tuple):
#
# def new() do
# new([])
# end
#
# def new([]) do
# { FileInfo, nil, nil }
# end
#
# def new(opts) do
# { FileInfo, Keyword.get(opts, :atime), Keyword.get(opts, :mtime) }
# end
#
defp initializer(values) do
defaults = lc { _, value } inlist values, do: value
# For each value, define a piece of code that will receive
# an ordered dict of options (opts) and it will try to fetch
# the given key from the ordered dict, falling back to the
# default value if one does not exist.
selective = lc { k, v } inlist values do
quote do: Keyword.get(opts, unquote(k), unquote(v))
end
quote do
@doc false
def new(), do: new([])
@doc false
def new([]), do: { __MODULE__, unquote_splicing(defaults) }
def new(opts) when is_list(opts), do: { __MODULE__, unquote_splicing(selective) }
def new(tuple) when is_tuple(tuple), do: :erlang.setelement(1, tuple, __MODULE__)
end
end
# Define methods to get the index of a given key.
#
# Useful if you need to know position of the key for such applications as:
# - ets
# - mnesia
#
# For a declaration like:
#
# defrecord FileInfo, atime: nil, mtime: nil
#
# It will define the following methods:
#
# def __index__(:atime), do: 2
# def __index__(:mtime), do: 3
# def __index__(_), do: nil
#
defp indexes(values) do
quoted = lc { k, _ } inlist values do
index = find_index(values, k, 0)
quote do
@doc false
def __index__(unquote(k)), do: unquote(index + 1)
end
end
quote do
unquote(quoted)
@doc false
def __index__(_), do: nil
@doc false
def __index__(key, _), do: __index__(key)
end
end
# Define converter methods. For a declaration like:
#
# defrecord FileInfo, atime: nil, mtime: nil
#
# It will define one method, to_keywords, which returns a keyword list:
#
# [atime: nil, mtime: nil]
#
defp conversions(values) do
sorted = lc { k, _ } inlist values do
index = find_index(values, k, 0)
{ k, quote(do: :erlang.element(unquote(index + 2), record)) }
end
quote do
@doc false
def to_keywords(record) do
unquote(:orddict.from_list(sorted))
end
end
end
# Implement accessors. For a declaration like:
#
# defrecord FileInfo, atime: nil, mtime: nil
#
# It will define six methods:
#
#     def atime(record) do
#       elem(record, 1)
#     end
#
#     def mtime(record) do
#       elem(record, 2)
#     end
#
#     def atime(value, record) do
#       set_elem(record, 1, value)
#     end
#
#     def mtime(value, record) do
#       set_elem(record, 2, value)
#     end
#
#     def update_atime(callback, record) do
#       set_elem(record, 1, callback.(elem(record, 1)))
#     end
#
#     def update_mtime(callback, record) do
#       set_elem(record, 2, callback.(elem(record, 2)))
#     end
#
defp accessors([{ :__exception__, _ }|t], 1) do
accessors(t, 2)
end
defp accessors([{ key, _default }|t], i) do
update = binary_to_atom "update_" <> atom_to_binary(key)
contents = quote do
@doc false
def unquote(key)(record) do
:erlang.element(unquote(i + 1), record)
end
@doc false
def unquote(key)(value, record) do
:erlang.setelement(unquote(i + 1), record, value)
end
@doc false
def unquote(update)(function, record) do
:erlang.setelement(unquote(i + 1), record,
function.(:erlang.element(unquote(i + 1), record)))
end
end
[contents|accessors(t, i + 1)]
end
defp accessors([], _i) do
[]
end
# Define an updater method that receives a
# keyword list and updates the record.
defp updater(values) do
fields =
lc {key, _default} inlist values do
index = find_index(values, key, 1)
quote do
Keyword.get(keywords, unquote(key), elem(record, unquote(index)))
end
end
contents = quote do: { __MODULE__, unquote_splicing(fields) }
quote do
@doc false
def update([], record) do
record
end
def update(keywords, record) do
unquote(contents)
end
end
end
defp record_optimizable do
quote do
@record_optimized true
@record_optimizable []
@before_compile { unquote(__MODULE__), :__before_compile__ }
@on_definition { unquote(__MODULE__), :__on_definition__ }
end
end
defp switch_recorder do
quote do: @record_optimized false
end
## Types/specs generation
defp core_specs(values) do
types = lc { _, _, spec } inlist values, do: spec
options = if values == [], do: [], else: [options_specs(values)]
quote do
unless Kernel.Typespec.defines_type?(__MODULE__, :t, 0) do
@type t :: { __MODULE__, unquote_splicing(types) }
end
unless Kernel.Typespec.defines_type?(__MODULE__, :options, 0) do
@type options :: unquote(options)
end
@spec new :: t
@spec new(options | tuple) :: t
@spec to_keywords(t) :: options
@spec update(options, t) :: t
@spec __record__(:name) :: atom
@spec __record__(:fields) :: [{atom,any}]
@spec __index__(atom) :: non_neg_integer | nil
end
end
defp options_specs([{ k, _, v }|t]) do
:lists.foldl fn { k, _, v }, acc ->
{ :|, [], [{ k, v }, acc] }
end, { k, v }, t
end
defp accessor_specs([{ :__exception__, _, _ }|t], 1, acc) do
accessor_specs(t, 2, acc)
end
defp accessor_specs([{ key, _default, spec }|t], i, acc) do
update = binary_to_atom "update_" <> atom_to_binary(key)
contents = quote do
@spec unquote(key)(t) :: unquote(spec)
@spec unquote(key)(unquote(spec), t) :: t
@spec unquote(update)((unquote(spec) -> unquote(spec)), t) :: t
end
accessor_specs(t, i + 1, [contents | acc])
end
defp accessor_specs([], _i, acc), do: acc
## Helpers
defp is_keyword(list) when is_list(list), do: :lists.all(is_keyword_tuple(&1), list)
defp is_keyword(_), do: false
defp is_keyword_tuple({ x, _ }) when is_atom(x), do: true
defp is_keyword_tuple(_), do: false
defp convert_value(atom) when is_atom(atom), do: { atom, nil }
defp convert_value({ atom, other }) when is_atom(atom) and is_function(other), do:
raise ArgumentError, message: "record field default value #{inspect atom} cannot be a function"
defp convert_value({ atom, other }) when is_atom(atom) and (is_reference(other) or is_pid(other) or is_port(other)), do:
raise ArgumentError, message: "record field default value #{inspect atom} cannot be a reference, pid or port"
defp convert_value({ atom, _ } = tuple) when is_atom(atom), do: tuple
defp convert_value({ field, _ }), do:
raise ArgumentError, message: "record field name has to be an atom, got #{inspect field}"
defp find_index([{ k, _ }|_], k, i), do: i
defp find_index([{ _, _ }|t], k, i), do: find_index(t, k, i + 1)
defp find_index([], _k, _i), do: nil
defp find_spec(types, name) do
matches = lc { k, v } inlist types, name == k, do: v
case matches do
[h|_] -> h
_ -> quote do: term
end
end
end
defmodule Record.DSL do
@moduledoc false
@doc """
Defines the type for each field in the record.
Expects a keyword list.
"""
defmacro record_type(opts) when is_list(opts) do
escaped = lc { k, v } inlist opts, do: { k, Macro.escape(v) }
quote do
@record_types Keyword.merge(@record_types || [], unquote(escaped))
end
end
end
| 27.217325 | 122 | 0.627227 |
732c256449d71ce41cf91d47a26cc933d0c1a832
| 1,248 |
ex
|
Elixir
|
debian/watch.ex
|
mingw-deb/pdcurses
|
aa2dec102403fc95f433f22e856f5026557aba10
|
[
"X11"
] | null | null | null |
debian/watch.ex
|
mingw-deb/pdcurses
|
aa2dec102403fc95f433f22e856f5026557aba10
|
[
"X11"
] | null | null | null |
debian/watch.ex
|
mingw-deb/pdcurses
|
aa2dec102403fc95f433f22e856f5026557aba10
|
[
"X11"
] | null | null | null |
# Example watch control file for uscan
# Rename this file to "watch" and then you can run the "uscan" command
# to check for upstream updates and more.
# See uscan(1) for format
# Compulsory line, this is a version 4 file
version=4
# PGP signature mangle, so foo.tar.gz has foo.tar.gz.sig
#opts="pgpsigurlmangle=s%$%.sig%"
# HTTP site (basic)
#http://example.com/downloads.html \
# files/mingw-w64-pdcurses-([\d\.]+)\.tar\.gz debian uupdate
# Uncomment to examine an FTP server
#ftp://ftp.example.com/pub/mingw-w64-pdcurses-(.*)\.tar\.gz debian uupdate
# SourceForge hosted projects
# http://sf.net/mingw-w64-pdcurses/ mingw-w64-pdcurses-(.*)\.tar\.gz debian uupdate
# GitHub hosted projects
#opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)\.tar\.gz%<project>-$1.tar.gz%" \
# https://github.com/<user>/mingw-w64-pdcurses/tags \
# (?:.*?/)?v?(\d[\d.]*)\.tar\.gz debian uupdate
# PyPI
# https://pypi.debian.net/mingw-w64-pdcurses/mingw-w64-pdcurses-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
# Direct Git
# opts="mode=git" http://git.example.com/mingw-w64-pdcurses.git \
# refs/tags/v([\d\.]+) debian uupdate
# Uncomment to find new files on GooglePages
# http://example.googlepages.com/foo.html mingw-w64-pdcurses-(.*)\.tar\.gz
| 32 | 112 | 0.679487 |
732c3527efd367d37d280b6c259eb866087648ef
| 7,393 |
ex
|
Elixir
|
lib/astarte_vmq_plugin.ex
|
matt-mazzucato/astarte_vmq_plugin
|
83b1b3e67fe749326b66f06ce57dfc120d549fdf
|
[
"Apache-2.0"
] | null | null | null |
lib/astarte_vmq_plugin.ex
|
matt-mazzucato/astarte_vmq_plugin
|
83b1b3e67fe749326b66f06ce57dfc120d549fdf
|
[
"Apache-2.0"
] | null | null | null |
lib/astarte_vmq_plugin.ex
|
matt-mazzucato/astarte_vmq_plugin
|
83b1b3e67fe749326b66f06ce57dfc120d549fdf
|
[
"Apache-2.0"
] | null | null | null |
#
# This file is part of Astarte.
#
# Copyright 2017 Ispirata Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
defmodule Astarte.VMQ.Plugin do
@moduledoc """
VerneMQ plugin hooks for Astarte: device authentication and authorization,
plus publishing of connection, data, and control events over AMQP.
"""
alias Astarte.VMQ.Plugin.Config
alias Astarte.VMQ.Plugin.AMQPClient
@max_rand trunc(:math.pow(2, 32) - 1)
def auth_on_register(_peer, _subscriber_id, :undefined, _password, _cleansession) do
# If it doesn't have a username we let someone else decide
:next
end
def auth_on_register(_peer, {mountpoint, _client_id}, username, _password, _cleansession) do
if !String.contains?(username, "/") do
# Not a device, let someone else decide
:next
else
subscriber_id = {mountpoint, username}
# TODO: we probably want some of these values to be configurable in some way
{:ok,
[
subscriber_id: subscriber_id,
max_inflight_messages: 100,
max_message_rate: 10000,
max_message_size: 65535,
retry_interval: 20000,
upgrade_qos: false
]}
end
end
def auth_on_publish(
_username,
{_mountpoint, client_id},
_qos,
topic_tokens,
_payload,
_isretain
) do
cond do
# Not a device, authorizing everything
!String.contains?(client_id, "/") ->
:ok
# Device auth
String.split(client_id, "/") == Enum.take(topic_tokens, 2) ->
:ok
true ->
{:error, :unauthorized}
end
end
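# For instance (hypothetical identifiers): a device connected with client_id
# "myrealm/mydevice" may only publish on topics starting with
# ["myrealm", "mydevice"], while clients whose id contains no "/" are
# authorized unconditionally.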
def auth_on_subscribe(_username, {_mountpoint, client_id}, topics) do
if !String.contains?(client_id, "/") do
:ok
else
client_id_tokens = String.split(client_id, "/")
authorized_topics =
Enum.filter(topics, fn {topic_tokens, _qos} ->
client_id_tokens == Enum.take(topic_tokens, 2)
end)
case authorized_topics do
[] -> {:error, :unauthorized}
authorized_topics -> {:ok, authorized_topics}
end
end
end
def disconnect_client(client_id, discard_state) do
opts =
if discard_state do
[:do_cleanup]
else
[]
end
mountpoint = ''
subscriber_id = {mountpoint, client_id}
case :vernemq_dev_api.disconnect_by_subscriber_id(subscriber_id, opts) do
:ok ->
:ok
:not_found ->
{:error, :not_found}
end
end
def on_client_gone({_mountpoint, client_id}) do
publish_event(client_id, "disconnection", now_us_x10_timestamp())
end
def on_client_offline({_mountpoint, client_id}) do
publish_event(client_id, "disconnection", now_us_x10_timestamp())
end
def on_register({ip_addr, _port}, {_mountpoint, client_id}, _username) do
with [realm, device_id] <- String.split(client_id, "/") do
# Start the heartbeat
setup_heartbeat_timer(realm, device_id, self())
timestamp = now_us_x10_timestamp()
ip_string =
ip_addr
|> :inet.ntoa()
|> to_string()
publish_event(client_id, "connection", timestamp, x_astarte_remote_ip: ip_string)
else
# Not a device, ignoring it
_ ->
:ok
end
end
def on_publish(_username, {_mountpoint, client_id}, _qos, topic_tokens, payload, _isretain) do
with [realm, device_id] <- String.split(client_id, "/") do
timestamp = now_us_x10_timestamp()
case topic_tokens do
[^realm, ^device_id] ->
publish_introspection(realm, device_id, payload, timestamp)
[^realm, ^device_id, "control" | control_path_tokens] ->
control_path = "/" <> Enum.join(control_path_tokens, "/")
publish_control_message(realm, device_id, control_path, payload, timestamp)
[^realm, ^device_id, interface | path_tokens] ->
path = "/" <> Enum.join(path_tokens, "/")
publish_data(realm, device_id, interface, path, payload, timestamp)
end
else
# Not a device, ignoring it
_ ->
:ok
end
end
def handle_heartbeat(realm, device_id, session_pid) do
if Process.alive?(session_pid) do
publish_heartbeat(realm, device_id)
setup_heartbeat_timer(realm, device_id, session_pid)
else
# The session is not alive anymore, just stop
:ok
end
end
defp setup_heartbeat_timer(realm, device_id, session_pid) do
args = [realm, device_id, session_pid]
interval = Config.device_heartbeat_interval_ms() |> randomize_interval(0.25)
{:ok, _timer} = :timer.apply_after(interval, __MODULE__, :handle_heartbeat, args)
:ok
end
defp randomize_interval(interval, tolerance) do
# :rand supersedes the deprecated :random module
multiplier = 1 + (tolerance * 2 * :rand.uniform() - tolerance)
(interval * multiplier)
|> Float.round()
|> trunc()
end
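# For example, with an interval of 60_000 ms and a tolerance of 0.25 the
# multiplier is drawn from [0.75, 1.25), so the heartbeat fires after
# roughly 45_000..75_000 ms.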
defp publish_introspection(realm, device_id, payload, timestamp) do
publish(realm, device_id, payload, "introspection", timestamp)
end
defp publish_data(realm, device_id, interface, path, payload, timestamp) do
additional_headers = [x_astarte_interface: interface, x_astarte_path: path]
publish(realm, device_id, payload, "data", timestamp, additional_headers)
end
defp publish_control_message(realm, device_id, control_path, payload, timestamp) do
additional_headers = [x_astarte_control_path: control_path]
publish(realm, device_id, payload, "control", timestamp, additional_headers)
end
defp publish_event(client_id, event_string, timestamp, additional_headers \\ []) do
with [realm, device_id] <- String.split(client_id, "/") do
publish(realm, device_id, "", event_string, timestamp, additional_headers)
else
# Not a device, ignoring it
_ ->
:ok
end
end
defp publish_heartbeat(realm, device_id) do
timestamp = now_us_x10_timestamp()
publish(realm, device_id, "", "heartbeat", timestamp)
end
defp publish(realm, device_id, payload, event_string, timestamp, additional_headers \\ []) do
headers =
[
x_astarte_vmqamqp_proto_ver: 1,
x_astarte_realm: realm,
x_astarte_device_id: device_id,
x_astarte_msg_type: event_string
] ++ additional_headers
message_id = generate_message_id(realm, device_id, timestamp)
sharding_key = {realm, device_id}
:ok =
AMQPClient.publish(payload,
headers: headers,
message_id: message_id,
timestamp: timestamp,
sharding_key: sharding_key
)
end
defp now_us_x10_timestamp do
DateTime.utc_now()
|> DateTime.to_unix(:microsecond)
|> Kernel.*(10)
end
defp generate_message_id(realm, device_id, timestamp) do
realm_trunc = String.slice(realm, 0..63)
device_id_trunc = String.slice(device_id, 0..15)
timestamp_hex_str = Integer.to_string(timestamp, 16)
rnd = Enum.random(0..@max_rand) |> Integer.to_string(16)
"#{realm_trunc}-#{device_id_trunc}-#{timestamp_hex_str}-#{rnd}"
end
end
| 28.544402 | 96 | 0.665765 |
732c3c9baa40a372357fddb836dc9e2870a2bb6a
| 7,740 |
ex
|
Elixir
|
lib/mcc/lib.ex
|
getong/mcc
|
5e4dd398a7f5ac0869aa8208724ee1c4b2a9df40
|
[
"Apache-2.0"
] | 13 |
2019-02-28T05:24:35.000Z
|
2021-11-10T16:39:07.000Z
|
lib/mcc/lib.ex
|
getong/mcc
|
5e4dd398a7f5ac0869aa8208724ee1c4b2a9df40
|
[
"Apache-2.0"
] | 1 |
2019-03-02T16:20:26.000Z
|
2019-03-15T09:18:57.000Z
|
lib/mcc/lib.ex
|
getong/mcc
|
5e4dd398a7f5ac0869aa8208724ee1c4b2a9df40
|
[
"Apache-2.0"
] | 2 |
2019-02-28T00:00:02.000Z
|
2019-08-15T13:55:17.000Z
|
defmodule Mcc.Lib do
@moduledoc """
This module contains functions to manage a cluster based on mnesia.
It covers how to:
1. start mnesia and create or copy mnesia tables
2. make the local node join a mnesia cluster
3. make the local node leave a mnesia cluster
4. remove a node from the mnesia cluster
5. get the status of the mnesia cluster
"""
@doc """
Tries to start mnesia and to create or copy the mnesia tables.
Returns `:ok` if every step succeeds; a failed step raises a `MatchError`.
"""
@spec start :: :ok | {:error, term()}
def start do
:ok = ensure_ok(ensure_data_dir())
:ok = ensure_ok(init_mnesia_schema())
:ok = :mnesia.start()
:ok = init_tables()
:ok = wait_for(:tables)
end
@doc """
Make one node join into the mnesia cluster.
"""
@spec join_cluster(node()) :: :ok | {:error, term()}
def join_cluster(node_name) do
:ok = ensure_ok(ensure_stopped())
:ok = ensure_ok(delete_schema())
:ok = ensure_ok(ensure_started())
:ok = ensure_ok(connect(node_name))
:ok = ensure_ok(copy_schema(node()))
:ok = copy_tables()
:ok = ensure_ok(wait_for(:tables))
end
@doc """
Make the local node leave the mnesia cluster.
"""
@spec leave_cluster ::
:ok
| {:error, :node_not_in_cluster}
| {:error, {:failed_to_leave, node()}}
def leave_cluster do
leave_cluster(running_nodes() -- [node()])
end
@doc """
Remove one node from the mnesia cluster.
"""
@spec remove_from_cluster(node()) ::
:ok
| {:error, :node_not_in_cluster}
| {:error, term()}
def remove_from_cluster(node_name) when node_name != node() do
case {node_in_cluster?(node_name), running_db_node?(node_name)} do
{true, true} ->
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :ensure_stopped, []))
:ok = ensure_ok(del_schema_copy(node_name))
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :delete_schema, []))
{true, false} ->
:ok = ensure_ok(del_schema_copy(node_name))
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :delete_schema, []))
{false, _} ->
{:error, :node_not_in_cluster}
end
end
@doc """
Get status of the mnesia cluster.
"""
@spec status :: list()
def status do
running = :mnesia.system_info(:running_db_nodes)
stopped = :mnesia.system_info(:db_nodes) -- running
[{:running_nodes, running}, {:stopped_nodes, stopped}]
end
@doc """
Delete schema copy of given node.
"""
@spec del_schema_copy(node()) :: :ok | {:error, any()}
def del_schema_copy(node_name) do
case :mnesia.del_table_copy(:schema, node_name) do
{:atomic, :ok} -> :ok
{:aborted, reason} -> {:error, reason}
end
end
@doc """
Delete schema information in local node.
"""
@spec delete_schema :: :ok | {:error, any()}
def delete_schema, do: :mnesia.delete_schema([node()])
@doc """
Ensure mnesia is stopped.
"""
@spec ensure_stopped :: :ok | {:error, any()}
def ensure_stopped do
_ = :mnesia.stop()
wait_for(:stop)
end
@doc """
Get all nodes in current mnesia cluster.
"""
@spec all_nodes :: [node()]
def all_nodes, do: :mnesia.system_info(:db_nodes)
@doc """
Get all running nodes in current mnesia cluster.
"""
@spec running_nodes :: [node()]
def running_nodes, do: :mnesia.system_info(:running_db_nodes)
@doc """
Get all stopped nodes in the current mnesia cluster.
"""
@spec not_running_nodes :: [node()]
def not_running_nodes, do: all_nodes() -- running_nodes()
@doc """
Copy mnesia table from remote node.
"""
@spec copy_table(atom(), atom()) :: :ok | {:error, any()}
def copy_table(name, ram_or_disc \\ :ram_copies) do
ensure_tab(:mnesia.add_table_copy(name, node(), ram_or_disc))
end
@doc """
Create mnesia table.
"""
@spec create_table(atom(), list()) :: :ok | {:error, any()}
def create_table(name, tabdef) do
ensure_tab(:mnesia.create_table(name, tabdef))
end
@doc false
defp ensure_data_dir do
mnesia_dir = :mnesia.system_info(:directory)
case :filelib.ensure_dir(:filename.join(mnesia_dir, :foo)) do
:ok -> :ok
{:error, reason} -> {:error, {:mnesia_dir_error, mnesia_dir, reason}}
end
end
@doc false
defp init_mnesia_schema do
case :mnesia.system_info(:extra_db_nodes) do
[] -> :mnesia.create_schema([node()])
[_ | _] -> :ok
end
end
@doc false
defp copy_schema(node_name) do
case :mnesia.change_table_copy_type(:schema, node_name, :disc_copies) do
{:atomic, :ok} -> :ok
{:aborted, {:already_exists, :schema, _node_name, :disc_copies}} -> :ok
{:aborted, error} -> {:error, error}
end
end
@doc false
defp init_tables do
case :mnesia.system_info(:extra_db_nodes) do
[] -> create_tables()
[_ | _] -> copy_tables()
end
end
@doc false
defp create_tables do
:mcc
|> Application.get_env(:mnesia_table_modules, [])
|> Enum.each(fn t -> Code.ensure_loaded?(t) and apply(t, :boot_tables, []) end)
end
@doc false
defp copy_tables do
:mcc
|> Application.get_env(:mnesia_table_modules, [])
|> Enum.each(fn t -> Code.ensure_loaded?(t) and apply(t, :copy_tables, []) end)
end
@doc false
defp ensure_started do
_ = :mnesia.start()
wait_for(:start)
end
@doc false
defp connect(node_name) do
case :mnesia.change_config(:extra_db_nodes, [node_name]) do
{:ok, [_node_name]} -> :ok
{:ok, []} -> {:error, {:failed_to_connect, node_name}}
error -> error
end
end
@doc false
defp leave_cluster([]), do: {:error, :node_not_in_cluster}
defp leave_cluster(nodes) when is_list(nodes) do
case Enum.any?(nodes, fn node_name -> leave_cluster(node_name) == :ok end) do
true -> :ok
_ -> {:error, {:failed_to_leave, nodes}}
end
end
defp leave_cluster(node_name) when is_atom(node_name) and node_name != node() do
case running_db_node?(node_name) do
true ->
:ok = ensure_ok(ensure_stopped())
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :del_schema_copy, [node()]))
:ok = ensure_ok(delete_schema())
false ->
{:error, {:node_name_not_running, node_name}}
end
end
@doc false
defp node_in_cluster?(node_name), do: Enum.member?(all_nodes(), node_name)
@doc false
defp running_db_node?(node_name), do: Enum.member?(running_nodes(), node_name)
@doc false
defp wait_for(:start) do
case :mnesia.system_info(:is_running) do
:yes ->
:ok
:starting ->
Process.sleep(1_000)
wait_for(:start)
_ ->
{:error, :mnesia_unexpectedly_stopped}
end
end
defp wait_for(:stop) do
case :mnesia.system_info(:is_running) do
:no ->
:ok
:stopping ->
Process.sleep(1_000)
wait_for(:stop)
_ ->
{:error, :mnesia_unexpectedly_running}
end
end
defp wait_for(:tables) do
:local_tables
|> :mnesia.system_info()
|> :mnesia.wait_for_tables(Application.get_env(:mcc, :mnesia_table_wait_timeout, 150_000))
|> case do
:ok -> :ok
{:error, reason} -> {:error, reason}
{:timeout, badtables} -> {:error, {:timeout, badtables}}
end
end
@doc false
defp ensure_ok(:ok), do: :ok
defp ensure_ok({:error, {_, {:already_exists, _}}}), do: :ok
defp ensure_ok(any), do: {:error, any}
@doc false
defp ensure_tab({:atomic, :ok}), do: :ok
defp ensure_tab({:aborted, {:already_exists, _}}), do: :ok
defp ensure_tab({:aborted, {:already_exists, _name, _node_name}}), do: :ok
defp ensure_tab({:aborted, error}), do: {:error, error}
# __end_of_module__
end
| 26.506849 | 94 | 0.627778 |
732cb05579b48a247ada8dc8f3e1979a19b9347b
| 122 |
exs
|
Elixir
|
test/test_helper.exs
|
rubysolo/logster
|
706de0abc211b2ec9f657bcc5c7c8d2a373ac49f
|
[
"MIT"
] | 181 |
2016-03-31T13:16:33.000Z
|
2021-11-14T09:04:39.000Z
|
test/test_helper.exs
|
rubysolo/logster
|
706de0abc211b2ec9f657bcc5c7c8d2a373ac49f
|
[
"MIT"
] | 23 |
2016-03-31T15:20:02.000Z
|
2021-04-29T10:05:37.000Z
|
test/test_helper.exs
|
rubysolo/logster
|
706de0abc211b2ec9f657bcc5c7c8d2a373ac49f
|
[
"MIT"
] | 27 |
2016-04-01T17:05:09.000Z
|
2021-12-10T15:11:36.000Z
|
Application.ensure_all_started(:plug)
ExUnit.configure(formatters: [ExUnit.CLIFormatter, ExUnitNotifier])
ExUnit.start()
| 24.4 | 67 | 0.827869 |
732cc54c2aacf91f18b96039774f32bca2e6f30a
| 1,696 |
ex
|
Elixir
|
clients/compute/lib/google_api/compute/v1/model/global_network_endpoint_groups_detach_endpoints_request.ex
|
medikent/elixir-google-api
|
98a83d4f7bfaeac15b67b04548711bb7e49f9490
|
[
"Apache-2.0"
] | null | null | null |
clients/compute/lib/google_api/compute/v1/model/global_network_endpoint_groups_detach_endpoints_request.ex
|
medikent/elixir-google-api
|
98a83d4f7bfaeac15b67b04548711bb7e49f9490
|
[
"Apache-2.0"
] | 1 |
2020-12-18T09:25:12.000Z
|
2020-12-18T09:25:12.000Z
|
clients/compute/lib/google_api/compute/v1/model/global_network_endpoint_groups_detach_endpoints_request.ex
|
medikent/elixir-google-api
|
98a83d4f7bfaeac15b67b04548711bb7e49f9490
|
[
"Apache-2.0"
] | 1 |
2020-10-04T10:12:44.000Z
|
2020-10-04T10:12:44.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Model.GlobalNetworkEndpointGroupsDetachEndpointsRequest do
@moduledoc """
## Attributes
* `networkEndpoints` (*type:* `list(GoogleApi.Compute.V1.Model.NetworkEndpoint.t)`, *default:* `nil`) - The list of network endpoints to be detached.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:networkEndpoints => list(GoogleApi.Compute.V1.Model.NetworkEndpoint.t())
}
field(:networkEndpoints, as: GoogleApi.Compute.V1.Model.NetworkEndpoint, type: :list)
end
defimpl Poison.Decoder,
for: GoogleApi.Compute.V1.Model.GlobalNetworkEndpointGroupsDetachEndpointsRequest do
def decode(value, options) do
GoogleApi.Compute.V1.Model.GlobalNetworkEndpointGroupsDetachEndpointsRequest.decode(
value,
options
)
end
end
defimpl Poison.Encoder,
for: GoogleApi.Compute.V1.Model.GlobalNetworkEndpointGroupsDetachEndpointsRequest do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 32.615385 | 153 | 0.757075 |
732cd24d480ecf07a7a902cd78f6ddfbe468a087
| 5,091 |
exs
|
Elixir
|
test/cforum/jobs/notify_users_message_job_test.exs
|
jrieger/cforum_ex
|
61f6ce84708cb55bd0feedf69853dae64146a7a0
|
[
"MIT"
] | null | null | null |
test/cforum/jobs/notify_users_message_job_test.exs
|
jrieger/cforum_ex
|
61f6ce84708cb55bd0feedf69853dae64146a7a0
|
[
"MIT"
] | null | null | null |
test/cforum/jobs/notify_users_message_job_test.exs
|
jrieger/cforum_ex
|
61f6ce84708cb55bd0feedf69853dae64146a7a0
|
[
"MIT"
] | null | null | null |
defmodule Cforum.Jobs.NotifyUsersMessageJobTest do
use Cforum.DataCase
import Swoosh.TestAssertions
import CforumWeb.Gettext
import Ecto.Query, warn: false
alias Cforum.Notifications
alias Cforum.Repo
setup do
user = insert(:user)
forum = insert(:public_forum)
thread = insert(:thread, forum: forum)
message = insert(:message, thread: thread, forum: forum, tags: [])
tag = insert(:tag, messages: [message])
{:ok, user: user, forum: forum, thread: thread, message: message, tag: tag}
end
describe "message" do
test "sends a mention notification when unconfigured", %{user: user, thread: thread, forum: forum, tag: tag} do
message =
insert(:message,
tags: [tag],
forum: forum,
thread: thread,
content: "foo bar baz\n@#{user.username}",
flags: %{"mentions" => [[user.username, user.user_id, false]]}
)
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "message")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
notifications = Notifications.list_notifications(user)
assert length(notifications) == 1
n = List.first(notifications)
assert n.otype == "message:mention"
end
test "sends a mention notification when configured", %{user: user, thread: thread, forum: forum, tag: tag} do
insert(:setting, user: user, options: %{"notify_on_mention" => "yes"})
message =
insert(:message,
tags: [tag],
content: "foo bar baz\n@#{user.username}",
thread: thread,
forum: forum,
flags: %{"mentions" => [[user.username, user.user_id, false]]}
)
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "message")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
notifications = Notifications.list_notifications(user)
assert length(notifications) == 1
n = List.first(notifications)
assert n.otype == "message:mention"
end
test "doesn't send a mention notification when configured", %{user: user, thread: thread, forum: forum, tag: tag} do
insert(:setting, user: user, options: %{"notify_on_mention" => "no"})
message =
insert(:message,
tags: [tag],
content: "foo bar baz\n@#{user.username}",
thread: thread,
forum: forum,
flags: %{"mentions" => [[user.username, user.user_id, false]]}
)
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "message")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
notifications = Notifications.list_notifications(user)
assert Enum.empty?(notifications)
end
test "sends an email notification", %{user: user, thread: thread, forum: forum, tag: tag} do
insert(:setting, user: user, options: %{"notify_on_mention" => "email"})
message =
insert(:message,
tags: [tag],
content: "foo bar baz\n@#{user.username}",
thread: thread,
forum: forum,
flags: %{"mentions" => [[user.username, user.user_id, false]]}
)
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "message")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
subject =
gettext("%{nick} mentioned you in a new message: “%{subject}”", subject: message.subject, nick: message.author)
msg_subject = gettext("new notification: “%{subject}”", subject: subject)
assert_email_sent(to: {user.username, user.email}, subject: msg_subject)
end
end
describe "thread" do
test "sends no notifications to users who didn't choose", %{thread: thread, message: message} do
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "thread")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
notifications = from(notification in Cforum.Notifications.Notification, select: count()) |> Repo.one()
assert notifications == 0
end
test "sends no notifications to users who chose no", %{user: user, thread: thread, message: message} do
insert(:setting, user: user, options: %{"notify_on_new_thread" => "no"})
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "thread")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
notifications = from(notification in Cforum.Notifications.Notification, select: count()) |> Repo.one()
assert notifications == 0
end
test "sends notifications to users who chose yes", %{user: user, thread: thread, message: message} do
insert(:setting, user: user, options: %{"notify_on_new_thread" => "yes"})
Cforum.Jobs.NotifyUsersMessageJob.enqueue(thread, message, "thread")
assert %{success: 1, failure: 0} == Oban.drain_queue(queue: :background)
notifications = from(notification in Cforum.Notifications.Notification, select: count()) |> Repo.one()
assert notifications == 1
end
end
end
| 36.891304 | 120 | 0.646238 |
732cea2afeae4f87279e0dc1b506244a016b3523
| 2,290 |
ex
|
Elixir
|
clients/cloud_run/lib/google_api/cloud_run/v1alpha1/model/resource_requirements.ex
|
MMore/elixir-google-api
|
0574ec1439d9bbfe22d63965be1681b0f45a94c9
|
[
"Apache-2.0"
] | null | null | null |
clients/cloud_run/lib/google_api/cloud_run/v1alpha1/model/resource_requirements.ex
|
MMore/elixir-google-api
|
0574ec1439d9bbfe22d63965be1681b0f45a94c9
|
[
"Apache-2.0"
] | null | null | null |
clients/cloud_run/lib/google_api/cloud_run/v1alpha1/model/resource_requirements.ex
|
MMore/elixir-google-api
|
0574ec1439d9bbfe22d63965be1681b0f45a94c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.CloudRun.V1alpha1.Model.ResourceRequirements do
@moduledoc """
ResourceRequirements describes the compute resource requirements.
## Attributes
* `limits` (*type:* `map()`, *default:* `nil`) - (Optional) Only memory and CPU are supported. Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
* `requests` (*type:* `map()`, *default:* `nil`) - (Optional) Only memory and CPU are supported. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:limits => map() | nil,
:requests => map() | nil
}
field(:limits, type: :map)
field(:requests, type: :map)
end
defimpl Poison.Decoder, for: GoogleApi.CloudRun.V1alpha1.Model.ResourceRequirements do
def decode(value, options) do
GoogleApi.CloudRun.V1alpha1.Model.ResourceRequirements.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.CloudRun.V1alpha1.Model.ResourceRequirements do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
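# A hedged construction sketch for the model above; the quantity strings are
# illustrative values in the k8s "quantity" format referenced in the
# moduledoc:
#
#     %GoogleApi.CloudRun.V1alpha1.Model.ResourceRequirements{
#       limits: %{"cpu" => "1", "memory" => "256Mi"},
#       requests: %{"cpu" => "500m", "memory" => "128Mi"}
#     }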
| 45.8 | 489 | 0.751092 |
732d03601d1f16ffb52511325f76d3a4f31b1a76
| 367 |
ex
|
Elixir
|
web/models/user.ex
|
nethad/appricot-phoenix
|
cbce91b23e785f6f3501014710a473547b4af386
|
[
"MIT"
] | null | null | null |
web/models/user.ex
|
nethad/appricot-phoenix
|
cbce91b23e785f6f3501014710a473547b4af386
|
[
"MIT"
] | null | null | null |
web/models/user.ex
|
nethad/appricot-phoenix
|
cbce91b23e785f6f3501014710a473547b4af386
|
[
"MIT"
] | null | null | null |
defmodule Appricot.User do
use Appricot.Web, :model
schema "users" do
field :name, :string
field :email, :string
timestamps()
end
@doc """
Builds a changeset based on the `struct` and `params`.
"""
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [:name, :email])
|> validate_required([:name, :email])
end
end
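# A minimal sketch of using the changeset above (attribute values are
# hypothetical):
#
#     changeset = Appricot.User.changeset(%Appricot.User{}, %{name: "Ada", email: "ada@example.com"})
#     changeset.valid?
#     #=> true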
| 18.35 | 56 | 0.615804 |
732d08e75ccd39bd308e9d08a515d9634b32d8d3
| 485 |
exs
|
Elixir
|
priv/repo/migrations/20200824131242_microsecond_timestamps.exs
|
sealas/sealax
|
3f11b7f649972a43f4812ea959bd2be2e0151baa
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20200824131242_microsecond_timestamps.exs
|
sealas/sealax
|
3f11b7f649972a43f4812ea959bd2be2e0151baa
|
[
"MIT"
] | 9 |
2021-08-19T01:09:55.000Z
|
2022-03-08T01:18:45.000Z
|
priv/repo/migrations/20200824131242_microsecond_timestamps.exs
|
sealas/sealax
|
3f11b7f649972a43f4812ea959bd2be2e0151baa
|
[
"MIT"
] | null | null | null |
defmodule Sealax.Repo.Migrations.MicrosecondTimestamps do
use Ecto.Migration
def change do
alter table(:account) do
modify :inserted_at, :utc_datetime_usec
modify :updated_at, :utc_datetime_usec
end
alter table(:user) do
modify :inserted_at, :utc_datetime_usec
modify :updated_at, :utc_datetime_usec
end
alter table(:items) do
modify :inserted_at, :utc_datetime_usec
modify :updated_at, :utc_datetime_usec
end
end
end
| 25.526316 | 57 | 0.715464 |
732d14937d34c6ff3b9f6e4cf0f02c828dcdb3b2
| 1,215 |
exs
|
Elixir
|
config/config.exs
|
philipecortez/issues
|
5887fbb0435d43328a2b44fd2817b7de60e67e49
|
[
"CC0-1.0"
] | null | null | null |
config/config.exs
|
philipecortez/issues
|
5887fbb0435d43328a2b44fd2817b7de60e67e49
|
[
"CC0-1.0"
] | null | null | null |
config/config.exs
|
philipecortez/issues
|
5887fbb0435d43328a2b44fd2817b7de60e67e49
|
[
"CC0-1.0"
] | null | null | null |
# This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
use Mix.Config
config :issues, github_url: "https://api.github.com"
config :logger, compile_time_purge_level: :info
# This configuration is loaded before any dependency and is restricted
# to this project. If another project depends on this project, this
# file won't be loaded nor affect the parent project. For this reason,
# if you want to provide default values for your application for
# 3rd-party users, it should be done in your "mix.exs" file.
# You can configure for your application as:
#
# config :issues, key: :value
#
# And access this configuration in your application as:
#
# Application.get_env(:issues, :key)
#
# Or configure a 3rd-party app:
#
# config :logger, level: :info
#
# It is also possible to import configuration files, relative to this
# directory. For example, you can emulate configuration per environment
# by uncommenting the line below and defining dev.exs, test.exs and such.
# Configuration from the imported file will override the ones defined
# here (which is why it is important to import them last).
#
# import_config "#{Mix.env}.exs"
| 37.96875 | 73 | 0.753909 |
732d1c08929082cc59560b0115eaff79eb17ec64
| 686 |
ex
|
Elixir
|
lib/ex_twilio/resources/studio/execution.ex
|
kianmeng/ex_twilio
|
d8621cab60bd03a46375a05b377623875754dda6
|
[
"MIT"
] | null | null | null |
lib/ex_twilio/resources/studio/execution.ex
|
kianmeng/ex_twilio
|
d8621cab60bd03a46375a05b377623875754dda6
|
[
"MIT"
] | 1 |
2021-02-19T04:34:52.000Z
|
2021-03-29T19:14:14.000Z
|
lib/ex_twilio/resources/studio/execution.ex
|
kianmeng/ex_twilio
|
d8621cab60bd03a46375a05b377623875754dda6
|
[
"MIT"
] | 2 |
2022-03-08T22:05:17.000Z
|
2022-03-09T05:29:46.000Z
|
defmodule ExTwilio.Studio.Execution do
@moduledoc """
Represents a specific person's run through a Flow.
An execution is active while the user is in the Flow, and it is considered ended when they stop or are kicked out of the Flow.
- [Twilio docs](https://www.twilio.com/docs/studio/rest-api/execution)
"""
defstruct [
:sid,
:account_sid,
:flow_sid,
:context,
:contact_sid,
:status,
:date_created,
:date_updated,
:url
]
use ExTwilio.Resource, import: [:stream, :all, :find, :create, :delete]
def parents,
do: [%ExTwilio.Parent{module: ExTwilio.Studio.Flow, key: :flow}]
def children, do: [:execution_context, :steps]
end
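# A hedged usage sketch: `use ExTwilio.Resource` injects the imported
# functions listed above, and the parent from `parents/0` is passed as an
# option; the flow SID below is a made-up placeholder:
#
#     ExTwilio.Studio.Execution.all(flow: "FWxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")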
| 24.5 | 128 | 0.674927 |
732d51e1f76ce8904c6086e26c8415f66b2f8d45
| 998 |
ex
|
Elixir
|
lib/monitor/application.ex
|
licaonfee/phoenix_gitlab_monitor
|
1b7dd437018d42a2b7b9a1643e6767a48f312eee
|
[
"MIT"
] | 12 |
2018-11-04T03:39:34.000Z
|
2020-04-29T19:30:58.000Z
|
lib/monitor/application.ex
|
licaonfee/phoenix_gitlab_monitor
|
1b7dd437018d42a2b7b9a1643e6767a48f312eee
|
[
"MIT"
] | 16 |
2018-11-07T01:05:01.000Z
|
2021-05-07T21:32:07.000Z
|
lib/monitor/application.ex
|
licaonfee/phoenix_gitlab_monitor
|
1b7dd437018d42a2b7b9a1643e6767a48f312eee
|
[
"MIT"
] | 3 |
2019-08-27T20:29:00.000Z
|
2020-05-25T20:36:12.000Z
|
defmodule Monitor.Application do
use Application
# See https://hexdocs.pm/elixir/Application.html
# for more information on OTP Applications
def start(_type, _args) do
import Supervisor.Spec
# Define workers and child supervisors to be supervised
children = [
# Start the endpoint when the application starts
supervisor(MonitorWeb.Endpoint, []),
supervisor(Monitor.PipelineCache, [])
# Start your own worker by calling: Monitor.Worker.start_link(arg1, arg2, arg3)
# worker(Monitor.Worker, [arg1, arg2, arg3]),
]
# See https://hexdocs.pm/elixir/Supervisor.html
# for other strategies and supported options
opts = [strategy: :one_for_one, name: Monitor.Supervisor]
Supervisor.start_link(children, opts)
end
# Tell Phoenix to update the endpoint configuration
# whenever the application is updated.
def config_change(changed, _new, removed) do
MonitorWeb.Endpoint.config_change(changed, removed)
:ok
end
end
| 32.193548 | 85 | 0.718437 |
732da7aceef722a755499516918b3b91de152bc1
| 9,244 |
exs
|
Elixir
|
test/cldr_test.exs
|
kianmeng/cldr
|
d0510a55b5406e4604d8823a98ff6b6aea2bb397
|
[
"Apache-2.0"
] | null | null | null |
test/cldr_test.exs
|
kianmeng/cldr
|
d0510a55b5406e4604d8823a98ff6b6aea2bb397
|
[
"Apache-2.0"
] | null | null | null |
test/cldr_test.exs
|
kianmeng/cldr
|
d0510a55b5406e4604d8823a98ff6b6aea2bb397
|
[
"Apache-2.0"
] | null | null | null |
defmodule Cldr.Test do
use ExUnit.Case, async: true
test "that the cldr source data directory is correct" do
assert String.ends_with?(Cldr.Config.source_data_dir(), "/priv/cldr") == true
end
test "that the client data directory is correct" do
assert String.ends_with?(Cldr.Config.client_data_dir(TestBackend.Cldr), "/priv/cldr") ==
true
end
test "that the cldr data directory is correct" do
assert String.ends_with?(Cldr.Config.cldr_data_dir(), "/_build/test/lib/ex_cldr/priv/cldr") ==
true
end
test "that we have the correct modules (keys) for the json consolidation" do
assert Cldr.Config.required_modules() ==
[
"number_formats",
"list_formats",
"currencies",
"number_systems",
"number_symbols",
"minimum_grouping_digits",
"rbnf",
"units",
"date_fields",
"dates",
"territories",
"languages",
"delimiters",
"ellipsis",
"lenient_parse",
"locale_display_names",
"subdivisions"
]
end
test "default locale" do
assert TestBackend.Cldr.default_locale() ==
%Cldr.LanguageTag{
backend: TestBackend.Cldr,
canonical_locale_name: "en-001",
cldr_locale_name: :"en-001",
language_subtags: [],
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: :en,
requested_locale_name: "en-001",
script: :Latn,
territory: :"001",
transform: %{},
language_variants: []
}
end
test "locale name does not exist" do
alias TestBackend.Cldr
refute Cldr.available_locale_name?("jabberwocky")
end
test "that we have the right number of rbnf locales" do
alias TestBackend.Cldr
assert Cldr.known_rbnf_locale_names() ==
[
:af,
:ak,
:am,
:ar,
:az,
:be,
:bg,
:bs,
:ca,
:ccp,
:chr,
:cs,
:cy,
:da,
:de,
:"de-CH",
:ee,
:el,
:en,
:"en-IN",
:eo,
:es,
:"es-419",
:et,
:fa,
:"fa-AF",
:ff,
:fi,
:fil,
:fo,
:fr,
:"fr-BE",
:"fr-CH",
:ga,
:he,
:hi,
:hr,
:hu,
:hy,
:id,
:is,
:it,
:ja,
:ka,
:kl,
:km,
:ko,
:ky,
:lb,
:lo,
:lrc,
:lt,
:lv,
:mk,
:ms,
:mt,
:my,
:nb,
:ne,
:nl,
:nn,
:no,
:pl,
:pt,
:"pt-PT",
:qu,
:ro,
:ru,
:se,
:sk,
:sl,
:sq,
:sr,
:"sr-Latn",
:su,
:sv,
:sw,
:ta,
:th,
:tr,
:uk,
:und,
:vi,
:yue,
:"yue-Hans",
:zh,
:"zh-Hant"
]
end
test "that locale substitutions are applied" do
assert Cldr.Locale.substitute_aliases(Cldr.LanguageTag.Parser.parse!("en-US")) ==
%Cldr.LanguageTag{
backend: nil,
canonical_locale_name: nil,
cldr_locale_name: nil,
language_subtags: [],
extensions: %{},
gettext_locale_name: nil,
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: "en-US",
script: nil,
territory: "US",
transform: %{},
language_variants: []
}
assert Cldr.Locale.substitute_aliases(Cldr.LanguageTag.Parser.parse!("sh_Arab_AQ")) ==
%Cldr.LanguageTag{
backend: nil,
canonical_locale_name: nil,
cldr_locale_name: nil,
language_subtags: [],
extensions: %{},
gettext_locale_name: nil,
language: "sr",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: "sh_Arab_AQ",
script: "Arab",
territory: "AQ",
transform: %{},
language_variants: []
}
assert Cldr.Locale.substitute_aliases(Cldr.LanguageTag.Parser.parse!("sh_AQ")) ==
%Cldr.LanguageTag{
backend: nil,
canonical_locale_name: nil,
cldr_locale_name: nil,
language_subtags: [],
extensions: %{},
gettext_locale_name: nil,
language: "sr",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: "sh_AQ",
script: :Latn,
territory: "AQ",
transform: %{},
language_variants: []
}
end
test "that we can have repeated currencies in a territory" do
assert Cldr.Config.territory(:PS)[:currency] ==
[
JOD: %{from: ~D[1996-02-12]},
ILS: %{from: ~D[1985-09-04]},
ILP: %{from: ~D[1967-06-01], to: ~D[1980-02-22]},
JOD: %{from: ~D[1950-07-01], to: ~D[1967-06-01]}
]
end
test "that we get the correct default json library" do
assert Cldr.Config.json_library() == Jason
end
test "that configs merge correctly" do
assert WithOtpAppBackend.Cldr.__cldr__(:config).locales ==
[:en, :"en-001", :fr, :und]
assert WithGettextBackend.Cldr.__cldr__(:config).locales ==
[:en, :"en-001", :"en-GB", :es, :it, :und]
assert TestBackend.Cldr.__cldr__(:config).locales == :all
end
test "that data_dir is correctly resolved" do
# data_dir configured in the otp_app
assert "./with_opt_app_backend/cldr/some_dir" ==
Cldr.Config.client_data_dir(WithOtpAppBackend.Cldr)
# data_dir configured on the module
assert "./another_backend/cldr/data_dir" == Cldr.Config.client_data_dir(AnotherBackend.Cldr)
# default data_dir
assert Cldr.Config.client_data_dir(DefaultBackend.Cldr) =~
"_build/test/lib/ex_cldr/priv/cldr"
end
test "that an unknown otp_app config raises" do
assert_raise Cldr.UnknownOTPAppError, "The configured OTP app :rubbish is not known", fn ->
Cldr.Config.client_data_dir(%{otp_app: :rubbish})
end
end
test "return of currency map" do
{:ok, currencies} = Cldr.Config.currencies_for(:en, WithOtpAppBackend.Cldr)
assert Map.get(currencies, :AUD)
end
test "correct date parsing of currencies" do
{:ok, currencies} = Cldr.Config.currencies_for(:en, WithOtpAppBackend.Cldr)
assert Map.get(currencies, :YUM).from == 1994
assert Map.get(currencies, :YUM).to == 2002
assert Map.get(currencies, :UYI).name == "Uruguayan Peso (Indexed Units)"
assert Map.get(currencies, :USN).name == "US Dollar (Next day)"
end
test "UTF8 names in currency annotations" do
{:ok, currencies} = Cldr.Config.currencies_for(:de, TestBackend.Cldr)
assert Map.get(currencies, :USN).name == "US Dollar (Nächster Tag)"
end
test "validating locales that are not precompiled" do
assert {:ok, _locale} = Cldr.validate_locale("en-au", TestBackend.Cldr)
assert {:ok, _locale} = Cldr.validate_locale("en_au", TestBackend.Cldr)
assert {:ok, _locale} = Cldr.validate_locale("en-au-u-ca-buddhist", TestBackend.Cldr)
end
if function_exported?(Code, :fetch_docs, 1) do
test "that no module docs are generated for a backend" do
assert {:docs_v1, _, :elixir, _, :hidden, %{}, _} = Code.fetch_docs(DefaultBackend.Cldr)
end
test "that module docs are generated for a backend" do
assert {:docs_v1, _, :elixir, "text/markdown", %{"en" => _}, %{}, _} =
Code.fetch_docs(TestBackend.Cldr)
end
end
test "Cldr.Chars.to_string for a language_tag" do
{:ok, locale} = Cldr.validate_locale("en-US-u-nu-thai-cu-AUD", MyApp.Cldr)
assert Cldr.to_string(locale) == "en-Latn-US-u-cu-aud-nu-thai"
end
end
| 30.407895 | 98 | 0.478473 |
732dabb2ffdbff58a27d4ad79bff7f84e52bf030
| 33,156 |
ex
|
Elixir
|
apps/astarte_housekeeping/lib/astarte_housekeeping/queries.ex
|
drf/astarte
|
f1163ce26bcf53470b0d1419abb670d73bb280f5
|
[
"Apache-2.0"
] | null | null | null |
apps/astarte_housekeeping/lib/astarte_housekeeping/queries.ex
|
drf/astarte
|
f1163ce26bcf53470b0d1419abb670d73bb280f5
|
[
"Apache-2.0"
] | 6 |
2019-11-15T16:00:19.000Z
|
2019-12-16T10:41:48.000Z
|
apps/astarte_housekeeping/lib/astarte_housekeeping/queries.ex
|
drf/astarte
|
f1163ce26bcf53470b0d1419abb670d73bb280f5
|
[
"Apache-2.0"
] | 2 |
2018-08-27T13:26:02.000Z
|
2019-11-19T11:35:22.000Z
|
#
# This file is part of Astarte.
#
# Copyright 2017-2018 Ispirata Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
defmodule Astarte.Housekeeping.Queries do
require Logger
alias Astarte.Core.Realm
alias Astarte.Housekeeping.Config
@datacenter_name_regex ~r/^[a-z][a-zA-Z0-9_-]*$/
@default_replication_factor 1
@current_astarte_schema_version 2
@current_realm_schema_version 2
def create_realm(realm_name, public_key_pem, nil = _replication_factor, opts) do
create_realm(realm_name, public_key_pem, @default_replication_factor, opts)
end
def create_realm(realm_name, public_key_pem, replication, opts) do
with :ok <- validate_realm_name(realm_name),
:ok <- Xandra.Cluster.run(:xandra, &check_replication(&1, replication)),
{:ok, replication_map_str} <- build_replication_map_str(replication) do
if opts[:async] do
{:ok, _pid} =
Task.start(fn ->
do_create_realm(realm_name, public_key_pem, replication_map_str)
end)
:ok
else
do_create_realm(realm_name, public_key_pem, replication_map_str)
end
end
end
defp build_replication_map_str(replication_factor)
when is_integer(replication_factor) and replication_factor > 0 do
replication_map_str =
"{'class': 'SimpleStrategy', 'replication_factor': #{replication_factor}}"
{:ok, replication_map_str}
end
defp build_replication_map_str(datacenter_replication_factors)
when is_map(datacenter_replication_factors) do
datacenter_replications_str =
Enum.map(datacenter_replication_factors, fn {datacenter, replication_factor} ->
"'#{datacenter}': #{replication_factor}"
end)
|> Enum.join(",")
replication_map_str = "{'class': 'NetworkTopologyStrategy', #{datacenter_replications_str}}"
{:ok, replication_map_str}
end
defp build_replication_map_str(_invalid_replication) do
{:error, :invalid_replication}
end
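# For illustration only (inputs assumed, not taken from the original source):
# the two happy-path clauses above yield CQL replication maps like
#
#     build_replication_map_str(3)
#     #=> {:ok, "{'class': 'SimpleStrategy', 'replication_factor': 3}"}
#
#     build_replication_map_str(%{"dc1" => 3, "dc2" => 2})
#     #=> {:ok, "{'class': 'NetworkTopologyStrategy', 'dc1': 3,'dc2': 2}"}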
defp validate_realm_name(realm_name) do
if Realm.valid_name?(realm_name) do
:ok
else
_ =
Logger.warn("Invalid realm name.",
tag: "invalid_realm_name",
realm: realm_name
)
{:error, :realm_not_allowed}
end
end
defp do_create_realm(realm_name, public_key_pem, replication_map_str) do
Xandra.Cluster.run(:xandra, [timeout: 60_000], fn conn ->
with :ok <- validate_realm_name(realm_name),
:ok <- create_realm_keyspace(conn, realm_name, replication_map_str),
:ok <- use_realm(conn, realm_name),
:ok <- create_realm_kv_store(conn),
:ok <- create_names_table(conn),
:ok <- create_devices_table(conn),
:ok <- create_endpoints_table(conn),
:ok <- create_interfaces_table(conn),
:ok <- create_individual_properties_table(conn),
:ok <- create_simple_triggers_table(conn),
:ok <- create_grouped_devices_table(conn),
:ok <- insert_realm_public_key(conn, public_key_pem),
:ok <- insert_realm_astarte_schema_version(conn),
:ok <- insert_realm(conn, realm_name) do
:ok
else
{:error, reason} ->
_ =
Logger.warn("Cannot create realm: #{inspect(reason)}.",
tag: "realm_creation_failed",
realm: realm_name
)
{:error, reason}
end
end)
end
defp use_realm(conn, realm_name) do
with :ok <- validate_realm_name(realm_name),
{:ok, %Xandra.SetKeyspace{}} <- Xandra.execute(conn, "USE #{realm_name}") do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
{:error, reason} ->
_ =
Logger.warn("Cannot USE realm: #{inspect(reason)}.",
tag: "use_realm_error",
realm: realm_name
)
{:error, reason}
end
end
defp create_realm_keyspace(conn, realm_name, replication_map_str) do
query = """
CREATE KEYSPACE #{realm_name}
WITH replication = #{replication_map_str}
AND durable_writes = true;
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_realm_kv_store(realm_conn) do
query = """
CREATE TABLE kv_store (
group varchar,
key varchar,
value blob,
PRIMARY KEY ((group), key)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_names_table(realm_conn) do
query = """
CREATE TABLE names (
object_name varchar,
object_type int,
object_uuid uuid,
PRIMARY KEY ((object_name), object_type)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_devices_table(realm_conn) do
query = """
CREATE TABLE devices (
device_id uuid,
aliases map<ascii, varchar>,
introspection map<ascii, int>,
introspection_minor map<ascii, int>,
old_introspection map<frozen<tuple<ascii, int>>, int>,
protocol_revision int,
first_registration timestamp,
credentials_secret ascii,
inhibit_credentials_request boolean,
cert_serial ascii,
cert_aki ascii,
first_credentials_request timestamp,
last_connection timestamp,
last_disconnection timestamp,
connected boolean,
pending_empty_cache boolean,
total_received_msgs bigint,
total_received_bytes bigint,
exchanged_bytes_by_interface map<frozen<tuple<ascii, int>>, bigint>,
exchanged_msgs_by_interface map<frozen<tuple<ascii, int>>, bigint>,
last_credentials_request_ip inet,
last_seen_ip inet,
groups map<text, timeuuid>,
PRIMARY KEY (device_id)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_endpoints_table(realm_conn) do
query = """
CREATE TABLE endpoints (
interface_id uuid,
endpoint_id uuid,
interface_name ascii,
interface_major_version int,
interface_minor_version int,
interface_type int,
endpoint ascii,
value_type int,
reliability int,
retention int,
expiry int,
database_retention_ttl int,
database_retention_policy int,
allow_unset boolean,
explicit_timestamp boolean,
description varchar,
doc varchar,
PRIMARY KEY ((interface_id), endpoint_id)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_interfaces_table(realm_conn) do
query = """
CREATE TABLE interfaces (
name ascii,
major_version int,
minor_version int,
interface_id uuid,
storage_type int,
storage ascii,
type int,
ownership int,
aggregation int,
automaton_transitions blob,
automaton_accepting_states blob,
description varchar,
doc varchar,
PRIMARY KEY (name, major_version)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_individual_properties_table(realm_conn) do
query = """
CREATE TABLE individual_properties (
device_id uuid,
interface_id uuid,
endpoint_id uuid,
path varchar,
reception_timestamp timestamp,
reception_timestamp_submillis smallint,
double_value double,
integer_value int,
boolean_value boolean,
longinteger_value bigint,
string_value varchar,
binaryblob_value blob,
datetime_value timestamp,
doublearray_value list<double>,
integerarray_value list<int>,
booleanarray_value list<boolean>,
longintegerarray_value list<bigint>,
stringarray_value list<varchar>,
binaryblobarray_value list<blob>,
datetimearray_value list<timestamp>,
PRIMARY KEY((device_id, interface_id), endpoint_id, path)
)
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_simple_triggers_table(realm_conn) do
query = """
CREATE TABLE simple_triggers (
object_id uuid,
object_type int,
parent_trigger_id uuid,
simple_trigger_id uuid,
trigger_data blob,
trigger_target blob,
PRIMARY KEY ((object_id, object_type), parent_trigger_id, simple_trigger_id)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_grouped_devices_table(realm_conn) do
query = """
CREATE TABLE grouped_devices (
group_name varchar,
insertion_uuid timeuuid,
device_id uuid,
PRIMARY KEY ((group_name), insertion_uuid, device_id)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp insert_realm_public_key(realm_conn, public_key_pem) do
query = """
INSERT INTO kv_store (group, key, value)
VALUES ('auth', 'jwt_public_key_pem', varcharAsBlob(:public_key_pem));
"""
params = %{"public_key_pem" => public_key_pem}
with {:ok, prepared} <- Xandra.prepare(realm_conn, query),
{:ok, %Xandra.Void{}} <-
Xandra.execute(realm_conn, prepared, params, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp insert_realm_astarte_schema_version(realm_conn) do
query = """
INSERT INTO kv_store
(group, key, value)
VALUES ('astarte', 'schema_version', bigintAsBlob(#{@current_realm_schema_version}));
"""
with {:ok, %Xandra.Void{}} <-
Xandra.execute(realm_conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp insert_realm(conn, realm_name) do
query = """
INSERT INTO astarte.realms (realm_name)
VALUES (:realm_name);
"""
params = %{"realm_name" => realm_name}
with {:ok, prepared} <- Xandra.prepare(conn, query),
{:ok, %Xandra.Void{}} <-
Xandra.execute(conn, prepared, params, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
def initialize_database do
Xandra.Cluster.run(:xandra, [timeout: 60_000], fn conn ->
with :ok <- create_astarte_keyspace(conn),
:ok <- create_realms_table(conn),
:ok <- create_astarte_kv_store(conn),
:ok <- insert_astarte_schema_version(conn) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ =
Logger.error(
"Database error while initializing database: #{inspect(err)}. ASTARTE WILL NOT WORK.",
tag: "init_database_error"
)
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.error(
"Database connection error while initializing database: #{inspect(err)}. ASTARTE WILL NOT WORK.",
tag: "init_database_connection_error"
)
{:error, :database_connection_error}
{:error, reason} ->
_ =
Logger.error(
"Error while initializing database: #{inspect(reason)}. ASTARTE WILL NOT WORK.",
tag: "init_error"
)
{:error, reason}
end
end)
end
defp create_astarte_keyspace(conn) do
# TODO: add support for creating the astarte keyspace with NetworkTopologyStrategy,
# right now the replication factor is an integer so SimpleStrategy is always used
astarte_keyspace_replication = Config.astarte_keyspace_replication_factor()
with {:ok, replication_map_str} <- build_replication_map_str(astarte_keyspace_replication),
query = """
CREATE KEYSPACE astarte
WITH replication = #{replication_map_str}
AND durable_writes = true;
""",
:ok <- check_replication(conn, astarte_keyspace_replication),
{:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
{:error, reason} ->
_ =
Logger.warn("Cannot create Astarte Keyspace: #{inspect(reason)}.",
tag: "astarte_keyspace_creation_failed"
)
{:error, reason}
end
end
defp create_realms_table(conn) do
query = """
CREATE TABLE astarte.realms (
realm_name varchar,
PRIMARY KEY (realm_name)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp create_astarte_kv_store(conn) do
query = """
CREATE TABLE astarte.kv_store (
group varchar,
key varchar,
value blob,
PRIMARY KEY ((group), key)
);
"""
with {:ok, %Xandra.SchemaChange{}} <-
Xandra.execute(conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp insert_astarte_schema_version(conn) do
query = """
INSERT INTO astarte.kv_store
(group, key, value)
VALUES ('astarte', 'schema_version', bigintAsBlob(#{@current_astarte_schema_version}));
"""
with {:ok, %Xandra.Void{}} <- Xandra.execute(conn, query, %{}, consistency: :each_quorum) do
:ok
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
def is_realm_existing(realm_name) do
Xandra.Cluster.run(:xandra, &is_realm_existing(&1, realm_name))
end
def is_astarte_keyspace_existing do
query = """
SELECT keyspace_name
FROM system_schema.keyspaces
WHERE keyspace_name='astarte'
"""
case Xandra.Cluster.execute(:xandra, query) do
{:ok, %Xandra.Page{} = page} ->
if Enum.count(page) > 0 do
{:ok, true}
else
{:ok, false}
end
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
def check_astarte_health(consistency) do
query = """
SELECT COUNT(*)
FROM astarte.realms
"""
with {:ok, %Xandra.Page{} = page} <-
Xandra.Cluster.execute(:xandra, query, %{}, consistency: consistency),
{:ok, _} <- Enum.fetch(page, 0) do
:ok
else
:error ->
_ =
Logger.warn("Cannot retrieve count for astarte.realms table.",
tag: "health_check_error"
)
{:error, :health_check_bad}
{:error, %Xandra.Error{} = err} ->
_ =
Logger.warn("Database error, health is not good: #{inspect(err)}.",
tag: "health_check_database_error"
)
{:error, :health_check_bad}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database error, health is not good: #{inspect(err)}.",
tag: "health_check_database_connection_error"
)
{:error, :database_connection_error}
end
end
def list_realms do
query = """
SELECT realm_name
FROM astarte.realms;
"""
case Xandra.Cluster.execute(:xandra, query, %{}, consistency: :quorum) do
{:ok, %Xandra.Page{} = page} ->
{:ok, Enum.map(page, fn %{"realm_name" => realm_name} -> realm_name end)}
{:error, %Xandra.Error{} = err} ->
_ =
Logger.warn("Database error while listing realms: #{inspect(err)}.",
tag: "database_error"
)
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error while listing realms: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
def get_realm(realm_name) do
Xandra.Cluster.run(:xandra, fn conn ->
with {:ok, true} <- is_realm_existing(conn, realm_name),
{:ok, public_key} <- get_public_key(conn, realm_name),
{:ok, replication_map} <- get_realm_replication(conn, realm_name) do
case replication_map do
%{
"class" => "org.apache.cassandra.locator.SimpleStrategy",
"replication_factor" => replication_factor_string
} ->
{replication_factor, ""} = Integer.parse(replication_factor_string)
%{
realm_name: realm_name,
jwt_public_key_pem: public_key,
replication_class: "SimpleStrategy",
replication_factor: replication_factor
}
%{"class" => "org.apache.cassandra.locator.NetworkTopologyStrategy"} ->
datacenter_replication_factors =
Enum.reduce(replication_map, %{}, fn
{"class", _}, acc ->
acc
{datacenter, replication_factor_string}, acc ->
{replication_factor, ""} = Integer.parse(replication_factor_string)
Map.put(acc, datacenter, replication_factor)
end)
%{
realm_name: realm_name,
jwt_public_key_pem: public_key,
replication_class: "NetworkTopologyStrategy",
datacenter_replication_factors: datacenter_replication_factors
}
end
else
# Returned by is_realm_existing
{:ok, false} ->
{:error, :realm_not_found}
{:error, reason} ->
_ =
Logger.warn("Error while getting realm: #{inspect(reason)}.",
tag: "get_realm_error",
realm: realm_name
)
{:error, reason}
end
end)
end
defp is_realm_existing(conn, realm_name) do
query = """
SELECT realm_name from astarte.realms
WHERE realm_name=:realm_name;
"""
with {:ok, prepared} <- Xandra.prepare(conn, query),
{:ok, %Xandra.Page{} = page} <-
Xandra.execute(conn, prepared, %{"realm_name" => realm_name}, consistency: :quorum) do
if Enum.count(page) > 0 do
{:ok, true}
else
{:ok, false}
end
else
{:error, reason} ->
_ =
Logger.warn("Cannot check if realm exists: #{inspect(reason)}.",
tag: "is_realm_existing_error",
realm: realm_name
)
{:error, reason}
end
end
defp get_public_key(conn, realm_name) do
statement = """
SELECT blobAsVarchar(value)
FROM :realm_name.kv_store
WHERE group='auth' AND key='jwt_public_key_pem';
"""
with :ok <- validate_realm_name(realm_name),
query = String.replace(statement, ":realm_name", realm_name),
{:ok, %Xandra.Page{} = page} <- Xandra.execute(conn, query, %{}, consistency: :quorum) do
case Enum.fetch(page, 0) do
{:ok, %{"system.blobasvarchar(value)" => public_key}} ->
{:ok, public_key}
:error ->
{:error, :public_key_not_found}
end
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
{:error, reason} ->
_ =
Logger.warn("Cannot get public key: #{inspect(reason)}.",
tag: "get_public_key_error",
realm: realm_name
)
{:error, reason}
end
end
defp get_realm_replication(conn, realm_name) do
query = """
SELECT replication
FROM system_schema.keyspaces
WHERE keyspace_name=:realm_name
"""
with {:ok, prepared} <- Xandra.prepare(conn, query),
{:ok, page} <- Xandra.execute(conn, prepared, %{"realm_name" => realm_name}) do
case Enum.fetch(page, 0) do
{:ok, %{"replication" => replication_map}} ->
{:ok, replication_map}
:error ->
# Something is really wrong here, but we still cover this case
_ =
Logger.error("Cannot find realm replication.",
tag: "realm_replication_not_found",
realm: realm_name
)
{:error, :realm_replication_not_found}
end
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
# Replication factor of 1 is always ok
defp check_replication(_conn, 1) do
:ok
end
# If replication factor is an integer, we're using SimpleStrategy
# Check that the replication factor is <= the number of nodes in the same datacenter
defp check_replication(conn, replication_factor)
when is_integer(replication_factor) and replication_factor > 1 do
with {:ok, local_datacenter} <- get_local_datacenter(conn) do
check_replication_for_datacenter(conn, local_datacenter, replication_factor, local: true)
end
end
defp check_replication(conn, datacenter_replication_factors)
when is_map(datacenter_replication_factors) do
with {:ok, local_datacenter} <- get_local_datacenter(conn) do
Enum.reduce_while(datacenter_replication_factors, :ok, fn
{datacenter, replication_factor}, _acc ->
opts =
if datacenter == local_datacenter do
[local: true]
else
[]
end
with {:valid_dc_name, true} <-
{:valid_dc_name, Regex.match?(@datacenter_name_regex, datacenter)},
:ok <-
check_replication_for_datacenter(conn, datacenter, replication_factor, opts) do
{:cont, :ok}
else
{:valid_dc_name, false} ->
{:halt, {:error, :invalid_datacenter_name}}
{:error, reason} ->
{:halt, {:error, reason}}
end
end)
end
end
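# For illustration (assumed inputs, not from the original source):
#
#     check_replication(conn, 1)
#     #=> :ok (a factor of 1 always passes, see the first clause above)
#
#     check_replication(conn, %{"bad dc!" => 2})
#     #=> {:error, :invalid_datacenter_name} ("bad dc!" fails @datacenter_name_regex)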
defp get_local_datacenter(conn) do
query = """
SELECT data_center
FROM system.local;
"""
with {:ok, %Xandra.Page{} = page} <- Xandra.execute(conn, query) do
case Enum.fetch(page, 0) do
{:ok, %{"data_center" => datacenter}} ->
{:ok, datacenter}
:error ->
_ =
Logger.error(
"Empty dataset while getting local datacenter, something is really wrong.",
tag: "get_local_datacenter_error"
)
{:error, :local_datacenter_not_found}
end
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
defp check_replication_for_datacenter(conn, datacenter, replication_factor, opts) do
query = """
SELECT COUNT(*)
FROM system.peers
WHERE data_center=:data_center
ALLOW FILTERING;
"""
with {:ok, prepared} <- Xandra.prepare(conn, query),
{:ok, %Xandra.Page{} = page} <-
Xandra.execute(conn, prepared, %{"data_center" => datacenter}) do
case Enum.fetch(page, 0) do
:error ->
_ =
Logger.warn("Cannot retrieve node count for datacenter #{datacenter}.",
tag: "datacenter_not_found",
datacenter: datacenter
)
{:error, :datacenter_not_found}
{:ok, %{"count" => dc_node_count}} ->
# If we're querying the datacenter of the local node, add 1 (itself) to the count
actual_node_count =
if opts[:local] do
dc_node_count + 1
else
dc_node_count
end
if replication_factor <= actual_node_count do
:ok
else
_ =
Logger.warn(
"Trying to set replication_factor #{replication_factor} " <>
"in datacenter #{datacenter} that has #{actual_node_count} nodes.",
tag: "invalid_replication_factor",
datacenter: datacenter,
replication_factor: replication_factor
)
error_message =
"replication_factor #{replication_factor} is >= #{actual_node_count} nodes " <>
"in datacenter #{datacenter}"
{:error, {:invalid_replication, error_message}}
end
end
else
{:error, %Xandra.Error{} = err} ->
_ = Logger.warn("Database error: #{inspect(err)}.", tag: "database_error")
{:error, :database_error}
{:error, %Xandra.ConnectionError{} = err} ->
_ =
Logger.warn("Database connection error: #{inspect(err)}.",
tag: "database_connection_error"
)
{:error, :database_connection_error}
end
end
end
| 30.059837 | 111 | 0.594221 |
732db31d3045ab631514b9db8f2357dcc9b284bd
| 1,864 |
ex
|
Elixir
|
clients/admin/lib/google_api/admin/directory_v1/model/batch_delete_printers_response.ex
|
pojiro/elixir-google-api
|
928496a017d3875a1929c6809d9221d79404b910
|
[
"Apache-2.0"
] | 1 |
2021-12-20T03:40:53.000Z
|
2021-12-20T03:40:53.000Z
|
clients/admin/lib/google_api/admin/directory_v1/model/batch_delete_printers_response.ex
|
pojiro/elixir-google-api
|
928496a017d3875a1929c6809d9221d79404b910
|
[
"Apache-2.0"
] | 1 |
2020-08-18T00:11:23.000Z
|
2020-08-18T00:44:16.000Z
|
clients/admin/lib/google_api/admin/directory_v1/model/batch_delete_printers_response.ex
|
pojiro/elixir-google-api
|
928496a017d3875a1929c6809d9221d79404b910
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Admin.Directory_v1.Model.BatchDeletePrintersResponse do
@moduledoc """
Response for deleting existing printers in batch.
## Attributes
* `failedPrinters` (*type:* `list(GoogleApi.Admin.Directory_v1.Model.FailureInfo.t)`, *default:* `nil`) - A list of update failures.
* `printerIds` (*type:* `list(String.t)`, *default:* `nil`) - A list of Printer.id that were successfully deleted.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:failedPrinters => list(GoogleApi.Admin.Directory_v1.Model.FailureInfo.t()) | nil,
:printerIds => list(String.t()) | nil
}
field(:failedPrinters, as: GoogleApi.Admin.Directory_v1.Model.FailureInfo, type: :list)
field(:printerIds, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Admin.Directory_v1.Model.BatchDeletePrintersResponse do
def decode(value, options) do
GoogleApi.Admin.Directory_v1.Model.BatchDeletePrintersResponse.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Admin.Directory_v1.Model.BatchDeletePrintersResponse do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
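# Usage sketch (illustrative; the JSON field names come from the attribute
# list in the moduledoc above, the printer ids are made up):
#
#     json = ~s({"printerIds": ["p1", "p2"], "failedPrinters": []})
#     Poison.decode!(json,
#       as: %GoogleApi.Admin.Directory_v1.Model.BatchDeletePrintersResponse{}
#     )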
| 37.28 | 136 | 0.744635 |
732dc4d82a75c345517dc560f8fa79a48506fce4
| 211 |
exs
|
Elixir
|
app/priv/repo/migrations/20200725173349_add_status_to_recording.exs
|
nathanjohnson320/noodl
|
2e449aab15b54fc5a1dc45ebf4b79e7b64b7c967
|
[
"MIT"
] | 1 |
2021-01-20T20:00:50.000Z
|
2021-01-20T20:00:50.000Z
|
app/priv/repo/migrations/20200725173349_add_status_to_recording.exs
|
nathanjohnson320/noodl
|
2e449aab15b54fc5a1dc45ebf4b79e7b64b7c967
|
[
"MIT"
] | null | null | null |
app/priv/repo/migrations/20200725173349_add_status_to_recording.exs
|
nathanjohnson320/noodl
|
2e449aab15b54fc5a1dc45ebf4b79e7b64b7c967
|
[
"MIT"
] | null | null | null |
defmodule Noodl.Repo.Migrations.AddStatusToRecording do
use Ecto.Migration
def change do
alter table(:recordings) do
add :status, :string
end
create index(:recordings, :status)
end
end
| 17.583333 | 55 | 0.7109 |
732dd3ae0c40ee60738459a0bbfe53a49f27bd64
| 469 |
exs
|
Elixir
|
.workshop/exercises/hello_world/test/test_helper.exs
|
silesian-beamers/elixir-from-the-ground-up
|
1ad8c2a4d429175461dc45e218849eb6a212c776
|
[
"MIT"
] | 10 |
2015-12-13T07:29:08.000Z
|
2016-09-22T03:47:35.000Z
|
.workshop/exercises/hello_world/test/test_helper.exs
|
silesian-beamers/elixir-from-the-ground-up
|
1ad8c2a4d429175461dc45e218849eb6a212c776
|
[
"MIT"
] | 4 |
2015-12-02T12:12:14.000Z
|
2016-01-11T07:33:24.000Z
|
.workshop/exercises/hello_world/test/test_helper.exs
|
silesian-beamers/elixir-from-the-ground-up
|
1ad8c2a4d429175461dc45e218849eb6a212c776
|
[
"MIT"
] | null | null | null |
defmodule Workshop.Exercise.HelloWorldCheck.Helper do
def exec(solution_dir) do
# Locate, load, and perhaps start the user's solution.
# The following example assumes that the user solution is located
# in a file called *greeter.exs*:
"greeter.exs"
|> Path.expand(solution_dir)
|> Code.require_file
# load and run the solution checker
Code.require_file("check.exs", __DIR__)
Workshop.Exercise.HelloWorldCheck.run()
end
end
| 26.055556 | 69 | 0.720682 |
732dd78a61e1b55418ddc76b4643a8da90d4c940
| 606 |
ex
|
Elixir
|
lib/grizzly/zwave/commands/version_get.ex
|
jellybob/grizzly
|
290bee04cb16acbb9dc996925f5c501697b7ac94
|
[
"Apache-2.0"
] | 76 |
2019-09-04T16:56:58.000Z
|
2022-03-29T06:54:36.000Z
|
lib/grizzly/zwave/commands/version_get.ex
|
jellybob/grizzly
|
290bee04cb16acbb9dc996925f5c501697b7ac94
|
[
"Apache-2.0"
] | 124 |
2019-09-05T14:01:24.000Z
|
2022-02-28T22:58:14.000Z
|
lib/grizzly/zwave/commands/version_get.ex
|
jellybob/grizzly
|
290bee04cb16acbb9dc996925f5c501697b7ac94
|
[
"Apache-2.0"
] | 10 |
2019-10-23T19:25:45.000Z
|
2021-11-17T13:21:20.000Z
|
defmodule Grizzly.ZWave.Commands.VersionGet do
@moduledoc """
This module implements command VERSION_GET of command class
COMMAND_CLASS_VERSION
Params: - none -
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.Command
alias Grizzly.ZWave.CommandClasses.Version
@impl true
def new(_opts \\ []) do
command = %Command{
name: :version_get,
command_byte: 0x11,
command_class: Version,
impl: __MODULE__
}
{:ok, command}
end
@impl true
def encode_params(_command), do: <<>>
@impl true
def decode_params(_binary), do: {:ok, []}
end
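# Usage sketch (illustrative): the command carries no params, so its
# encoded body is empty —
#
#     {:ok, command} = Grizzly.ZWave.Commands.VersionGet.new()
#     Grizzly.ZWave.Commands.VersionGet.encode_params(command)
#     #=> <<>>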
| 18.363636 | 61 | 0.671617 |
732ddb0e3ea39790a4707306c44d671589356022
| 1,268 |
exs
|
Elixir
|
exercises/matrix/test/matrix_test.exs
|
DuoPan/elixir
|
e96388f242c383c1f45935570ed2f42394171fc6
|
[
"MIT"
] | 2 |
2019-07-09T05:23:38.000Z
|
2019-07-29T01:39:59.000Z
|
exercises/matrix/test/matrix_test.exs
|
DuoPan/elixir
|
e96388f242c383c1f45935570ed2f42394171fc6
|
[
"MIT"
] | null | null | null |
exercises/matrix/test/matrix_test.exs
|
DuoPan/elixir
|
e96388f242c383c1f45935570ed2f42394171fc6
|
[
"MIT"
] | null | null | null |
defmodule MatrixTest do
use ExUnit.Case
@input "1 2 3\n4 5 6\n7 8 9"
# @tag :pending
test "reading from and writing to string" do
matrix = Matrix.from_string(@input)
assert Matrix.to_string(matrix) == @input
end
@tag :pending
test "rows should return nested lists regardless of internal structure" do
matrix = Matrix.from_string(@input)
assert Matrix.rows(matrix) == [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
end
@tag :pending
test "row should return list at index" do
matrix = Matrix.from_string(@input)
assert Matrix.row(matrix, 0) == [1, 2, 3]
assert Matrix.row(matrix, 1) == [4, 5, 6]
assert Matrix.row(matrix, 2) == [7, 8, 9]
end
@tag :pending
test "columns should return nested lists regardless of internal structure" do
matrix = Matrix.from_string(@input)
assert Matrix.columns(matrix) == [
[1, 4, 7],
[2, 5, 8],
[3, 6, 9]
]
end
@tag :pending
test "column should return list at index" do
matrix = Matrix.from_string(@input)
assert Matrix.column(matrix, 0) == [1, 4, 7]
assert Matrix.column(matrix, 1) == [2, 5, 8]
assert Matrix.column(matrix, 2) == [3, 6, 9]
end
end
| 24.384615 | 79 | 0.585962 |
732de92dd7765a89dc563ff3fc85c1182f0c9c1d
| 442 |
ex
|
Elixir
|
lib/hologram/compiler/decoders/map_type_decoder.ex
|
gregjohnsonsaltaire/hologram
|
aa8e9ea0d599def864c263cc37cc8ee31f02ac4a
|
[
"MIT"
] | 40 |
2022-01-19T20:27:36.000Z
|
2022-03-31T18:17:41.000Z
|
lib/hologram/compiler/decoders/map_type_decoder.ex
|
gregjohnsonsaltaire/hologram
|
aa8e9ea0d599def864c263cc37cc8ee31f02ac4a
|
[
"MIT"
] | 42 |
2022-02-03T22:52:43.000Z
|
2022-03-26T20:57:32.000Z
|
lib/hologram/compiler/decoders/map_type_decoder.ex
|
gregjohnsonsaltaire/hologram
|
aa8e9ea0d599def864c263cc37cc8ee31f02ac4a
|
[
"MIT"
] | 3 |
2022-02-10T04:00:37.000Z
|
2022-03-08T22:07:45.000Z
|
defmodule Hologram.Compiler.MapTypeDecoder do
alias Hologram.Compiler.Decoder
def decode(%{"data" => data}) do
Enum.map(data, fn {key, value} -> {decode_key(key), Decoder.decode(value)} end)
|> Enum.into(%{})
end
defp decode_key(key) do
[_, type, value] =
~r/~(\w+)\[(.+)\]/
|> Regex.run(key)
case type do
"atom" ->
String.to_atom(value)
"string" ->
value
end
end
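# Key format sketch (derived from the regex and case above; the example
# keys are hypothetical):
#
#     decode_key("~atom[name]")   #=> :name
#     decode_key("~string[name]") #=> "name"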
end
| 19.217391 | 83 | 0.561086 |
732df914deae73d74c20da5aaff9c83d53590001
| 157 |
exs
|
Elixir
|
machine_translation/MorpHIN/Learned/Resources/Set5/TrainingInstances/27.exs
|
AdityaPrasadMishra/NLP--Project-Group-16
|
fb62cc6a1db4a494058171f11c14a2be3933a9a1
|
[
"MIT"
] | null | null | null |
machine_translation/MorpHIN/Learned/Resources/Set5/TrainingInstances/27.exs
|
AdityaPrasadMishra/NLP--Project-Group-16
|
fb62cc6a1db4a494058171f11c14a2be3933a9a1
|
[
"MIT"
] | null | null | null |
machine_translation/MorpHIN/Learned/Resources/Set5/TrainingInstances/27.exs
|
AdityaPrasadMishra/NLP--Project-Group-16
|
fb62cc6a1db4a494058171f11c14a2be3933a9a1
|
[
"MIT"
] | null | null | null |
**EXAMPLE FILE**
verb_aux pnoun noun cm P_wh;
noun noun adjective noun P_wh;
cm noun verb cm P_wh;
verb conj nst verb P_wh;
conj pn SYM particle P_wh;
| 19.625 | 31 | 0.732484 |
732e14bdb22a18a7c275e729351993017a858cee
| 2,742 |
ex
|
Elixir
|
lib/2020/day19.ex
|
hallski/adventofcode
|
03efb385688e8072b0b44d35012297833498f799
|
[
"MIT"
] | null | null | null |
lib/2020/day19.ex
|
hallski/adventofcode
|
03efb385688e8072b0b44d35012297833498f799
|
[
"MIT"
] | null | null | null |
lib/2020/day19.ex
|
hallski/adventofcode
|
03efb385688e8072b0b44d35012297833498f799
|
[
"MIT"
] | null | null | null |
defmodule AdventOfCode.Y2020.Day19 do
import ExProf.Macro
alias AdventOfCode.Y2020.Day19.Parser
def run1() do
AdventOfCode.Helpers.Data.read_from_file_no_split("2020/day19.txt")
|> Parser.parse()
|> count_valid()
end
def run2() do
AdventOfCode.Helpers.Data.read_from_file_no_split("2020/day19.txt")
|> Parser.parse()
|> patch_rules()
|> count_valid()
end
def profiled_run2() do
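# The profile block below evaluates to a two-element tuple; the anonymous
# function at the end discards the profiling data and keeps only the run
# result.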
profile do
run2()
end
|> (fn {_, res} -> res end).()
end
def patch_rules(%{rules: rules, messages: messages}) do
new_rules =
rules
|> Map.put("8", Parser.parse_rule("42 | 42 8"))
|> Map.put("11", Parser.parse_rule("42 31 | 42 11 31"))
%{rules: new_rules, messages: messages}
end
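# For reference (shape derived from Parser.parse_rule/1 below): the patched
# rule "8" parses to [["42"], ["42", "8"]] and rule "11" to
# [["42", "31"], ["42", "11", "31"]], i.e. the two recursive part-2 rules.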
def count_valid(%{messages: messages, rules: rules}) do
zero = Map.get(rules, "0")
messages
|> Task.async_stream(fn msg -> validate(msg, zero, rules) end)
|> Stream.filter(fn {:ok, result} -> result end)
|> Enum.count()
end
def validate([], [], _rules), do: true
def validate([], _rule, _rules), do: false
def validate(_str, [], _rules), do: false
def validate([first | unprocessed], [{:char, char} | rest], rules) do
if first == char, do: validate(unprocessed, rest, rules), else: false
end
def validate(str, [[a, b] | rest], rules) when is_list(a) and is_list(b) do
validate(str, [a | rest], rules) or validate(str, [b | rest], rules)
end
def validate(str, [next | rest], rules) when is_list(next) do
validate(str, next ++ rest, rules)
end
def validate(str, [next | rest], rules) when is_binary(next) do
rule = Map.get(rules, next)
validate(str, [rule | rest], rules)
end
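# Illustrative walk-through with a hypothetical two-rule grammar:
#
#     rules = Parser.parse_rules("0: 1 1\n1: \"a\"")
#     zero = Map.get(rules, "0")
#     validate(["a", "a"], zero, rules) #=> true
#     validate(["a", "b"], zero, rules) #=> false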
end
defmodule AdventOfCode.Y2020.Day19.Parser do
def parse(input) do
input
|> String.split("\n\n", trim: true)
|> parse_rules_and_messages()
end
def parse_rules_and_messages([rules, messages]) do
%{rules: parse_rules(rules), messages: parse_messages(messages)}
end
def parse_rules(rules) do
rules
|> String.split("\n", trim: true)
|> Enum.map(&parse_rule_line/1)
|> Map.new()
end
def parse_rule_line(rule) do
[number, rule_line] = String.split(rule, ":", trim: true)
rule = parse_rule(rule_line)
{number, rule}
end
def parse_rule(rule) do
rule
|> String.split("|", trim: true)
|> Stream.map(&String.trim/1)
|> Stream.map(&parse_case/1)
|> Enum.to_list()
end
def parse_case(<<"\"", char::binary-size(1), "\"">>) do
{:char, char}
end
def parse_case(list) do
list |> String.split(" ", trim: true)
end
def parse_messages(messages) do
messages
|> String.split("\n", trim: true)
|> Enum.map(&String.graphemes/1)
end
end
| 24.052632 | 77 | 0.62655 |
732e54882d288f0222ed99d09f3a55f7d338b108
| 343 |
exs
|
Elixir
|
priv/repo/migrations/20150806111137_create_contestant.exs
|
thommay/bakeoff
|
4c6cca860c45371f04021c3afceb4fe2e0b8d380
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20150806111137_create_contestant.exs
|
thommay/bakeoff
|
4c6cca860c45371f04021c3afceb4fe2e0b8d380
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20150806111137_create_contestant.exs
|
thommay/bakeoff
|
4c6cca860c45371f04021c3afceb4fe2e0b8d380
|
[
"MIT"
] | null | null | null |
defmodule Bakeoff.Repo.Migrations.CreateContestant do
use Ecto.Migration
def change do
create table(:contestants) do
add :name, :string
add :out, :boolean, default: false
add :knockedout, :integer
add :user_id, references(:users)
timestamps
end
create index(:contestants, [:user_id])
end
end
| 20.176471 | 53 | 0.670554 |
732e862f303046d3b032ed76c81ceff2844ff7e8
| 1,982 |
ex
|
Elixir
|
apps/omg_performance/lib/block_creator.ex
|
hoardexchange/elixir-omg
|
423528699d467f1cc0d02c596290ab907af38c2c
|
[
"Apache-2.0"
] | null | null | null |
apps/omg_performance/lib/block_creator.ex
|
hoardexchange/elixir-omg
|
423528699d467f1cc0d02c596290ab907af38c2c
|
[
"Apache-2.0"
] | null | null | null |
apps/omg_performance/lib/block_creator.ex
|
hoardexchange/elixir-omg
|
423528699d467f1cc0d02c596290ab907af38c2c
|
[
"Apache-2.0"
] | 2 |
2020-06-07T11:14:54.000Z
|
2020-08-02T07:36:32.000Z
|
# Copyright 2018 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule OMG.Performance.BlockCreator do
@moduledoc """
Simulates forming a new block on the child chain at specified time intervals.
"""
use GenServer
use OMG.API.LoggerExt
@initial_block_number 1000
@doc """
Starts the process. Only one process of BlockCreator can be started.
"""
def start_link(block_every_ms) do
GenServer.start_link(__MODULE__, {@initial_block_number, block_every_ms}, name: __MODULE__)
end
@doc """
Initializes the process with @initial_block_number stored in the process state.
Reschedules a call to itself, which starts the block-forming loop.
"""
@spec init({integer, integer}) :: {:ok, {integer, integer}}
def init({blknum, block_every_ms}) do
_ = Logger.debug("init called with args: '#{inspect(blknum)}'")
reschedule_task(block_every_ms)
{:ok, {blknum, block_every_ms}}
end
@doc """
Forms a new block, reports the time consumed by the API response, and
reschedules the next call after `block_every_ms` milliseconds.
"""
def handle_info(:do, {blknum, block_every_ms}) do
child_block_interval = 1000
OMG.API.State.form_block()
OMG.Performance.SenderManager.block_forming_time(blknum, 0)
reschedule_task(block_every_ms)
{:noreply, {blknum + child_block_interval, block_every_ms}}
end
defp reschedule_task(block_every_ms) do
Process.send_after(self(), :do, block_every_ms)
end
end
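# Usage sketch (interval value assumed for illustration): form a block
# every two seconds —
#
#     {:ok, _pid} = OMG.Performance.BlockCreator.start_link(2_000)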
| 32.491803 | 95 | 0.74218 |
732e9a83423c32f7f1fd3acec3305f43dc21b583
| 118 |
exs
|
Elixir
|
deps/gen_stage/.formatter.exs
|
fast-radius/kinesis
|
f98e1792c650c18bb1967d1067e77fe365245f2a
|
[
"MIT"
] | null | null | null |
deps/gen_stage/.formatter.exs
|
fast-radius/kinesis
|
f98e1792c650c18bb1967d1067e77fe365245f2a
|
[
"MIT"
] | null | null | null |
deps/gen_stage/.formatter.exs
|
fast-radius/kinesis
|
f98e1792c650c18bb1967d1067e77fe365245f2a
|
[
"MIT"
] | null | null | null |
[
inputs: ["{mix,.formatter}.exs", "{lib,test}/**/*.{ex,exs}"],
locals_without_parens: [
assert_kill: 2
]
]
| 16.857143 | 63 | 0.559322 |
732e9cc6aa3611186b5e9d9f85c7a1f9abe810e8
| 185 |
exs
|
Elixir
|
priv/repo/migrations/20190124214848_add_covert_extension_to_games.exs
|
shanesveller/grapevine
|
fe74ade1adff88dfe4c1ab55fee3902dbb4664fe
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20190124214848_add_covert_extension_to_games.exs
|
shanesveller/grapevine
|
fe74ade1adff88dfe4c1ab55fee3902dbb4664fe
|
[
"MIT"
] | null | null | null |
priv/repo/migrations/20190124214848_add_covert_extension_to_games.exs
|
shanesveller/grapevine
|
fe74ade1adff88dfe4c1ab55fee3902dbb4664fe
|
[
"MIT"
] | null | null | null |
defmodule Grapevine.Repo.Migrations.AddCovertExtensionToGames do
use Ecto.Migration
def change do
alter table(:games) do
add(:cover_extension, :string)
end
end
end
| 18.5 | 64 | 0.735135 |
732eb7fc1c6ce7d0facb3d828bde1b1ce7582824
| 2,010 |
ex
|
Elixir
|
clients/monitoring/lib/google_api/monitoring/v3/model/linear.ex
|
matehat/elixir-google-api
|
c1b2523c2c4cdc9e6ca4653ac078c94796b393c3
|
[
"Apache-2.0"
] | 1 |
2018-12-03T23:43:10.000Z
|
2018-12-03T23:43:10.000Z
|
clients/monitoring/lib/google_api/monitoring/v3/model/linear.ex
|
matehat/elixir-google-api
|
c1b2523c2c4cdc9e6ca4653ac078c94796b393c3
|
[
"Apache-2.0"
] | null | null | null |
clients/monitoring/lib/google_api/monitoring/v3/model/linear.ex
|
matehat/elixir-google-api
|
c1b2523c2c4cdc9e6ca4653ac078c94796b393c3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Monitoring.V3.Model.Linear do
@moduledoc """
Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket. There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries: Upper bound (0 <= i < N-1): offset + (width * i). Lower bound (1 <= i < N): offset + (width * (i - 1)).
## Attributes
* `numFiniteBuckets` (*type:* `integer()`, *default:* `nil`) - Must be greater than 0.
* `offset` (*type:* `float()`, *default:* `nil`) - Lower bound of the first bucket.
* `width` (*type:* `float()`, *default:* `nil`) - Must be greater than 0.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:numFiniteBuckets => integer(),
:offset => float(),
:width => float()
}
field(:numFiniteBuckets)
field(:offset)
field(:width)
end
defimpl Poison.Decoder, for: GoogleApi.Monitoring.V3.Model.Linear do
def decode(value, options) do
GoogleApi.Monitoring.V3.Model.Linear.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Monitoring.V3.Model.Linear do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 37.924528 | 384 | 0.70398 |
732eb99377f741d2fc4fd9801630ac38efe5d823
| 38,777 |
exs
|
Elixir
|
test/mix/tasks/phx.gen.auth_test.exs
|
samkenxstream/phoenix
|
18433f95a90fc948e1d6d0f5606dfa339fdc6d61
|
[
"MIT"
] | 1 |
2022-02-09T00:31:50.000Z
|
2022-02-09T00:31:50.000Z
|
test/mix/tasks/phx.gen.auth_test.exs
|
samkenxstream/phoenix
|
18433f95a90fc948e1d6d0f5606dfa339fdc6d61
|
[
"MIT"
] | null | null | null |
test/mix/tasks/phx.gen.auth_test.exs
|
samkenxstream/phoenix
|
18433f95a90fc948e1d6d0f5606dfa339fdc6d61
|
[
"MIT"
] | 1 |
2018-10-01T19:42:35.000Z
|
2018-10-01T19:42:35.000Z
|
Code.require_file "../../../installer/test/mix_helper.exs", __DIR__
defmodule Mix.Tasks.Phx.Gen.AuthTest do
use ExUnit.Case
@moduletag :mix_phx_new
import MixHelper
alias Mix.Tasks.Phx.Gen
setup do
Mix.Task.clear()
:ok
end
defp in_tmp_phx_project(test, additional_args \\ [], func) do
in_tmp(test, fn ->
Mix.Tasks.Phx.New.run(~w(my_app --no-install) ++ additional_args)
in_project(:my_app, "my_app", fn _module ->
func.()
end)
end)
end
defp in_tmp_phx_umbrella_project(test, func) do
in_tmp(test, fn ->
Mix.Tasks.Phx.New.run(~w(my_app --umbrella --no-install))
File.cd!("my_app_umbrella", fn ->
func.()
end)
end)
end
test "invalid mix arguments", config do
in_tmp_phx_project(config.test, fn ->
assert_raise Mix.Error, ~r/Expected the context, "accounts", to be a valid module name.*phx\.gen\.auth/s, fn ->
Gen.Auth.run(~w(accounts User users))
end
assert_raise Mix.Error, ~r/Expected the schema, "user", to be a valid module name/, fn ->
Gen.Auth.run(~w(Accounts user users))
end
assert_raise Mix.Error, ~r/The context and schema should have different names/, fn ->
Gen.Auth.run(~w(User User users))
end
assert_raise Mix.Error, ~r/Cannot generate context MyApp because it has the same name as the application/, fn ->
Gen.Auth.run(~w(MyApp User users))
end
assert_raise Mix.Error, ~r/Cannot generate schema MyApp because it has the same name as the application/, fn ->
Gen.Auth.run(~w(Accounts MyApp users))
end
assert_raise Mix.Error, ~r/Invalid arguments/, fn ->
Gen.Auth.run(~w())
end
assert_raise Mix.Error, ~r/Invalid arguments/, fn ->
Gen.Auth.run(~w(Accounts))
end
assert_raise Mix.Error, ~r/Invalid arguments.*phx\.gen\.auth/s, fn ->
Gen.Auth.run(~w(Accounts User users name:string))
end
assert_raise OptionParser.ParseError, ~r/unknown option/i, fn ->
Gen.Auth.run(~w(Accounts User users --no-schema))
end
assert_raise Mix.Error, ~r/Unknown value for --hashing-lib/, fn ->
Gen.Auth.run(~w(Accounts User users --hashing-lib unknown))
end
end)
end
test "generates with defaults", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "config/test.exs", fn file ->
assert file =~ "config :bcrypt_elixir, :log_rounds, 1"
end
assert_file "lib/my_app/accounts.ex"
assert_file "lib/my_app/accounts/user.ex"
assert_file "lib/my_app/accounts/user_token.ex"
assert_file "lib/my_app/accounts/user_notifier.ex", fn file ->
assert file =~ "defmodule MyApp.Accounts.UserNotifier do"
assert file =~ "import Swoosh.Email"
assert file =~ "Mailer.deliver(email)"
assert file =~ ~s|deliver(user.email, "Confirmation instructions",|
assert file =~ ~s|deliver(user.email, "Reset password instructions",|
assert file =~ ~s|deliver(user.email, "Update email instructions",|
end
assert_file "test/my_app/accounts_test.exs"
assert_file "test/support/fixtures/accounts_fixtures.ex"
assert_file "lib/my_app_web/controllers/user_auth.ex"
assert_file "test/my_app_web/controllers/user_auth_test.exs"
assert_file "lib/my_app_web/views/user_confirmation_view.ex"
assert_file "lib/my_app_web/templates/user_confirmation/new.html.heex"
assert_file "lib/my_app_web/controllers/user_confirmation_controller.ex"
assert_file "test/my_app_web/controllers/user_confirmation_controller_test.exs"
assert_file "lib/my_app_web/templates/layout/_user_menu.html.heex"
assert_file "lib/my_app_web/controllers/user_registration_controller.ex"
assert_file "lib/my_app_web/views/user_registration_view.ex"
assert_file "test/my_app_web/controllers/user_registration_controller_test.exs"
assert_file "lib/my_app_web/controllers/user_reset_password_controller.ex"
assert_file "lib/my_app_web/templates/user_reset_password/edit.html.heex"
assert_file "lib/my_app_web/templates/user_reset_password/new.html.heex"
assert_file "lib/my_app_web/views/user_reset_password_view.ex"
assert_file "test/my_app_web/controllers/user_reset_password_controller_test.exs"
assert_file "lib/my_app_web/controllers/user_session_controller.ex"
assert_file "lib/my_app_web/templates/user_session/new.html.heex"
assert_file "test/my_app_web/controllers/user_session_controller_test.exs"
assert_file "lib/my_app_web/views/user_session_view.ex"
assert_file "lib/my_app_web/controllers/user_settings_controller.ex"
assert_file "lib/my_app_web/templates/user_settings/edit.html.heex"
assert_file "lib/my_app_web/views/user_settings_view.ex"
assert_file "test/my_app_web/controllers/user_settings_controller_test.exs"
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
assert file =~ "create table(:users) do"
assert file =~ "create table(:users_tokens) do"
end
assert_file "mix.exs", fn file ->
assert file =~ ~s|{:bcrypt_elixir, "~> 2.0"},|
end
assert_file "lib/my_app_web/router.ex", fn file ->
assert file =~ "import MyAppWeb.UserAuth"
assert file =~ "plug :fetch_current_user"
assert file =~ """
## Authentication routes
scope "/", MyAppWeb do
pipe_through [:browser, :redirect_if_user_is_authenticated]
get "/users/register", UserRegistrationController, :new
post "/users/register", UserRegistrationController, :create
get "/users/log_in", UserSessionController, :new
post "/users/log_in", UserSessionController, :create
get "/users/reset_password", UserResetPasswordController, :new
post "/users/reset_password", UserResetPasswordController, :create
get "/users/reset_password/:token", UserResetPasswordController, :edit
put "/users/reset_password/:token", UserResetPasswordController, :update
end
scope "/", MyAppWeb do
pipe_through [:browser, :require_authenticated_user]
get "/users/settings", UserSettingsController, :edit
put "/users/settings", UserSettingsController, :update
get "/users/settings/confirm_email/:token", UserSettingsController, :confirm_email
end
scope "/", MyAppWeb do
pipe_through [:browser]
delete "/users/log_out", UserSessionController, :delete
get "/users/confirm", UserConfirmationController, :new
post "/users/confirm", UserConfirmationController, :create
get "/users/confirm/:token", UserConfirmationController, :edit
post "/users/confirm/:token", UserConfirmationController, :update
end
"""
end
assert_file "lib/my_app_web/templates/layout/root.html.heex", fn file ->
assert file =~ ~s|<%= render "_user_menu.html", assigns %>|
end
assert_file "test/support/conn_case.ex", fn file ->
assert file =~ "def register_and_log_in_user(%{conn: conn})"
assert file =~ "def log_in_user(conn, user)"
end
assert_received {:mix_shell, :info, ["Unable to find the \"MyApp.Mailer\"" <> mailer_notice]}
assert mailer_notice =~ ~s(A mailer module like the following is expected to be defined)
assert mailer_notice =~ ~s(in your application in order to send emails.)
assert mailer_notice =~ ~s(defmodule MyApp.Mailer do)
assert mailer_notice =~ ~s(use Swoosh.Mailer, otp_app: :my_app)
assert mailer_notice =~ ~s(def deps do)
assert mailer_notice =~ ~s(https://hexdocs.pm/swoosh)
end)
end
test "works with apps generated with --live", config do
in_tmp_phx_project(config.test, ~w(--live), fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "lib/my_app_web/templates/layout/root.html.heex", fn file ->
assert file =~ ~s|<%= render "_user_menu.html", assigns %>|
end
assert_file "lib/my_app_web/templates/layout/app.html.heex", fn file ->
refute file =~ ~s|<%= render "_user_menu.html", assigns %>|
end
end)
end
test "generates with --web option", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users --web warehouse),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "lib/my_app/accounts.ex"
assert_file "lib/my_app/accounts/user.ex"
assert_file "lib/my_app/accounts/user_token.ex"
assert_file "lib/my_app/accounts/user_notifier.ex"
assert_file "test/my_app/accounts_test.exs"
assert_file "test/support/fixtures/accounts_fixtures.ex", fn file ->
assert file =~ ~s|def valid_user_attributes(attrs \\\\ %{}) do|
end
assert_file "lib/my_app_web/controllers/warehouse/user_auth.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserAuth do"
end
assert_file "test/my_app_web/controllers/warehouse/user_auth_test.exs", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserAuthTest do"
end
assert_file "lib/my_app_web/views/warehouse/user_confirmation_view.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserConfirmationView do"
end
assert_file "lib/my_app_web/templates/warehouse/user_confirmation/new.html.heex", fn file ->
assert file =~ ~S|<.form let={f} for={:user} action={Routes.warehouse_user_confirmation_path(@conn, :create)}>|
assert file =~ ~S|<%= link "Register", to: Routes.warehouse_user_registration_path(@conn, :new) %>|
assert file =~ ~S|<%= link "Log in", to: Routes.warehouse_user_session_path(@conn, :new) %>|
end
assert_file "lib/my_app_web/controllers/warehouse/user_confirmation_controller.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserConfirmationController do"
end
assert_file "test/my_app_web/controllers/warehouse/user_confirmation_controller_test.exs", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserConfirmationControllerTest do"
end
assert_file "lib/my_app_web/templates/layout/_user_menu.html.heex", fn file ->
assert file =~ ~S|<%= link "Settings", to: Routes.warehouse_user_settings_path(@conn, :edit) %>|
assert file =~ ~S|<%= link "Log out", to: Routes.warehouse_user_session_path(@conn, :delete), method: :delete %>|
assert file =~ ~S|<%= link "Register", to: Routes.warehouse_user_registration_path(@conn, :new) %>|
assert file =~ ~S|<%= link "Log in", to: Routes.warehouse_user_session_path(@conn, :new) %>|
end
assert_file "lib/my_app_web/controllers/warehouse/user_registration_controller.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserRegistrationController do"
end
assert_file "lib/my_app_web/views/warehouse/user_registration_view.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserRegistrationView do"
end
assert_file "test/my_app_web/controllers/warehouse/user_registration_controller_test.exs", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserRegistrationControllerTest do"
end
assert_file "lib/my_app_web/controllers/warehouse/user_reset_password_controller.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserResetPasswordController do"
end
assert_file "lib/my_app_web/templates/warehouse/user_reset_password/edit.html.heex", fn file ->
assert file =~ ~S|<.form let={f} for={@changeset} action={Routes.warehouse_user_reset_password_path(@conn, :update, @token)}>|
assert file =~ ~S|<%= link "Register", to: Routes.warehouse_user_registration_path(@conn, :new) %>|
assert file =~ ~S|<%= link "Log in", to: Routes.warehouse_user_session_path(@conn, :new) %>|
end
assert_file "lib/my_app_web/templates/warehouse/user_reset_password/new.html.heex", fn file ->
assert file =~ ~S|<.form let={f} for={:user} action={Routes.warehouse_user_reset_password_path(@conn, :create)}>|
assert file =~ ~S|<%= link "Register", to: Routes.warehouse_user_registration_path(@conn, :new) %>|
assert file =~ ~S|<%= link "Log in", to: Routes.warehouse_user_session_path(@conn, :new) %>|
end
assert_file "lib/my_app_web/views/warehouse/user_reset_password_view.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserResetPasswordView do"
end
assert_file "test/my_app_web/controllers/warehouse/user_reset_password_controller_test.exs", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserResetPasswordControllerTest do"
end
assert_file "lib/my_app_web/controllers/warehouse/user_session_controller.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserSessionController do"
end
assert_file "lib/my_app_web/templates/warehouse/user_session/new.html.heex", fn file ->
assert file =~ ~S|<.form let={f} for={@conn} action={Routes.warehouse_user_session_path(@conn, :create)} as={:user}>|
assert file =~ ~S|<%= link "Register", to: Routes.warehouse_user_registration_path(@conn, :new) %>|
assert file =~ ~S|<%= link "Forgot your password?", to: Routes.warehouse_user_reset_password_path(@conn, :new) %>|
end
assert_file "test/my_app_web/controllers/warehouse/user_session_controller_test.exs", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserSessionControllerTest do"
end
assert_file "lib/my_app_web/views/warehouse/user_session_view.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserSessionView do"
end
assert_file "lib/my_app_web/controllers/warehouse/user_settings_controller.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserSettingsController do"
end
assert_file "lib/my_app_web/templates/warehouse/user_settings/edit.html.heex", fn file ->
assert file =~ ~S|<.form let={f} for={@email_changeset} action={Routes.warehouse_user_settings_path(@conn, :update)} id="update_email">|
assert file =~ ~S|<.form let={f} for={@password_changeset} action={Routes.warehouse_user_settings_path(@conn, :update)} id="update_password">|
end
assert_file "lib/my_app_web/views/warehouse/user_settings_view.ex", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserSettingsView do"
end
assert_file "test/my_app_web/controllers/warehouse/user_settings_controller_test.exs", fn file ->
assert file =~ "defmodule MyAppWeb.Warehouse.UserSettingsControllerTest do"
end
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
assert file =~ "create table(:users) do"
assert file =~ "create table(:users_tokens) do"
end
assert_file "lib/my_app_web/router.ex", fn file ->
assert file =~ "import MyAppWeb.Warehouse.UserAuth"
assert file =~ "plug :fetch_current_user"
assert file =~ """
## Authentication routes
scope "/warehouse", MyAppWeb.Warehouse, as: :warehouse do
pipe_through [:browser, :redirect_if_user_is_authenticated]
get "/users/register", UserRegistrationController, :new
post "/users/register", UserRegistrationController, :create
get "/users/log_in", UserSessionController, :new
post "/users/log_in", UserSessionController, :create
get "/users/reset_password", UserResetPasswordController, :new
post "/users/reset_password", UserResetPasswordController, :create
get "/users/reset_password/:token", UserResetPasswordController, :edit
put "/users/reset_password/:token", UserResetPasswordController, :update
end
scope "/warehouse", MyAppWeb.Warehouse, as: :warehouse do
pipe_through [:browser, :require_authenticated_user]
get "/users/settings", UserSettingsController, :edit
put "/users/settings", UserSettingsController, :update
get "/users/settings/confirm_email/:token", UserSettingsController, :confirm_email
end
scope "/warehouse", MyAppWeb.Warehouse, as: :warehouse do
pipe_through [:browser]
delete "/users/log_out", UserSessionController, :delete
get "/users/confirm", UserConfirmationController, :new
post "/users/confirm", UserConfirmationController, :create
get "/users/confirm/:token", UserConfirmationController, :edit
post "/users/confirm/:token", UserConfirmationController, :update
end
"""
end
assert_file "lib/my_app_web/templates/layout/root.html.heex", fn file ->
assert file =~ ~s|<%= render "_user_menu.html", assigns %>|
end
assert_file "test/support/conn_case.ex", fn file ->
assert file =~ "def register_and_log_in_user(%{conn: conn})"
assert file =~ "def log_in_user(conn, user)"
end
end)
end
describe "--database option" do
test "when the database is postgres", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
assert file =~ ~r/execute "CREATE EXTENSION IF NOT EXISTS citext", ""$/m
assert file =~ ~r/add :email, :citext, null: false$/m
end
assert_file "test/my_app_web/controllers/user_auth_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase, async: true$/m
end
assert_file "test/my_app_web/controllers/user_confirmation_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase, async: true$/m
end
assert_file "test/my_app_web/controllers/user_registration_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase, async: true$/m
end
assert_file "test/my_app_web/controllers/user_reset_password_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase, async: true$/m
end
assert_file "test/my_app_web/controllers/user_session_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase, async: true$/m
end
assert_file "test/my_app_web/controllers/user_settings_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase, async: true$/m
end
end)
end
test "when the database is mysql", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.MyXQL, validate_dependencies?: false]
)
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
refute file =~ ~r/execute "CREATE EXTENSION IF NOT EXISTS citext", ""$/m
assert file =~ ~r/add :email, :string, null: false, size: 160$/m
end
assert_file "test/my_app_web/controllers/user_auth_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_confirmation_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_registration_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_reset_password_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_session_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_settings_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
end)
end
test "when the database is sqlite3", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.SQLite3, validate_dependencies?: false]
)
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
refute file =~ ~r/execute "CREATE EXTENSION IF NOT EXISTS citext", ""$/m
assert file =~ ~r/add :email, :string, null: false, collate: :nocase$/m
end
assert_file "test/my_app_web/controllers/user_auth_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_confirmation_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_registration_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_reset_password_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_session_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_settings_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
end)
end
test "when the database is mssql", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.TDS, validate_dependencies?: false]
)
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
refute file =~ ~r/execute "CREATE EXTENSION IF NOT EXISTS citext", ""$/m
assert file =~ ~r/add :email, :string, null: false, size: 160$/m
end
assert_file "test/my_app_web/controllers/user_auth_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_confirmation_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_registration_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_reset_password_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_session_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
assert_file "test/my_app_web/controllers/user_settings_controller_test.exs", fn file ->
assert file =~ ~r/use MyAppWeb\.ConnCase$/m
end
end)
end
end
test "supports --binary-id option", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users --binary-id),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "lib/my_app/accounts/user.ex", fn file ->
assert file =~ "@primary_key {:id, :binary_id, autogenerate: true}"
assert file =~ "@foreign_key_type :binary_id"
end
assert_file "lib/my_app/accounts/user_token.ex", fn file ->
assert file =~ "@primary_key {:id, :binary_id, autogenerate: true}"
assert file =~ "@foreign_key_type :binary_id"
end
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_users_auth_tables.exs")
assert_file migration, fn file ->
assert file =~ "create table(:users, primary_key: false)"
assert file =~ "create table(:users_tokens, primary_key: false)"
assert file =~ "add :id, :binary_id, primary_key: true"
end
end)
end
describe "--hashing-lib option" do
test "when bcrypt", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users --hashing-lib bcrypt),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "mix.exs", fn file ->
assert file =~ ~s|{:bcrypt_elixir, "~> 2.0"}|
end
assert_file "config/test.exs", fn file ->
assert file =~ "config :bcrypt_elixir, :log_rounds, 1"
end
assert_file "lib/my_app/accounts/user.ex", fn file ->
assert file =~ "Bcrypt.verify_pass(password, hashed_password)"
end
end)
end
test "when pbkdf2", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users --hashing-lib pbkdf2),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "mix.exs", fn file ->
assert file =~ ~s|{:pbkdf2_elixir, "~> 1.0"}|
end
assert_file "config/test.exs", fn file ->
assert file =~ "config :pbkdf2_elixir, :rounds, 1"
end
assert_file "lib/my_app/accounts/user.ex", fn file ->
assert file =~ "Pbkdf2.verify_pass(password, hashed_password)"
end
end)
end
test "when argon2", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users --hashing-lib argon2),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "mix.exs", fn file ->
assert file =~ ~s|{:argon2_elixir, "~> 2.0"}|
end
assert_file "config/test.exs", fn file ->
assert file =~ """
config :argon2_elixir, t_cost: 1, m_cost: 8
"""
end
assert_file "lib/my_app/accounts/user.ex", fn file ->
assert file =~ "Argon2.verify_pass(password, hashed_password)"
end
end)
end
end
test "with --table option", config do
in_tmp_phx_project(config.test, fn ->
Gen.Auth.run(
~w(Accounts User users --table my_users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "lib/my_app/accounts/user.ex", fn file ->
assert file =~ ~S|schema "my_users" do|
end
assert_file "lib/my_app/accounts/user_token.ex", fn file ->
assert file =~ ~S|schema "my_users_tokens" do|
end
assert [migration] = Path.wildcard("priv/repo/migrations/*_create_my_users_auth_tables.exs")
assert_file migration, fn file ->
assert file =~ "create table(:my_users) do"
assert file =~ "create table(:my_users_tokens) do"
end
end)
end
describe "inside umbrella" do
test "without context_app generators config uses web dir", config do
in_tmp_phx_umbrella_project(config.test, fn ->
in_project(:my_app, "apps/my_app", fn _module ->
with_generator_env(:my_app_web, [context_app: nil], fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
end)
end)
assert_file "apps/my_app/lib/my_app/accounts.ex"
assert_file "apps/my_app/lib/my_app/accounts/user.ex"
assert_file "apps/my_app/lib/my_app/accounts/user_token.ex"
assert_file "apps/my_app/lib/my_app/accounts/user_notifier.ex"
assert_file "apps/my_app/test/my_app/accounts_test.exs"
assert_file "apps/my_app/test/support/fixtures/accounts_fixtures.ex"
assert_file "apps/my_app/lib/my_app_web/controllers/user_auth.ex"
assert_file "apps/my_app/test/my_app_web/controllers/user_auth_test.exs"
assert_file "apps/my_app/lib/my_app_web/views/user_confirmation_view.ex"
assert_file "apps/my_app/lib/my_app_web/templates/user_confirmation/new.html.heex"
assert_file "apps/my_app/lib/my_app_web/controllers/user_confirmation_controller.ex"
assert_file "apps/my_app/test/my_app_web/controllers/user_confirmation_controller_test.exs"
assert_file "apps/my_app/lib/my_app_web/templates/layout/_user_menu.html.heex"
assert_file "apps/my_app/lib/my_app_web/controllers/user_registration_controller.ex"
assert_file "apps/my_app/lib/my_app_web/views/user_registration_view.ex"
assert_file "apps/my_app/test/my_app_web/controllers/user_registration_controller_test.exs"
assert_file "apps/my_app/lib/my_app_web/controllers/user_reset_password_controller.ex"
assert_file "apps/my_app/lib/my_app_web/templates/user_reset_password/edit.html.heex"
assert_file "apps/my_app/lib/my_app_web/templates/user_reset_password/new.html.heex"
assert_file "apps/my_app/lib/my_app_web/views/user_reset_password_view.ex"
assert_file "apps/my_app/test/my_app_web/controllers/user_reset_password_controller_test.exs"
assert_file "apps/my_app/lib/my_app_web/controllers/user_session_controller.ex"
assert_file "apps/my_app/lib/my_app_web/templates/user_session/new.html.heex"
assert_file "apps/my_app/test/my_app_web/controllers/user_session_controller_test.exs"
assert_file "apps/my_app/lib/my_app_web/views/user_session_view.ex"
assert_file "apps/my_app/lib/my_app_web/controllers/user_settings_controller.ex"
assert_file "apps/my_app/lib/my_app_web/templates/user_settings/edit.html.heex"
assert_file "apps/my_app/lib/my_app_web/views/user_settings_view.ex"
assert_file "apps/my_app/test/my_app_web/controllers/user_settings_controller_test.exs"
end)
end
test "with context_app generators config does not use web dir", config do
in_tmp_phx_umbrella_project(config.test, fn ->
in_project(:my_app_web, "apps/my_app_web", fn _module ->
with_generator_env(:my_app_web, [context_app: :my_app], fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
end)
end)
assert_file "apps/my_app/lib/my_app/accounts.ex"
assert_file "apps/my_app/lib/my_app/accounts/user.ex"
assert_file "apps/my_app/lib/my_app/accounts/user_token.ex"
assert_file "apps/my_app/lib/my_app/accounts/user_notifier.ex"
assert_file "apps/my_app/test/my_app/accounts_test.exs"
assert_file "apps/my_app/test/support/fixtures/accounts_fixtures.ex"
assert_file "apps/my_app_web/lib/my_app_web/controllers/user_auth.ex"
assert_file "apps/my_app_web/test/my_app_web/controllers/user_auth_test.exs"
assert_file "apps/my_app_web/lib/my_app_web/views/user_confirmation_view.ex"
assert_file "apps/my_app_web/lib/my_app_web/templates/user_confirmation/new.html.heex"
assert_file "apps/my_app_web/lib/my_app_web/controllers/user_confirmation_controller.ex"
assert_file "apps/my_app_web/test/my_app_web/controllers/user_confirmation_controller_test.exs"
assert_file "apps/my_app_web/lib/my_app_web/templates/layout/_user_menu.html.heex"
assert_file "apps/my_app_web/lib/my_app_web/controllers/user_registration_controller.ex"
assert_file "apps/my_app_web/lib/my_app_web/views/user_registration_view.ex"
assert_file "apps/my_app_web/test/my_app_web/controllers/user_registration_controller_test.exs"
assert_file "apps/my_app_web/lib/my_app_web/controllers/user_reset_password_controller.ex"
assert_file "apps/my_app_web/lib/my_app_web/templates/user_reset_password/edit.html.heex"
assert_file "apps/my_app_web/lib/my_app_web/templates/user_reset_password/new.html.heex"
assert_file "apps/my_app_web/lib/my_app_web/views/user_reset_password_view.ex"
assert_file "apps/my_app_web/test/my_app_web/controllers/user_reset_password_controller_test.exs"
assert_file "apps/my_app_web/lib/my_app_web/controllers/user_session_controller.ex"
assert_file "apps/my_app_web/lib/my_app_web/templates/user_session/new.html.heex"
assert_file "apps/my_app_web/test/my_app_web/controllers/user_session_controller_test.exs"
assert_file "apps/my_app_web/lib/my_app_web/views/user_session_view.ex"
assert_file "apps/my_app_web/lib/my_app_web/controllers/user_settings_controller.ex"
assert_file "apps/my_app_web/lib/my_app_web/templates/user_settings/edit.html.heex"
assert_file "apps/my_app_web/lib/my_app_web/views/user_settings_view.ex"
assert_file "apps/my_app_web/test/my_app_web/controllers/user_settings_controller_test.exs"
end)
end
test "raises with false context_app", config do
in_tmp_phx_umbrella_project config.test, fn ->
in_project(:my_app_web, "apps/my_app_web", fn _module ->
      with_generator_env(:my_app_web, [context_app: false], fn ->
assert_raise Mix.Error, ~r/no context_app configured/, fn ->
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
end
end)
end)
end
end
end
describe "user prompts" do
test "when unable to inject dependencies in mix.exs", config do
in_tmp_phx_project(config.test, fn ->
File.write!("mix.exs", "")
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_received {:mix_shell, :info, ["""
Add your {:bcrypt_elixir, "~> 2.0"} dependency to mix.exs:
defp deps do
[
{:bcrypt_elixir, "~> 2.0"},
...
]
end
"""]}
end)
end
test "when unable to inject authentication import into router.ex", config do
in_tmp_phx_project(config.test, fn ->
modify_file("lib/my_app_web/router.ex", fn file ->
String.replace(file, "use MyAppWeb, :router", "")
end)
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_received {:mix_shell, :info, ["""
Add your MyAppWeb.UserAuth import to lib/my_app_web/router.ex:
defmodule MyAppWeb.Router do
use MyAppWeb, :router
# Import authentication plugs
import MyAppWeb.UserAuth
...
end
"""]}
end)
end
test "when unable to inject plugs into router.ex", config do
in_tmp_phx_project(config.test, fn ->
modify_file("lib/my_app_web/router.ex", fn file ->
String.replace(file, "plug :put_secure_browser_headers\n", "")
end)
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_received {:mix_shell, :info, ["""
Add the :fetch_current_user plug to the :browser pipeline in lib/my_app_web/router.ex:
pipeline :browser do
...
plug :put_secure_browser_headers
plug :fetch_current_user
end
"""]}
end)
end
test "when layout file is not found", config do
in_tmp_phx_project(config.test, fn ->
File.rm!("lib/my_app_web/templates/layout/root.html.heex")
File.rm!("lib/my_app_web/templates/layout/app.html.heex")
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_received {:mix_shell, :error, ["""
Unable to find an application layout file to inject a render
call for "_user_menu.html".
Missing files:
* lib/my_app_web/templates/layout/root.html.heex
* lib/my_app_web/templates/layout/app.html.heex
Please ensure this phoenix app was not generated with
--no-html. If you have changed the name of your application
layout file, please add the following code to it where you'd
like "_user_menu.html" to be rendered.
<%= render "_user_menu.html", assigns %>
"""]}
end)
end
test "when user menu can't be injected into layout", config do
in_tmp_phx_project(config.test, fn ->
modify_file("lib/my_app_web/templates/layout/root.html.heex", fn _file ->
""
end)
Gen.Auth.run(
~w(Accounts User users),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_received {:mix_shell, :info, ["""
Add a render call for "_user_menu.html" to lib/my_app_web/templates/layout/root.html.heex:
<nav>
<%= render "_user_menu.html", assigns %>
</nav>
"""]}
end)
end
end
test "allows templates to be overridden", config do
in_tmp_phx_project(config.test, fn ->
File.mkdir_p!("priv/templates/phx.gen.auth")
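      # `<%%=` below escapes to a literal `<%=` in the generated file,
      # while `<%= schema.singular %>` is evaluated at generation time
      # (producing "admin" for this run).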
File.write!("priv/templates/phx.gen.auth/_menu.html.heex", """
<ul>
<%%= if @current_<%= schema.singular %> do %>
You're logged in
<%% end %>
</ul>
""")
Gen.Auth.run(
~w(Accounts Admin admins),
[ecto_adapter: Ecto.Adapters.Postgres, validate_dependencies?: false]
)
assert_file "lib/my_app_web/templates/layout/_admin_menu.html.heex", fn file ->
assert file =~ ~S|<%= if @current_admin do %>|
assert file =~ ~S|You're logged in|
end
end)
end
end
| 42.057484 | 150 | 0.664776 |
732f2778aa3a9b3d0a59305b63e445c8e60a0141
| 1,873 |
ex
|
Elixir
|
lib/ecto_cassandra/migration.ex
|
ne1ro/ecto_cassandra
|
f53f422618966e1ae070f77dc2972d9f189c40b9
|
[
"MIT"
] | 4 |
2018-08-24T10:02:56.000Z
|
2019-10-20T20:21:04.000Z
|
lib/ecto_cassandra/migration.ex
|
ne1ro/ecto_cassandra
|
f53f422618966e1ae070f77dc2972d9f189c40b9
|
[
"MIT"
] | null | null | null |
lib/ecto_cassandra/migration.ex
|
ne1ro/ecto_cassandra
|
f53f422618966e1ae070f77dc2972d9f189c40b9
|
[
"MIT"
] | 2 |
2018-10-02T09:51:41.000Z
|
2019-11-15T19:44:35.000Z
|
defmodule EctoCassandra.Migration do
@moduledoc """
Implement Ecto migrations
"""
alias Ecto.Migration.{Index, Table}
alias EctoCassandra.{Conn, Query}
alias Xandra.SchemaChange
@spec execute_ddl(
repo :: Ecto.Repo.t(),
Ecto.Adapter.Migration.command(),
options :: Keyword.t()
) :: :ok | no_return
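  # Each clause below translates an Ecto DDL command tuple into CQL and
  # executes it, matching on the expected Xandra.SchemaChange effect to
  # return :ok.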
def execute_ddl(_repo, {command, %Table{name: table_name}, commands}, _opts)
when command in ~w(create_if_not_exists create)a do
cql = Query.new([{command, table_name}] ++ commands)
with %SchemaChange{effect: "CREATED"} <- Xandra.execute!(Conn, cql), do: :ok
end
def execute_ddl(_repo, {command, %Index{columns: columns, name: name, table: table}}, _opts)
when command in ~w(create_if_not_exists create)a do
cql = Query.new(create_index: {table, columns, name})
with %SchemaChange{effect: "CREATED"} <- Xandra.execute!(Conn, cql), do: :ok
end
def execute_ddl(_repo, {:drop, %Index{name: name}}, _opts) do
cql = Query.new(drop_index: name)
with %SchemaChange{effect: "DROPPED"} <- Xandra.execute!(Conn, cql), do: :ok
end
def execute_ddl(_repo, {:drop, %Table{name: table_name}}, _opts) do
cql = Query.new(drop: table_name)
with %SchemaChange{effect: "DROPPED"} <- Xandra.execute!(Conn, cql), do: :ok
end
def execute_ddl(_repo, {:alter, %Table{name: table_name}, commands}, _opts) do
cql = Query.new([{:alter, table_name}] ++ commands)
with %SchemaChange{effect: "CHANGED"} <- Xandra.execute!(Conn, cql), do: :ok
end
def execute_ddl(_repo, {:rename, %Table{name: table_name}, from, to}, _opts) do
cql = Query.new(rename: [table_name, from, to])
with %SchemaChange{effect: "CHANGED"} <- Xandra.execute!(Conn, cql), do: :ok
end
def execute_ddl(_repo, _command, _opts) do
raise ArgumentError, "Not acceptable arguments"
end
end
| 36.72549 | 94 | 0.667912 |
732f3f8c7a51d12fe1fd53063d77399a7052b23d
| 291 |
exs
|
Elixir
|
test/nerves_hub_user_api_test.exs
|
nerves-hub/nerves_hub_core
|
aeef481f80391ad124fa14349ffa0bde67eb96d6
|
[
"Apache-2.0"
] | 1 |
2018-12-06T04:06:37.000Z
|
2018-12-06T04:06:37.000Z
|
test/nerves_hub_user_api_test.exs
|
nerves-hub/nerves_hub_core
|
aeef481f80391ad124fa14349ffa0bde67eb96d6
|
[
"Apache-2.0"
] | 1 |
2019-01-09T18:07:00.000Z
|
2019-01-09T18:07:00.000Z
|
test/nerves_hub_user_api_test.exs
|
nerves-hub/nerves_hub_core
|
aeef481f80391ad124fa14349ffa0bde67eb96d6
|
[
"Apache-2.0"
] | null | null | null |
defmodule NervesHubCoreTest do
use NervesHubCoreTest.Case
doctest NervesHubUserAPI
alias NervesHubUserAPI.User
setup :create_peer_user
test "backwards support for user client certificate auth", %{auth: auth} do
assert auth.cert
assert {:ok, _} = User.me(auth)
end
end
| 20.785714 | 77 | 0.749141 |
732f4bac0dc68a7b4d6301b406900b3b870a29d8
| 919 |
exs
|
Elixir
|
test/enum_transform_misc_runner1_test.exs
|
ianrumford/plymio_enum
|
019165e369508eadcb723579e02a86670d894bb4
|
[
"MIT"
] | null | null | null |
test/enum_transform_misc_runner1_test.exs
|
ianrumford/plymio_enum
|
019165e369508eadcb723579e02a86670d894bb4
|
[
"MIT"
] | null | null | null |
test/enum_transform_misc_runner1_test.exs
|
ianrumford/plymio_enum
|
019165e369508eadcb723579e02a86670d894bb4
|
[
"MIT"
] | null | null | null |
defmodule PlymioEnumTransformMiscRunner1Test do
use PlymioEnumHelpersTest
test "functions: misc" do
test_value = [a: 1, b: 2, c: 3]
helper_run_and_realise_tests_default1(
test_value: test_value,
test_specs: [
# Other Discrete Transform forms
# arbitrary {mod,fun} tuple as the "key"
[[a: 1, b: 2, c: 3], [{{List,:flatten}, []}]],
      # mfa - args will be passed through List.wrap/1
[[b: 2, c: 3], [{List,:delete_at, 0}]],
[[b: 2, c: 3], [{List,:delete_at, [0]}]],
[[a: 1, b: 2, c: 3, d: 4], [{{List,:flatten}, [[d: 4]]}]],
# fun + no args
[[a: 1, b: 2, c: 3], [{fn x -> x end, []}]],
# fun only
[[a: 1, b: 2, c: 3], [fn x -> x end]],
# not sensible but demonstrates an explicit fun
[[a: 1, b: 4, c: 9], [fn enum -> enum |> Enum.map(fn {k,v} -> {k,v*v} end) end]],
])
end
end
| 24.837838 | 89 | 0.495103 |
732f4cf3358b969ac23b47c14e57ac74cade1a8f
| 14 |
ex
|
Elixir
|
testData/org/elixir_lang/parser_definition/matched_three_operation_parsing_test_case/CaptureNonNumericOperation.ex
|
keyno63/intellij-elixir
|
4033e319992c53ddd42a683ee7123a97b5e34f02
|
[
"Apache-2.0"
] | 1,668 |
2015-01-03T05:54:27.000Z
|
2022-03-25T08:01:20.000Z
|
testData/org/elixir_lang/parser_definition/matched_three_operation_parsing_test_case/CaptureNonNumericOperation.ex
|
keyno63/intellij-elixir
|
4033e319992c53ddd42a683ee7123a97b5e34f02
|
[
"Apache-2.0"
] | 2,018 |
2015-01-01T22:43:39.000Z
|
2022-03-31T20:13:08.000Z
|
testData/org/elixir_lang/parser_definition/matched_three_operation_parsing_test_case/CaptureNonNumericOperation.ex
|
keyno63/intellij-elixir
|
4033e319992c53ddd42a683ee7123a97b5e34f02
|
[
"Apache-2.0"
] | 145 |
2015-01-15T11:37:16.000Z
|
2021-12-22T05:51:02.000Z
|
&one ^^^ &two
| 7 | 13 | 0.428571 |
732f8850176f24e3a2b119d1268922d090adcadb
| 260 |
ex
|
Elixir
|
myApp/lib/myApp/repo.ex
|
CaptainAwesomeDi/unnamedproject
|
1b2bbdbc9774a073e70eb8fcd255339d7a36df70
|
[
"MIT"
] | null | null | null |
myApp/lib/myApp/repo.ex
|
CaptainAwesomeDi/unnamedproject
|
1b2bbdbc9774a073e70eb8fcd255339d7a36df70
|
[
"MIT"
] | null | null | null |
myApp/lib/myApp/repo.ex
|
CaptainAwesomeDi/unnamedproject
|
1b2bbdbc9774a073e70eb8fcd255339d7a36df70
|
[
"MIT"
] | null | null | null |
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :myApp
@doc """
Dynamically loads the repository url from the
DATABASE_URL environment variable.
"""
def init(_, opts) do
{:ok, Keyword.put(opts, :url, System.get_env("DATABASE_URL"))}
end
end
| 21.666667 | 66 | 0.692308 |
732f9176e1f2a13838a4cf4584d0eb08d1ad5238
| 401 |
exs
|
Elixir
|
process/messages_3.exs
|
hectorip/ErlangExercises
|
4a1aa5de0504da1bfe5a6c31c1d20277524ab363
|
[
"MIT"
] | 4 |
2016-09-22T03:47:56.000Z
|
2017-02-02T17:42:57.000Z
|
process/messages_3.exs
|
hectorip/ErlangExercises
|
4a1aa5de0504da1bfe5a6c31c1d20277524ab363
|
[
"MIT"
] | null | null | null |
process/messages_3.exs
|
hectorip/ErlangExercises
|
4a1aa5de0504da1bfe5a6c31c1d20277524ab363
|
[
"MIT"
] | null | null | null |
defmodule MessageQueue do
  def send_message(pid) do
    send pid, ":)"
    exit(:boom)
  end
  def receive_messages() do
    receive do
      msg ->
        IO.inspect msg
        receive_messages()
    end
  end
  def run() do
    # Trap exits so the :EXIT signal from the linked process arrives
    # as an ordinary message instead of crashing this process.
    Process.flag(:trap_exit, true)
    spawn_link(MessageQueue, :send_message, [self()])
    import :timer, only: [sleep: 1]
    sleep 500
    receive_messages()
  end
end
| 16.708333 | 51 | 0.613466 |
732fa9ef11389b171e72cfc10a066443d1a68f1f
| 182 |
ex
|
Elixir
|
test/fixtures/components/compound/view.ex
|
tsenturk/phoenix_components
|
dc1905743a9d30078b581112af105b48282eca79
|
[
"MIT"
] | 3 |
2018-11-22T15:39:09.000Z
|
2019-06-13T19:48:17.000Z
|
test/fixtures/components/compound/view.ex
|
tsenturk/phoenix_components
|
dc1905743a9d30078b581112af105b48282eca79
|
[
"MIT"
] | 1 |
2021-07-15T13:01:36.000Z
|
2021-07-15T13:01:36.000Z
|
test/fixtures/components/compound/view.ex
|
tsenturk/phoenix_components
|
dc1905743a9d30078b581112af105b48282eca79
|
[
"MIT"
] | 5 |
2018-11-07T12:48:40.000Z
|
2021-07-14T11:09:54.000Z
|
defmodule MyApp.Components.Compound do
@moduledoc """
Dummy compound component
"""
use MyApp.Component
import_components [:button, :jumbotron], from: MyApp.Components
end
| 20.222222 | 65 | 0.747253 |
732fb76e784d2dbaf9d95f3f759acae3518b2092
| 500 |
exs
|
Elixir
|
test/norm/alt_test.exs
|
blackbox-solutions/norm
|
12ea8e191b48b58a5a92f7e4ae92084a4ccd8669
|
[
"MIT"
] | null | null | null |
test/norm/alt_test.exs
|
blackbox-solutions/norm
|
12ea8e191b48b58a5a92f7e4ae92084a4ccd8669
|
[
"MIT"
] | null | null | null |
test/norm/alt_test.exs
|
blackbox-solutions/norm
|
12ea8e191b48b58a5a92f7e4ae92084a4ccd8669
|
[
"MIT"
] | null | null | null |
defmodule Norm.Spec.AltTest do
use ExUnit.Case, async: true
import Norm
describe "generation" do
test "returns one of the options" do
spec = alt(s: spec(is_binary()), i: spec(is_integer()), a: spec(is_atom()))
for {type, value} <- Enum.take(gen(spec), 5) do
case type do
:s ->
assert is_binary(value)
:i ->
assert is_integer(value)
:a ->
assert is_atom(value)
end
end
end
end
end
| 20.833333 | 81 | 0.542 |
732fbf6e328b6e1f0cfe01caaeec5db5d5c5ee97
| 2,355 |
exs
|
Elixir
|
test/flicks/accounts_test.exs
|
KTSCode/flicks-backend
|
e484b7a69cf24d7e474373c1ad627a3a4eaddbc6
|
[
"MIT"
] | null | null | null |
test/flicks/accounts_test.exs
|
KTSCode/flicks-backend
|
e484b7a69cf24d7e474373c1ad627a3a4eaddbc6
|
[
"MIT"
] | null | null | null |
test/flicks/accounts_test.exs
|
KTSCode/flicks-backend
|
e484b7a69cf24d7e474373c1ad627a3a4eaddbc6
|
[
"MIT"
] | null | null | null |
defmodule Flicks.AccountsTest do
use Flicks.DataCase
alias Flicks.Accounts
describe "users" do
alias Flicks.Accounts.User
@valid_attrs %{
email: "some email",
unique_identifier: "some unique_identifier",
username: "some username"
}
@update_attrs %{
email: "some updated email",
unique_identifier: "some updated unique_identifier",
username: "some updated username"
}
@invalid_attrs %{email: nil, unique_identifier: nil, username: nil}
def user_fixture(attrs \\ %{}) do
{:ok, user} =
attrs
|> Enum.into(@valid_attrs)
|> Accounts.create_user()
user
end
test "list_users/0 returns all users" do
user = user_fixture()
assert Accounts.list_users() == [user]
end
test "get_user!/1 returns the user with given id" do
user = user_fixture()
assert Accounts.get_user!(user.id) == user
end
test "create_user/1 with valid data creates a user" do
assert {:ok, %User{} = user} = Accounts.create_user(@valid_attrs)
assert user.email == "some email"
assert user.unique_identifier == "some unique_identifier"
assert user.username == "some username"
end
test "create_user/1 with invalid data returns error changeset" do
assert {:error, %Ecto.Changeset{}} = Accounts.create_user(@invalid_attrs)
end
test "update_user/2 with valid data updates the user" do
user = user_fixture()
assert {:ok, %User{} = user} = Accounts.update_user(user, @update_attrs)
assert user.email == "some updated email"
assert user.unique_identifier == "some updated unique_identifier"
assert user.username == "some updated username"
end
test "update_user/2 with invalid data returns error changeset" do
user = user_fixture()
assert {:error, %Ecto.Changeset{}} = Accounts.update_user(user, @invalid_attrs)
assert user == Accounts.get_user!(user.id)
end
test "delete_user/1 deletes the user" do
user = user_fixture()
assert {:ok, %User{}} = Accounts.delete_user(user)
assert_raise Ecto.NoResultsError, fn -> Accounts.get_user!(user.id) end
end
test "change_user/1 returns a user changeset" do
user = user_fixture()
assert %Ecto.Changeset{} = Accounts.change_user(user)
end
end
end
| 30.584416 | 85 | 0.658174 |
732fdb3e8c596f9d4fe56da37c635dfeb839baa1
| 1,114 |
exs
|
Elixir
|
config/config.exs
|
thomasvolk/stressman
|
7c1b62d262ea8f2c38854ce053fe2888aaacd5b6
|
[
"Apache-2.0"
] | null | null | null |
config/config.exs
|
thomasvolk/stressman
|
7c1b62d262ea8f2c38854ce053fe2888aaacd5b6
|
[
"Apache-2.0"
] | null | null | null |
config/config.exs
|
thomasvolk/stressman
|
7c1b62d262ea8f2c38854ce053fe2888aaacd5b6
|
[
"Apache-2.0"
] | null | null | null |
# This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
use Mix.Config
# This configuration is loaded before any dependency and is restricted
# to this project. If another project depends on this project, this
# file won't be loaded nor affect the parent project. For this reason,
# if you want to provide default values for your application for
# 3rd-party users, it should be done in your "mix.exs" file.
# You can configure your application as:
#
# config :stress, key: :value
#
# and access this configuration in your application as:
#
# Application.get_env(:stress, :key)
#
# You can also configure a 3rd-party app:
#
config :logger, level: :info
# It is also possible to import configuration files, relative to this
# directory. For example, you can emulate configuration per environment
# by uncommenting the line below and defining dev.exs, test.exs and such.
# Configuration from the imported file will override the ones defined
# here (which is why it is important to import them last).
#
# import_config "#{Mix.env}.exs"
| 35.935484 | 73 | 0.755835 |
732fdd17d1634439936689083c480a59e0cf5409
| 801 |
ex
|
Elixir
|
lib/sanbase/comments/entity_modules/wallet_hunters_proposal_comment.ex
|
santiment/sanbase2
|
9ef6e2dd1e377744a6d2bba570ea6bd477a1db31
|
[
"MIT"
] | 81 |
2017-11-20T01:20:22.000Z
|
2022-03-05T12:04:25.000Z
|
lib/sanbase/comments/entity_modules/wallet_hunters_proposal_comment.ex
|
rmoorman/sanbase2
|
226784ab43a24219e7332c49156b198d09a6dd85
|
[
"MIT"
] | 359 |
2017-10-15T14:40:53.000Z
|
2022-01-25T13:34:20.000Z
|
lib/sanbase/comments/entity_modules/wallet_hunters_proposal_comment.ex
|
rmoorman/sanbase2
|
226784ab43a24219e7332c49156b198d09a6dd85
|
[
"MIT"
] | 16 |
2017-11-19T13:57:40.000Z
|
2022-02-07T08:13:02.000Z
|
defmodule Sanbase.Comment.WalletHuntersProposalComment do
@moduledoc ~s"""
A mapping table connecting comments and wallet hunters proposals.
"""
use Ecto.Schema
import Ecto.{Query, Changeset}
schema "wallet_hunters_proposals_comments_mapping" do
belongs_to(:comment, Sanbase.Comment)
belongs_to(:proposal, Sanbase.WalletHunters.Proposal)
timestamps()
end
def changeset(%__MODULE__{} = mapping, attrs \\ %{}) do
mapping
|> cast(attrs, [:proposal_id, :comment_id])
|> validate_required([:proposal_id, :comment_id])
|> unique_constraint(:comment_id)
end
def has_type?(comment_id) do
from(pc in __MODULE__, where: pc.comment_id == ^comment_id)
|> Sanbase.Repo.one()
|> case do
%__MODULE__{} -> true
_ -> false
end
end
end
| 25.03125 | 67 | 0.690387 |
73301d47ead42a5d3da74e6ef8a14e373621b72e
| 872 |
ex
|
Elixir
|
lib/tortoise311/package/puback.ex
|
jsmestad/tortoise311
|
146b4e5bcf967d0c07b43d0d029790f9f0a7d6a9
|
[
"Apache-2.0"
] | null | null | null |
lib/tortoise311/package/puback.ex
|
jsmestad/tortoise311
|
146b4e5bcf967d0c07b43d0d029790f9f0a7d6a9
|
[
"Apache-2.0"
] | null | null | null |
lib/tortoise311/package/puback.ex
|
jsmestad/tortoise311
|
146b4e5bcf967d0c07b43d0d029790f9f0a7d6a9
|
[
"Apache-2.0"
] | null | null | null |
defmodule Tortoise311.Package.Puback do
@moduledoc false
@opcode 4
alias Tortoise311.Package
@opaque t :: %__MODULE__{
__META__: Package.Meta.t(),
identifier: Tortoise311.package_identifier()
}
@enforce_keys [:identifier]
defstruct __META__: %Package.Meta{opcode: @opcode, flags: 0b0000},
identifier: nil
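  # A PUBACK is a fixed 4-byte packet: one byte holding the 4-bit opcode
  # and four zeroed flag bits, a remaining length of 2, and a 16-bit
  # big-endian packet identifier.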
@spec decode(<<_::32>>) :: t
def decode(<<@opcode::4, 0::4, 2, identifier::big-integer-size(16)>>)
when identifier in 0x0001..0xFFFF do
%__MODULE__{identifier: identifier}
end
# Protocols ----------------------------------------------------------
defimpl Tortoise311.Encodable do
def encode(%Package.Puback{identifier: identifier} = t)
when identifier in 0x0001..0xFFFF do
[Package.Meta.encode(t.__META__), <<2, identifier::big-integer-size(16)>>]
end
end
end
| 29.066667 | 80 | 0.614679 |
73302d4b245fb7ffd64714b8bac5f2aafb3fa897
| 2,931 |
ex
|
Elixir
|
lib/forcex/client.ex
|
sata/forcex
|
ac97cb5a9671063728bb12d443ba1cfbee072d77
|
[
"MIT"
] | null | null | null |
lib/forcex/client.ex
|
sata/forcex
|
ac97cb5a9671063728bb12d443ba1cfbee072d77
|
[
"MIT"
] | null | null | null |
lib/forcex/client.ex
|
sata/forcex
|
ac97cb5a9671063728bb12d443ba1cfbee072d77
|
[
"MIT"
] | null | null | null |
defmodule Forcex.Client do
require Logger
@default_endpoint "https://login.salesforce.com"
defstruct api_version: "41.0",
authorization_header: [],
endpoint: @default_endpoint,
services: %{}
@moduledoc """
This client delegates login to the appropriate endpoint depending on the
type of credentials you have, and upon successful authentication keeps track
of the authentication headers you'll need for subsequent calls.
"""
@doc """
Initially signs into Force.com API.
Login credentials may be supplied. Order for locating credentials:
1. Map supplied to `login/1`
2. Environment variables
3. Applications configuration
Supplying a Map of login credentials must be in the form of
%{
username: "...",
password: "...",
security_token: "...",
client_id: "...",
client_secret: "...",
endpoint: "..."
}
Environment variables
- `SALESFORCE_USERNAME`
- `SALESFORCE_PASSWORD`
- `SALESFORCE_SECURITY_TOKEN`
- `SALESFORCE_CLIENT_ID`
- `SALESFORCE_CLIENT_SECRET`
- `SALESFORCE_ENDPOINT`
Application configuration
config :forcex, Forcex.Client,
username: "[email protected]",
password: "my_super_secret_password",
security_token: "EMAILED_FROM_SALESFORCE",
client_id: "CONNECTED_APP_OAUTH_CLIENT_ID",
client_secret: "CONNECTED_APP_OAUTH_CLIENT_SECRET",
endpoint: "login.salesforce.com"
  If no `client_id` is passed, login via session id will be attempted with
  `security_token`.
  Will require an additional call to `locate_services/1` to identify which
  Force.com services are available for your deployment.
client =
Forcex.Client.login
|> Forcex.Client.locate_services
"""
def login(config \\ default_config()) do
login(config, %__MODULE__{endpoint: config[:endpoint] || @default_endpoint})
end
def login(conf, starting_struct) do
Logger.debug("conf=" <> inspect(conf))
case conf do
%{client_id: _} -> struct(__MODULE__, Forcex.Auth.OAuth.login(conf, starting_struct))
%{security_token: _} -> struct(__MODULE__, Forcex.Auth.SessionId.login(conf, starting_struct))
end
end
def locate_services(client) do
services = Forcex.services(client)
client = %{client | services: services}
Logger.debug(inspect(client))
client
end
def default_config() do
[:username, :password, :security_token, :client_id, :client_secret, :endpoint]
|> Enum.map(&({&1, get_val_from_env(&1)}))
|> Enum.filter(fn {_, v} -> v end)
|> Enum.into(%{})
end
defp get_val_from_env(key) do
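    # Prefer the SALESFORCE_* environment variable; if it is unset, fall
    # back to the application config under `config :forcex, Forcex.Client`.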
key
|> env_var
|> System.get_env
|> case do
nil ->
Application.get_env(:forcex, __MODULE__, [])
|> Keyword.get(key)
val -> val
end
end
defp env_var(key), do: "SALESFORCE_#{key |> to_string |> String.upcase}"
end
| 28.182692 | 100 | 0.66189 |
73302fa46653e31569591b47801a917c7f9e3bca
| 50,610 |
ex
|
Elixir
|
lib/mix/lib/mix/tasks/release.ex
|
brandondrew/elixir
|
0bbd33fa9a2411920dce505340097a6c0e73bc46
|
[
"Apache-2.0"
] | null | null | null |
lib/mix/lib/mix/tasks/release.ex
|
brandondrew/elixir
|
0bbd33fa9a2411920dce505340097a6c0e73bc46
|
[
"Apache-2.0"
] | null | null | null |
lib/mix/lib/mix/tasks/release.ex
|
brandondrew/elixir
|
0bbd33fa9a2411920dce505340097a6c0e73bc46
|
[
"Apache-2.0"
] | null | null | null |
defmodule Mix.Tasks.Release do
use Mix.Task
@shortdoc "Assembles a self-contained release"
@moduledoc """
Assembles a self-contained release for the current project:
MIX_ENV=prod mix release
MIX_ENV=prod mix release NAME
Once a release is assembled, it can be packaged and deployed to a
target, as long as the target runs on the same operating system (OS)
distribution and version as the machine running the `mix release`
command.
A release can be configured in your `mix.exs` file under the `:releases`
key inside `def project`:
def project do
[
releases: [
demo: [
include_executables_for: [:unix],
applications: [runtime_tools: :permanent]
],
...
]
]
end
You can specify multiple releases where the key is the release name
and the value is a keyword list with the release configuration.
Releasing a certain name is done with:
MIX_ENV=prod mix release demo
If the given name does not exist, an error is raised.
If `mix release`, without a name, is invoked and there are multiple names,
an error will be raised unless you set `default_release: NAME` at the root
of your project configuration.
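  For example, a minimal sketch assuming a release named `demo` is
  defined under `:releases`:
      def project do
        [
          default_release: :demo,
          releases: [
            demo: [...]
          ]
        ]
      end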
If `mix release` is invoked and there are no names, a release using the
application name and default values is assembled.
## Why releases?
Releases allow developers to precompile and package all of their code
and the runtime into a single unit. The benefits of releases are:
* Code preloading. The VM has two mechanisms for loading code:
interactive and embedded. By default, it runs in the interactive
mode which dynamically loads modules when they are used for the
first time. The first time your application calls `Enum.map/2`,
the VM will find the `Enum` module and load it. There’s a downside.
When you start a new server in production, it may need to load
many other modules, causing the first requests to have an unusual
spike in response time. Releases run in embedded mode, which loads
all available modules upfront, guaranteeing your system is ready
to handle requests after booting.
    * Configuration and customization. Releases give developers
      fine-grained control over system configuration and the VM flags used
to start the system.
* Self-contained. A release does not require the source code to be
included in your production artifacts. All of the code is precompiled
and packaged. Releases do not even require Erlang or Elixir in your
servers, as it includes the Erlang VM and its runtime by default.
Furthermore, both Erlang and Elixir standard libraries are stripped
to bring only the parts you are actually using.
* Multiple releases. You can assemble different releases with
different configuration per application or even with different
applications altogether.
* Management scripts. Releases come with scripts to start, restart,
connect to the running system remotely, execute RPC calls, run as
daemon, run as a Windows service, and more.
## Running the release
Once a release is assembled, you can start it by calling
`bin/RELEASE_NAME start` inside the release. In production, you would do:
MIX_ENV=prod mix release
_build/prod/rel/my_app/bin/my_app start
`bin/my_app start` will start the system connected to the current standard
  input/output, where logs are also written by default. This is the
preferred way to run the system. Many tools, such as `systemd`, platforms
as a service, such as Heroku, and many containers platforms, such as Docker,
are capable of processing the standard input/output and redirecting
the log contents elsewhere. Those tools and platforms also take care
of restarting the system in case it crashes.
You can also execute one-off commands, run the release as a daemon on
Unix-like system, or install it as a service on Windows. We will take a
look at those next. You can also list all available commands by invoking
`bin/RELEASE_NAME`.
### One-off commands (eval and rpc)
If you want to invoke specific modules and functions in your release,
you can do so in two ways: using `eval` or `rpc`.
bin/RELEASE_NAME eval "IO.puts(:hello)"
bin/RELEASE_NAME rpc "IO.puts(:hello)"
The `eval` command starts its own instance of the VM but without
starting any of the applications in the release and without starting
distribution. For example, if you need to do some prep work before
running the actual system, like migrating your database, `eval` can
be a good fit. Just keep in mind any application you may use during
eval has to be explicitly loaded and/or started.
You can start an application by calling `Application.ensure_all_started/1`.
However, if for some reason you cannot start an application, maybe
because it will run other services you do not want, you must at least
load the application by calling `Application.load/1`. If you don't
load the application, any attempt at reading its environment or
configuration may fail. Note that if you start an application,
  it is automatically loaded before it is started.
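  For example, a minimal sketch for inspecting configuration (here
  `:my_app` and `:some_key` are placeholders for your own application
  and configuration key):
      bin/RELEASE_NAME eval "Application.load(:my_app); IO.inspect(Application.get_env(:my_app, :some_key))"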
Another way to run commands is with `rpc`, which will connect to the
system currently running and instruct it to execute the given
expression. This means you need to guarantee the system was already
started and be careful with the instructions you are executing.
You can also use `remote` to connect a remote IEx session to the
system.
#### Helper module
As you operate your system, you may find yourself running some piece of code
as a one-off command quite often. You may consider creating a module to group
these tasks:
# lib/my_app/release_tasks.ex
defmodule MyApp.ReleaseTasks do
def eval_purge_stale_data() do
          # Eval commands need to start the app before
# Or Application.load(:my_app) if you can't start it
Application.ensure_all_started(:my_app)
# Code that purges stale data
...
end
def rpc_print_connected_users() do
          # Code that prints users connected to the current running system
...
end
end
In the example above, we prefixed the function names with the command
name used to execute them, but that is entirely optional.
And to run them:
bin/RELEASE_NAME eval "MyApp.ReleaseTasks.eval_purge_stale_data()"
bin/RELEASE_NAME rpc "MyApp.ReleaseTasks.rpc_print_connected_users()"
### Daemon mode (Unix-like)
You can run the release in daemon mode with the command:
bin/RELEASE_NAME daemon_iex
  In daemon mode, the system is started in the background via
[run_erl](http://erlang.org/doc/man/run_erl.html). You may also
want to enable [heart](http://erlang.org/doc/man/heart.html)
in daemon mode so it automatically restarts the system in case
of crashes. See the generated `releases/RELEASE_VSN/env.sh` file.
The daemon will write all of its standard output to the "tmp/log/"
directory in the release root. A developer can also attach
to the standard input of the daemon by invoking "to_erl tmp/pipe/"
from the release root. However, note that attaching to the system
should be done with extreme care, since the usual commands for
exiting an Elixir system, such as hitting Ctrl+C twice or Ctrl+\\,
will actually shut down the daemon. Therefore, using
`bin/RELEASE_NAME remote` should be preferred, even in daemon mode.
You can customize the tmp directory used both for logging and for
piping in daemon mode by setting the `RELEASE_TMP` environment
variable. See the "Customization" section.
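  For example, to run the daemon with a custom tmp directory (the path
  below is only an illustration):
      RELEASE_TMP=/opt/my_app/tmp bin/RELEASE_NAME daemon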
### Services mode (Windows)
While daemons are not available on Windows, it is possible to install a
released system as a service on Windows with the help of
[erlsrv](http://erlang.org/doc/man/erlsrv.html). This can be done by
running:
bin/RELEASE_NAME install
Once installed, the service must be explicitly managed via the `erlsrv`
executable, which is included in the `erts-VSN/bin` directory.
The service is not started automatically after installing.
For example, if you have a release named `demo`, you can install
the service and then start it from the release root as follows:
bin/demo install
      erts-VSN/bin/erlsrv.exe start demo_demo
The name of the service is `demo_demo` because the name is built
by concatenating the node name with the release name. Since Elixir
automatically uses the same name for both, the service will be
referenced as `demo_demo`.
The `install` command must be executed as an administrator.
### `bin/RELEASE_NAME` commands
The following commands are supported by `bin/RELEASE_NAME`:
start Starts the system
start_iex Starts the system with IEx attached
daemon Starts the system as a daemon (Unix-like only)
daemon_iex Starts the system as a daemon with IEx attached (Unix-like only)
install Installs this system as a Windows service (Windows only)
eval "EXPR" Executes the given expression on a new, non-booted system
rpc "EXPR" Executes the given expression remotely on the running system
remote Connects to the running system via a remote shell
restart Restarts the running system via a remote command
stop Stops the running system via a remote command
pid Prints the operating system PID of the running system via a remote command
version Prints the release name and version to be booted
## Deployments
### Requirements
A release is built on a **host**, a machine which contains Erlang, Elixir,
and any other dependencies needed to compile your application. A release is
then deployed to a **target**, potentially the same machine as the host,
but usually separate, and often there are many targets (either multiple
instances, or the release is deployed to heterogeneous environments).
To deploy straight from a host to a separate target without cross-compilation,
the following must be the same between the host and the target:
* Target architecture (for example, x86_64 or ARM)
* Target vendor + operating system (for example, Windows, Linux, or Darwin/macOS)
* Target ABI (for example, musl or gnu)
This is often represented in the form of target triples, for example,
`x86_64-unknown-linux-gnu`, `x86_64-unknown-linux-musl`, `x86_64-apple-darwin`.
So to be more precise, to deploy straight from a host to a separate target,
the Erlang Runtime System (ERTS), and any native dependencies (NIFs), must
be compiled for the same target triple. If you are building on a MacBook
(`x86_64-apple-darwin`) and trying to deploy to a typical Ubuntu machine
(`x86_64-unknown-linux-gnu`), the release will not work. Instead you should
build the release on a `x86_64-unknown-linux-gnu` host. As we will see, this
can be done in multiple ways, such as releasing on the target itself, or by
using virtual machines or containers, usually as part of your release pipeline.
In addition to matching the target triple, it is also important that the
target has all of the system packages that your application will need at
runtime. A common one is the need for OpenSSL when building an application
that uses `:crypto` or `:ssl`, which is dynamically linked to ERTS. The other
common source for native dependencies like this comes from dependencies
containing NIFs (natively-implemented functions) which may expect to
dynamically link to libraries they use.
Of course, some operating systems and package managers can differ between
versions, so if your goal is to have full compatibility between host and
target, it is best to ensure the operating system and system package manager
have the same versions on host and target. This may even be a requirement in
some systems, especially so with package managers that try to create fully
reproducible environments (Nix, Guix).
Alternatively, you can also bundle the compiled object files in the release,
as long as they were compiled for the same target. If doing so, you need to
update `LD_LIBRARY_PATH` with the paths containing the bundled objects.
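  For example, a sketch that could go in the generated
  `releases/RELEASE_VSN/env.sh` file (the bundled path below is only an
  illustration):
      export LD_LIBRARY_PATH="$RELEASE_ROOT/lib/my_app-1.0.0/priv:$LD_LIBRARY_PATH"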
Currently, there is no official way to cross-compile a release from one
target triple to another, due to the complexities involved in the process.
### Techniques
There are a couple of ways to guarantee that a release is built on a host with
the same properties as the target. A simple option is to fetch the source,
compile the code and assemble the release on the target itself. It would
be something like this:
git clone remote://path/to/my_app.git my_app_source
cd my_app_source
mix deps.get --only prod
MIX_ENV=prod mix release
_build/prod/rel/my_app/bin/my_app start
If you prefer, you can also compile the release to a separate directory,
so you can erase all source after the release is assembled:
git clone remote://path/to/my_app.git my_app_source
cd my_app_source
mix deps.get --only prod
MIX_ENV=prod mix release --path ../my_app_release
cd ../my_app_release
rm -rf ../my_app_source
bin/my_app start
However, this option can be expensive if you have multiple production
nodes or if the release assembling process is a long one, as each node
needs to individually assemble the release.
You can automate this process in a couple different ways. One option
is to make it part of your Continuous Integration (CI) / Continuous
Deployment (CD) pipeline. When you have a CI/CD pipeline, it is common
that the machines in your CI/CD pipeline run on the exact same target
triple as your production servers (if they don't, they should).
In this case, you can assemble the release at the end of your CI/CD
pipeline by calling `MIX_ENV=prod mix release` and push the artifact
to S3 or any other network storage. To perform the deployment, your
production machines can fetch the deployment from the network storage
and run `bin/my_app start`.
Another mechanism to automate deployments is to use images, such as
Amazon Machine Images, or container platforms, such as Docker.
For instance, you can use Docker to run locally a system with the
exact same target triple as your production servers. Inside the
container, you can invoke `MIX_ENV=prod mix release` and build
a complete image and/or container with the operating system, all
dependencies as well as the releases.
In other words, there are multiple ways systems can be deployed and
releases can be automated and incorporated into all of them as long
as you remember to build the system in the same target triple.
Once a system is deployed, shutting down the system can be done by
sending SIGINT/SIGTERM to the system, which is what most containers,
platforms and tools do, or by explicitly invoking `bin/RELEASE_NAME stop`.
Once the system receives the shutdown request, each application and
their respective supervision trees will stop, one by one, in the
opposite order that they were started.
## Customization
There are a couple ways in which developers can customize the generated
artifacts inside a release.
### Options
The following options can be set inside your `mix.exs` on each release definition (a combined example follows the list):
* `:applications` - a keyword list that configures and adds new applications
to the release. The key is the application name and the value is one of:
* `:permanent` - the application is started and the node shuts down
if the application terminates, regardless of reason
* `:transient` - the application is started and the node shuts down
if the application terminates abnormally
* `:temporary` - the application is started and the node does not
shut down if the application terminates
* `:load` - the application is only loaded
* `:none` - the application is part of the release but it is neither loaded nor
started
All applications default to `:permanent`.
By default `:applications` includes the current application and all
applications the current application depends on, recursively.
You can include new applications or change the mode of
existing ones by listing them here. The order of the applications given
in `:applications` will be preserved as much as possible, with only
`:kernel`, `:stdlib`, `:sasl`, and `:elixir` listed before the given
application list.
Releases assembled from an umbrella project require this configuration
to be explicitly given.
* `:strip_beams` - a boolean that controls if BEAM files should have their debug
information, documentation chunks, and other non-essential metadata removed.
Defaults to `true`.
* `:cookie` - a string representing the Erlang Distribution cookie. If this
option is not set, a random cookie is written to the `releases/COOKIE` file
when the first release is assembled. At runtime, we will first attempt
to fetch the cookie from the `RELEASE_COOKIE` environment variable and
then we'll read the `releases/COOKIE` file.
If you are setting this option manually, we recommend the cookie option
to be a long and randomly generated string, such as:
`Base.url_encode64(:crypto.strong_rand_bytes(40))`. We also recommend restricting
the characters in the cookie to the subset returned by `Base.url_encode64/1`.
* `:path` - the path the release should be installed to.
Defaults to `"_build/MIX_ENV/rel/RELEASE_NAME"`.
* `:version` - the release version as a string or `{:from_app, app_name}`.
Defaults to the current application version. The `{:from_app, app_name}` format
can be used to easily reference the application version from another application.
This is particularly useful in umbrella applications.
* `:quiet` - a boolean that controls if releases should write steps to
the standard output. Defaults to `false`.
* `:include_erts` - a boolean, string, or anonymous function of arity zero.
If a boolean, it indicates whether the Erlang Runtime System (ERTS), which
includes the Erlang VM, should be included in the release. The default is
`true`, which is also the recommended value. If a string, it represents
the path to an existing ERTS installation. If an anonymous function of
arity zero, it's a function that returns any of the above (boolean or string).
You may also set this option to `false` if you desire to use the ERTS version installed
on the target. Note, however, that the ERTS version on the target must be
**exactly the same version** as the ERTS version used when the release is assembled. Setting it to
`false` also disables hot code upgrades. Therefore, `:include_erts` should be
set to `false` with caution and only if you are assembling the release on the
same server that runs it.
* `:include_executables_for` - a list of atoms detailing for which Operating
Systems executable files should be generated. By default, it is set to
`[:unix, :windows]`. You can customize those as follows:
releases: [
demo: [
include_executables_for: [:unix] # Or [:windows] or []
]
]
* `:steps` - a list of steps to execute when assembling the release. See
the "Steps" section for more information.
Besides the options above, it is possible to customize the generated
release with custom template files or by tweaking the release steps.
We will detail both approaches next.
### vm.args and env.sh (env.bat)
Developers may want to customize the VM flags and environment variables
given when the release starts. This is typically done by customizing
two files inside your release: `releases/RELEASE_VSN/vm.args` and
`releases/RELEASE_VSN/env.sh` (or `env.bat` on Windows).
However, instead of modifying those files after the release is built,
the simplest way to customize those files is by running `mix release.init`.
The Mix task will copy custom `rel/vm.args.eex`, `rel/env.sh.eex`, and
`rel/env.bat.eex` files to your project root. You can modify those
files and they will be evaluated every time you perform a new release.
Those files are regular EEx templates and they have a single assign,
called `@release`, with the `Mix.Release` struct.
The `vm.args` file may contain any of the VM flags accepted by the [`erl`
command](http://erlang.org/doc/man/erl.html).
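As a sketch, a custom `rel/vm.args.eex` could pin a couple of flags and
interpolate data from the release struct (the flag values below are only
examples, not recommendations):

    ## Limit the number of schedulers (example value)
    +S 2:2
    ## Generated for <%= @release.name %> <%= @release.version %>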
The `env.sh` and `env.bat` files are used to set environment variables.
In there, you can set vars such as `RELEASE_NODE`, `RELEASE_COOKIE`,
and `RELEASE_TMP` to customize your node name, cookie and tmp
directory respectively. Whenever `env.sh` or `env.bat` is invoked,
the variables `RELEASE_ROOT`, `RELEASE_NAME`, `RELEASE_VSN`, and
`RELEASE_COMMAND` have already been set, so you can rely on them.
See the section on environment variables for more information.
Furthermore, while `vm.args` is static, you can use `env.sh` and
`env.bat` to dynamically set VM options. For example, if you want
to make sure the Erlang Distribution listens only on a given port
known at runtime, you can set the following:
case $RELEASE_COMMAND in
start*|daemon*)
ELIXIR_ERL_OPTIONS="-kernel inet_dist_listen_min $BEAM_PORT inet_dist_listen_max $BEAM_PORT"
export ELIXIR_ERL_OPTIONS
;;
*)
;;
esac
Note we only set the port on start/daemon commands. If you also limit
the port on other commands, such as `rpc`, then you will be unable
to establish a remote connection as the port will already be in use
by the node.
On Windows, your `env.bat` would look like this:
IF NOT %RELEASE_COMMAND:start=%==%RELEASE_COMMAND% (
set ELIXIR_ERL_OPTIONS="-kernel inet_dist_listen_min %BEAM_PORT% inet_dist_listen_max %BEAM_PORT%"
)
### Steps
It is possible to add one or more steps before and after the release is
assembled. This can be done with the `:steps` option:
releases: [
demo: [
steps: [&set_configs/1, :assemble, &copy_extra_files/1]
]
]
The `:steps` option must be a list and it must always include the
atom `:assemble`, which does most of the release assembling. You
can pass anonymous functions before and after the `:assemble` to
customize your release assembling pipeline. Those anonymous functions
will receive a `Mix.Release` struct and must return the same or
an updated `Mix.Release` struct. It is also possible to build a tarball
of the release by passing the `:tar` step anywhere after `:assemble`.
The tarball is created in `_build/MIX_ENV/RELEASE_NAME-RELEASE_VSN.tar.gz`.
See `Mix.Release` for more documentation on the struct and which
fields can be modified. Note that the `:steps` field itself can be
modified and is updated every time a step is called. Therefore,
if you need to execute a command before and after assembling the
release, you only need to declare the first steps in your pipeline
and then inject the last step into the release struct. The steps
field can also be used to verify if the step was set before or
after assembling the release.
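For instance, the hypothetical `copy_extra_files/1` step from the example
above could be defined along these lines, assuming a `rel/overlays`
directory in your project (a minimal sketch):

    defp copy_extra_files(release) do
      # Copy any extra files into the release root (rel/overlays is
      # an assumed location) and return the unchanged release struct
      File.cp_r!("rel/overlays", release.path)
      release
    end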
## Application configuration
Releases provides two mechanisms for configuring OTP applications:
build-time and runtime.
### Build-time configuration
Whenever you invoke a `mix` command, Mix loads the configuration
in `config/config.exs`, if said file exists. It is common for the
`config/config.exs` file itself to import other configuration based
on the current `MIX_ENV`, such as `config/dev.exs`, `config/test.exs`,
and `config/prod.exs`. We say that this configuration is a build-time
configuration as it is evaluated whenever you compile your code or
whenever you assemble the release.
In other words, if your configuration does something like:
config :my_app, :secret_key, System.fetch_env!("MY_APP_SECRET_KEY")
The `:secret_key` key under `:my_app` will be computed on the
host machine, whenever the release is built. Setting the
`MY_APP_SECRET_KEY` right before starting your release will have
no effect.
Luckily, releases also provide runtime configuration, which we will
see next.
### Runtime configuration
To enable runtime configuration in your release, all you need to do is
to create a file named `config/releases.exs`:
import Config
config :my_app, :secret_key, System.fetch_env!("MY_APP_SECRET_KEY")
Your `config/releases.exs` file needs to follow three important rules:
* It MUST `import Config` at the top instead of the deprecated `use Mix.Config`
* It MUST NOT import any other configuration file via `import_file`
* It MUST NOT access `Mix` in any way, as `Mix` is a build tool and it is not
available inside releases
If a `config/releases.exs` exists, it will be copied to your release
and executed early in the boot process, when only Elixir and Erlang's
main applications have been started. Once the configuration is loaded,
the Erlang system will be restarted (within the same Operating System
process) and the new configuration will take place.
You can change the path to the runtime configuration file by setting
`:runtime_config_path`. This path is resolved at build time, as the
given configuration file is always copied into the release.
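For example, assuming a hypothetical `rel/runtime.exs` file in your
project, the option could be set as follows:

    releases: [
      demo: [
        # rel/runtime.exs is a hypothetical file in your project
        runtime_config_path: "rel/runtime.exs"
      ]
    ]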
Finally, in order for runtime configuration to work properly (as well
as any other "Config provider" as defined next), it needs to be able
to persist the newly computed configuration to disk. The computed config
file will be written to "tmp" directory inside the release every time
the system boots. You can configure the "tmp" directory by setting the
`RELEASE_TMP` environment variable, either explicitly or inside your
`releases/RELEASE_VSN/env.sh` (or `env.bat` on Windows).
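For example, a writable location could be set inside `env.sh`
(the path below is only an example):

    export RELEASE_TMP=/var/tmp/my_app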
### Config providers
Releases also supports custom mechanisms, called config providers, to load
any sort of runtime configuration to the system while it boots. For instance,
if you need to access a vault or load configuration from a JSON file, it
can be achieved with config providers. The runtime configuration outlined
in the previous section is itself handled by the `Config.Reader` provider.
See the `Config.Provider` module for more information and more examples.
The following options can be set inside your releases key in your `mix.exs`
to control how config providers work:
* `:start_distribution_during_config` - on Erlang/OTP 22+, releases
only start the Erlang VM distribution features after the config files
are evaluated. You can set it to `true` if you need distribution during
configuration. Defaults to `false`.
* `:prune_runtime_sys_config_after_boot` - every time your system boots,
the release will write a config file to your tmp directory. These
configuration files are generally small. But if you are concerned with
disk space or if you have other restrictions, you can ask the system to
remove said config files after boot. The downside is that you will no
longer be able to restart the system internally (neither via
`System.restart/0` nor `bin/RELEASE_NAME start`). If you need a restart,
you will have to terminate the Operating System process and start a new
one. Defaults to `false`.
* `:config_providers` - a list of tuples with custom config providers.
See `Config.Provider` for more information. Defaults to `[]`.
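As a sketch, `Config.Reader` itself can be listed as an additional
provider, here pointing at a hypothetical `/extra_config.exs` file
resolved against the release root at runtime:

    releases: [
      demo: [
        config_providers: [
          # /extra_config.exs is an assumed path inside the release root
          {Config.Reader, {:system, "RELEASE_ROOT", "/extra_config.exs"}}
        ]
      ]
    ]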
### Customization and configuration summary
Generally speaking, the following files are available for customizing
and configuring the running system:
* `config/config.exs` (and `config/prod.exs`) - provides build-time
application configuration, which is executed when the release is
assembled
* `config/releases.exs` - provides runtime application configuration.
It is executed every time the release boots and is further extensible
via config providers
* `rel/vm.args.eex` - a template file that is copied into every release
and provides static configuration of the Erlang Virtual Machine and
other runtime flags
* `rel/env.sh.eex` and `rel/env.bat.eex` - template files that are copied
into every release and are executed on every command to set up environment
variables, including ones specific to the VM, and the general environment
## Directory structure
A release is organized as follows:
bin/
RELEASE_NAME
erts-ERTS_VSN/
lib/
APP_NAME-APP_VSN/
ebin/
include/
priv/
releases/
RELEASE_VSN/
consolidated/
elixir
elixir.bat
env.bat
env.sh
iex
iex.bat
releases.exs
start.boot
start.script
start_clean.boot
start_clean.script
sys.config
vm.args
COOKIE
start_erl.data
tmp/
## Environment variables
The system sets different environment variables. The following variables
are set early on and can only be read by `env.sh` and `env.bat`:
* `RELEASE_ROOT` - points to the root of the release. If the system
includes ERTS, then it is the same as `:code.root_dir/0`. This
variable is always computed and it cannot be set to a custom value
* `RELEASE_COMMAND` - the command given to the release, such as `"start"`,
`"remote"`, `"eval"`, and so on. This is typically accessed inside `env.sh`
and `env.bat` to set different environment variables under different
conditions. Note, however, that `RELEASE_COMMAND` has not been
validated by the time `env.sh` and `env.bat` are called, so it may
be empty or contain invalid values. This variable is always computed
and it cannot be set to a custom value
* `RELEASE_NAME` - the name of the release. It can be set to a custom
value when invoking the release
* `RELEASE_VSN` - the version of the release, otherwise the latest
version is used. It can be set to a custom value when invoking the
release. The custom value must be an existing release version in
the `releases/` directory
The following variables can be set before you invoke the release or
inside `env.sh` and `env.bat` (an example invocation follows the list):
* `RELEASE_COOKIE` - the release cookie. By default uses the value
in `releases/COOKIE`. It can be set to a custom value
* `RELEASE_NODE` - the release node name, in the format `name@host`.
It can be set to a custom value. The name part must be made only
of letters, digits, underscores, and hyphens
* `RELEASE_VM_ARGS` - the location of the vm.args file. It can be set
to a custom path
* `RELEASE_TMP` - the directory in the release to write temporary
files to. It can be set to a custom directory. It defaults to
`$RELEASE_ROOT/tmp`
* `RELEASE_MODE` - if the release should start in embedded or
interactive mode. Defaults to "embedded". It applies only to
start/daemon/install commands
* `RELEASE_DISTRIBUTION` - how we want to run the distribution.
May be `name` (long names), `sname` (short names) or `none`
(distribution is not started automatically). Defaults to
`sname` which allows access only within the current system.
`name` allows external connections. If `name` is used and you are
not running on Erlang/OTP 22 or later, you must set `RELEASE_NODE`
to `[email protected]` with an IP or a known host
* `RELEASE_BOOT_SCRIPT` - the name of the boot script to use when starting
the release. This script is used when running commands such as `start` and
`daemon`. The boot script is expected to be located at the
path `releases/RELEASE_VSN/RELEASE_BOOT_SCRIPT.boot`. Defaults to `start`
* `RELEASE_BOOT_SCRIPT_CLEAN` - the name of the boot script used when
starting the release clean, without your application or its dependencies.
This script is used by commands such as `eval`, `rpc`, and `remote`.
The boot script is expected to be located at the path
`releases/RELEASE_VSN/RELEASE_BOOT_SCRIPT_CLEAN.boot`. Defaults
to `start_clean`
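For example, assuming a release named `demo`, the distribution mode and
node name could be set on invocation (the values are illustrative):

    RELEASE_DISTRIBUTION=name RELEASE_NODE=demo@10.0.0.1 bin/demo start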
## Umbrellas
Releases are well integrated with umbrella projects, allowing you to
release one or more subsets of your umbrella children. The only difference
between performing a release in an umbrella project and in a
regular application is that umbrellas require you to explicitly list
your release and the starting point for each release. For example,
imagine this umbrella application:
my_app_umbrella/
apps/
my_app_core/
my_app_event_processing/
my_app_web/
where both `my_app_event_processing` and `my_app_web` depend on
`my_app_core` but they do not depend on each other.
Inside your umbrella, you can define multiple releases:
releases: [
web_and_event_processing: [
applications: [
my_app_event_processing: :permanent,
my_app_web: :permanent
]
],
web_only: [
applications: [my_app_web: :permanent]
],
event_processing_only: [
applications: [my_app_event_processing: :permanent]
]
]
Note you don't need to define all applications in `:applications`,
only the entry points. Also remember that the recommended mode
for all applications in the system is `:permanent`.
Finally, keep in mind it is not required for you to assemble the
release from the umbrella root. You can also assemble the release
from each child application individually. Doing it from the root,
however, allows you to include two applications that do not depend
on each other as part of the same release.
## Hot Code Upgrades
Erlang and Elixir are sometimes known for the capability of upgrading
a node that is running in production without shutting down that node.
However, this feature is not supported out of the box by Elixir releases.
The reason we don't provide hot code upgrades is that they are very
complicated to perform in practice, as they require careful coding of
your processes and applications as well as extensive testing. Given most
teams can use other techniques that are language agnostic to upgrade
their systems, such as Blue/Green deployments, Canary deployments,
Rolling deployments, and others, hot upgrades are rarely a viable
option. Let's understand why.
In a hot code upgrade, you want to update a node from version A to
version B. To do so, the first step is to write recipes for every application
that changed between those two releases, telling exactly how the application
changed between versions. Those recipes are called `.appup` files.
While some of the steps in building `.appup` files can be automated,
not all of them can. Furthermore, each process in the application needs
to be explicitly coded with hot code upgrades in mind. Let's see an example.
Imagine your application has a counter process as a GenServer:
defmodule Counter do
use GenServer
def start_link(_) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
def bump do
GenServer.call(__MODULE__, :bump)
end
## Callbacks
def init(:ok) do
{:ok, 0}
end
def handle_call(:bump, _from, counter) do
{:reply, :ok, counter + 1}
end
end
You add this process as part of your supervision tree and ship version
0.1.0 of your system. Now let's imagine that on version 0.2.0 you added
two changes: instead of `bump/0`, that always increments the counter by
one, you introduce `bump/1` that passes the exact value to bump the
counter. You also change the state, because you want to store the maximum
bump value:
defmodule Counter do
use GenServer
def start_link(_) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
def bump(by) do
GenServer.call(__MODULE__, {:bump, by})
end
## Callbacks
def init(:ok) do
{:ok, {0, 0}}
end
def handle_call({:bump, by}, _from, {counter, max}) do
{:reply, :ok, {counter + by, max(max, by)}}
end
end
If you were to perform a hot code upgrade in such an application, it would
crash, because in the initial version the state was just a counter
but in the new version the state is a tuple. Furthermore, you changed
the format of the `call` message from `:bump` to `{:bump, by}` and
the process may have both old and new messages temporarily mixed, so
we need to handle both. The final version would be:
defmodule Counter do
use GenServer
def start_link(_) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
def bump(by) do
GenServer.call(__MODULE__, {:bump, by})
end
## Callbacks
def init(:ok) do
{:ok, {0, 0}}
end
def handle_call(:bump, _from, {counter, max}) do
{:reply, :ok, {counter + 1, max(max, 1)}}
end
def handle_call({:bump, by}, _from, {counter, max}) do
{:reply, :ok, {counter + by, max(max, by)}}
end
def code_change(_, counter, _) do
{:ok, {counter, 0}}
end
end
Now you can proceed to list this process in the `.appup` file and
hot code upgrade it. This is one of the many steps necessary
to perform hot code upgrades and it must be taken into account by
every process and application being upgraded in the system.
The [`.appup` cookbook](http://erlang.org/doc/design_principles/appup_cookbook.html)
provides a good reference and more examples.
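As a rough sketch, an `.appup` for the counter change above could look
like this (Erlang terms; the versions are illustrative and the `advanced`
extra is what triggers the `code_change/3` callback):

    %% Illustrative versions; upgrade and downgrade instructions
    {"0.2.0",
     [{"0.1.0", [{update, 'Elixir.Counter', {advanced, []}}]}],
     [{"0.1.0", [{update, 'Elixir.Counter', {advanced, []}}]}]}.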
Once `.appup`s are created, the next step is to create a `.relup`
file with all instructions necessary to update the release itself.
The Erlang documentation provides a chapter on
[Creating and Upgrading a Target System](http://erlang.org/doc/system_principles/create_target.html).
[Learn You Some Erlang has a chapter on hot code upgrades](https://learnyousomeerlang.com/relups).
Overall, there are many steps, complexities and assumptions made
during hot code upgrades, which is ultimately why they are not
provided by Elixir out of the box. However, hot code upgrades can
still be achieved by teams who desire to implement those steps
on top of `mix release` in their projects or as separate libraries.
## Command line options
* `--force` - forces recompilation
* `--no-archives-check` - does not check archive
* `--no-deps-check` - does not check dependencies
* `--no-elixir-version-check` - does not check Elixir version
* `--no-compile` - does not compile before assembling the release
* `--overwrite` - if there is an existing release version, overwrite it
* `--path` - the path of the release
* `--quiet` - does not write progress to the standard output
* `--version` - the version of the release
"""
import Mix.Generator
@switches [
overwrite: :boolean,
force: :boolean,
quiet: :boolean,
path: :string,
version: :string,
compile: :boolean,
deps_check: :boolean,
archives_check: :boolean,
elixir_version_check: :boolean
]
@aliases [
f: :force
]
@impl true
def run(args) do
Mix.Project.get!()
config = Mix.Project.config()
Mix.Task.run("loadpaths", args)
unless "--no-compile" in args do
Mix.Project.compile(args, config)
end
release =
case OptionParser.parse!(args, strict: @switches, aliases: @aliases) do
{overrides, [name]} -> Mix.Release.from_config!(String.to_atom(name), config, overrides)
{overrides, []} -> Mix.Release.from_config!(nil, config, overrides)
{_, _} -> Mix.raise("Expected \"mix release\" or \"mix release NAME\"")
end
if not File.exists?(release.version_path) or
yes?(release, "Release #{release.name}-#{release.version} already exists. Overwrite?") do
run_steps(release)
end
end
defp yes?(release, message) do
release.options[:overwrite] or Mix.shell().yes?(message)
end
defp run_steps(%{steps: [step | steps]} = release) when is_function(step) do
case step.(%{release | steps: steps}) do
%Mix.Release{} = release ->
run_steps(release)
other ->
Mix.raise(
"Expected step #{inspect(step)} to return a Mix.Release, got: #{inspect(other)}"
)
end
end
defp run_steps(%{steps: [:tar | steps]} = release) do
%{release | steps: steps} |> make_tar() |> run_steps()
end
defp run_steps(%{steps: [:assemble | steps]} = release) do
%{release | steps: steps} |> assemble() |> run_steps()
end
defp run_steps(%{steps: []} = release) do
announce(release)
end
defp assemble(release) do
config = Mix.Project.config()
message = "#{release.name}-#{release.version} on MIX_ENV=#{Mix.env()}"
info(release, [:green, "* assembling ", :reset, message])
# releases/
# VERSION/
# consolidated/
# NAME.rel
# start.boot
# start.script
# start_clean.boot
# start_clean.script
# sys.config
# releases/
# COOKIE
# start_erl.data
consolidation_path = build_rel(release, config)
[
# erts-VSN/
:erts,
# releases/VERSION/consolidated
{:consolidated, consolidation_path},
# bin/
# RELEASE_NAME
# RELEASE_NAME.bat
# start
# start.bat
# releases/
# VERSION/
# elixir
# elixir.bat
# iex
# iex.bat
{:executables, Keyword.get(release.options, :include_executables_for, [:unix, :windows])}
# lib/APP_NAME-APP_VSN/
| Map.keys(release.applications)
]
|> Task.async_stream(&copy(&1, release), ordered: false, timeout: :infinity)
|> Stream.run()
release
end
defp make_tar(release) do
tar_filename = "#{release.name}-#{release.version}.tar.gz"
out_path = Path.join([release.path, "..", "..", tar_filename]) |> Path.expand()
info(release, [:green, "* building ", :reset, out_path])
lib_dirs =
Enum.reduce(release.applications, [], fn {name, app_config}, acc ->
vsn = Keyword.fetch!(app_config, :vsn)
[Path.join("lib", "#{name}-#{vsn}") | acc]
end)
release_files =
for basename <- File.ls!(Path.join(release.path, "releases")),
not File.dir?(Path.join([release.path, "releases", basename])),
do: Path.join("releases", basename)
dirs =
["bin", Path.join("releases", release.version), "erts-#{release.erts_version}"] ++
lib_dirs ++ release_files
files =
dirs
|> Enum.filter(&File.exists?(Path.join(release.path, &1)))
|> Enum.map(&{String.to_charlist(&1), String.to_charlist(Path.join(release.path, &1))})
File.rm(out_path)
:ok = :erl_tar.create(String.to_charlist(out_path), files, [:dereference, :compressed])
release
end
# build_rel
defp build_rel(release, config) do
version_path = release.version_path
File.rm_rf!(version_path)
File.mkdir_p!(version_path)
consolidation_path =
if config[:consolidate_protocols] do
Mix.Project.consolidation_path(config)
end
sys_config =
if File.regular?(config[:config_path]) do
config[:config_path] |> Config.Reader.read!()
else
[]
end
release = maybe_add_config_reader_provider(release, version_path)
vm_args_path = Path.join(version_path, "vm.args")
cookie_path = Path.join(release.path, "releases/COOKIE")
start_erl_path = Path.join(release.path, "releases/start_erl.data")
config_provider_path = {:system, "RELEASE_SYS_CONFIG", ".config"}
with :ok <- make_boot_scripts(release, version_path, consolidation_path),
:ok <- make_vm_args(release, vm_args_path),
:ok <- Mix.Release.make_sys_config(release, sys_config, config_provider_path),
:ok <- Mix.Release.make_cookie(release, cookie_path),
:ok <- Mix.Release.make_start_erl(release, start_erl_path) do
consolidation_path
else
{:error, message} ->
File.rm_rf!(version_path)
Mix.raise(message)
end
end
defp maybe_add_config_reader_provider(%{options: opts} = release, version_path) do
path =
cond do
path = opts[:runtime_config_path] ->
path
File.exists?("config/releases.exs") ->
"config/releases.exs"
true ->
nil
end
cond do
path ->
msg = "#{path} to configure the release at runtime"
Mix.shell().info([:green, "* using ", :reset, msg])
File.cp!(path, Path.join(version_path, "releases.exs"))
init = {:system, "RELEASE_ROOT", "/releases/#{release.version}/releases.exs"}
update_in(release.config_providers, &[{Config.Reader, init} | &1])
release.config_providers == [] ->
skipping("runtime configuration (config/releases.exs not found)")
release
true ->
release
end
end
defp make_boot_scripts(release, version_path, consolidation_path) do
prepend_paths =
if consolidation_path do
["$RELEASE_LIB/../releases/#{release.version}/consolidated"]
else
[]
end
results =
for {boot_name, modes} <- release.boot_scripts do
sys_path = Path.join(version_path, Atom.to_string(boot_name))
with :ok <- Mix.Release.make_boot_script(release, sys_path, modes, prepend_paths) do
if boot_name == :start do
rel_path = Path.join(Path.dirname(sys_path), "#{release.name}.rel")
File.rename!(sys_path <> ".rel", rel_path)
else
File.rm(sys_path <> ".rel")
end
:ok
end
end
Enum.find(results, :ok, &(&1 != :ok))
end
defp make_vm_args(release, path) do
if File.exists?("rel/vm.args.eex") do
copy_template("rel/vm.args.eex", path, [release: release], force: true)
else
File.write!(path, vm_args_template(release: release))
end
:ok
end
defp announce(release) do
path = Path.relative_to_cwd(release.path)
cmd = "#{path}/bin/#{release.name}"
info(release, """
Release created at #{path}!
# To start your system
#{cmd} start
Once the release is running:
# To connect to it remotely
#{cmd} remote
# To stop it gracefully (you may also send SIGINT/SIGTERM)
#{cmd} stop
To list all commands:
#{cmd}
""")
end
defp info(release, message) do
unless release.options[:quiet] do
Mix.shell().info(message)
end
end
defp skipping(message) do
Mix.shell().info([:yellow, "* skipping ", :reset, message])
end
## Copy operations
defp copy(:erts, release) do
_ = Mix.Release.copy_erts(release)
:ok
end
defp copy(app, release) when is_atom(app) do
Mix.Release.copy_app(release, app)
end
defp copy({:consolidated, consolidation_path}, release) do
if consolidation_path do
consolidation_target = Path.join(release.version_path, "consolidated")
_ = Mix.Release.copy_ebin(release, consolidation_path, consolidation_target)
end
:ok
end
defp copy({:executables, include_executables_for}, release) do
elixir_bin_path = Application.app_dir(:elixir, "../../bin")
bin_path = Path.join(release.path, "bin")
File.mkdir_p!(bin_path)
for os <- include_executables_for do
{env, env_fun, clis} = cli_for(os, release)
env_path = Path.join(release.version_path, env)
env_template_path = Path.join("rel", env <> ".eex")
if File.exists?(env_template_path) do
copy_template(env_template_path, env_path, [release: release], force: true)
else
File.write!(env_path, env_fun.(release))
end
for {filename, contents} <- clis do
target = Path.join(bin_path, filename)
File.write!(target, contents)
executable!(target)
end
for {filename, contents_fun} <- elixir_cli_for(os, release) do
source = Path.join(elixir_bin_path, filename)
if File.regular?(source) do
target = Path.join(release.version_path, filename)
File.write!(target, contents_fun.(source))
executable!(target)
else
skipping("#{filename} for #{os} (bin/#{filename} not found in the Elixir installation)")
end
end
end
end
defp cli_for(:unix, release) do
{"env.sh", &env_template(release: &1), [{"#{release.name}", cli_template(release: release)}]}
end
defp cli_for(:windows, release) do
{"env.bat", &env_bat_template(release: &1),
[{"#{release.name}.bat", cli_bat_template(release: release)}]}
end
defp elixir_cli_for(:unix, release) do
[
{"elixir",
&(&1
|> File.read!()
|> String.replace(~s[ -pa "$SCRIPT_PATH"/../lib/*/ebin], "")
|> replace_erts_bin(release, ~s["$SCRIPT_PATH"/../../erts-#{release.erts_version}/bin/]))},
{"iex", &File.read!/1}
]
end
defp elixir_cli_for(:windows, release) do
[
{"elixir.bat",
&(&1
|> File.read!()
|> String.replace(~s[goto expand_erl_libs], ~s[goto run])
|> replace_erts_bin(release, ~s[%~dp0\\..\\..\\erts-#{release.erts_version}\\bin\\]))},
{"iex.bat", &File.read!/1}
]
end
defp replace_erts_bin(contents, release, new_path) do
if release.erts_source do
String.replace(contents, ~s[ERTS_BIN=], ~s[ERTS_BIN=#{new_path}])
else
contents
end
end
defp executable!(path), do: File.chmod!(path, 0o744)
embed_template(:vm_args, Mix.Tasks.Release.Init.vm_args_text())
embed_template(:env, Mix.Tasks.Release.Init.env_text())
embed_template(:cli, Mix.Tasks.Release.Init.cli_text())
embed_template(:env_bat, Mix.Tasks.Release.Init.env_bat_text())
embed_template(:cli_bat, Mix.Tasks.Release.Init.cli_bat_text())
end
# ==> web/views/layout_view.ex (repo: sgeos/memo_api, license: CC0-1.0)
defmodule MemoApi.LayoutView do
use MemoApi.Web, :view
end
# ==> lib/yakusu_web/controllers/books/page_controller.ex (repo: jiegillet/yakusu, license: MIT)
defmodule YakusuWeb.Books.PageController do
use YakusuWeb, :controller
alias Yakusu.{Books, Repo}
alias Yakusu.Books.Page
action_fallback YakusuWeb.FallbackController
def index(conn, _params) do
pages = Books.list_pages()
render(conn, "index.json", pages: pages)
end
def get_book_pages(conn, %{"book_id" => book_id}) do
pages =
Books.get_book!(book_id)
|> Books.list_pages()
|> Enum.sort_by(& &1.page_number)
render(conn, "index.json", pages: pages)
end
def create(conn, %{"page" => page_params}) do
with {:ok, %Page{} = page} <- Books.create_page(page_params) do
conn
|> put_status(:created)
|> render("show.json", page: page)
end
end
def show(conn, %{"id" => id}) do
page = Books.get_page!(id)
render(conn, "show.json", page: page)
end
def update(conn, %{"id" => id, "page" => page_params}) do
page = Books.get_page!(id)
with {:ok, %Page{} = page} <- Books.update_page(page, page_params) do
render(conn, "show.json", page: page)
end
end
def delete(conn, %{"id" => id}) do
page = Books.get_page!(id)
with {:ok, %Page{}} <- Books.delete_page(page) do
send_resp(conn, :no_content, "")
end
end
def image(conn, %{"id" => id}) do
page = Repo.get!(Page, id)
conn
|> put_resp_content_type(page.image_type, "utf-8")
|> send_resp(200, page.image)
end
def compress_image(conn, %{"page" => page, "image" => %Plug.Upload{path: path}}) do
page = Jason.decode!(page)
{:ok, image} =
path
|> Yakusu.Mogrify.blur_image()
|> File.read()
response = <<page::size(16)>> <> <<byte_size(image)::size(32)>> <> image
send_resp(conn, :ok, response)
end
@default_attrs %{
"new_pages" => [],
"new_pages_number" => [],
"delete_pages" => [],
"reorder_pages" => [],
"reorder_pages_number" => []
}
def create_pages(conn, attrs) do
# name all arguments, some are optionals
%{
"book_id" => book_id,
"new_pages" => new_pages,
"new_pages_number" => new_pages_number,
"delete_pages" => delete_pages,
"reorder_pages" => reorder_pages,
"reorder_pages_number" => reorder_pages_number
} = Map.merge(@default_attrs, attrs)
# Add new pages
Enum.zip(new_pages, new_pages_number)
|> Enum.each(fn {%Plug.Upload{path: path}, page_number} ->
{:ok, image} = File.read(path)
%{width: width, height: height} =
path
|> Mogrify.open()
|> Mogrify.verbose()
{:ok, _page} =
Books.create_page(%{
page_number: page_number,
book_id: book_id,
image: image,
height: height,
width: width,
image_type: "image/jpg"
})
end)
# Delete old pages
delete_pages
|> Enum.each(fn page_id ->
page_id
|> String.to_integer()
|> Books.get_page!()
|> Books.delete_page()
end)
# Reorder existing pages
Enum.zip(reorder_pages, reorder_pages_number)
|> Enum.each(fn {page_id, page_number} ->
page_id
|> String.to_integer()
|> Books.get_page!()
|> Books.update_page(%{page_number: String.to_integer(page_number)})
end)
send_resp(conn, :created, "")
end
end
# ==> examples/basic/mix.exs (repo: kianmeng/benchee_dsl, license: MIT)
defmodule Basic.MixProject do
use Mix.Project
def project do
[
app: :basic,
version: "0.1.0",
elixir: "~> 1.10",
start_permanent: Mix.env() == :prod,
deps: deps()
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:benchee, "~> 1.0", only: :dev},
{:benchee_dsl, path: "../..", only: :dev}
]
end
end
# ==> lib/visualizer_web/endpoint.ex (repo: ajanes/booster-ui, licenses: CC-BY-3.0, BSD-3-Clause)
defmodule VisualizerWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :visualizer
socket "/socket", VisualizerWeb.UserSocket
# Serve at "/" the static files from "priv/static" directory.
#
# You should set gzip to true if you are running phoenix.digest
# when deploying your static files in production.
plug Plug.Static,
at: "/", from: :visualizer, gzip: false,
only: ~w(css fonts images js favicon.ico robots.txt)
# Code reloading can be explicitly enabled under the
# :code_reloader configuration of your endpoint.
if code_reloading? do
socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket
plug Phoenix.LiveReloader
plug Phoenix.CodeReloader
end
plug Plug.RequestId
plug Plug.Logger
plug Plug.Parsers,
parsers: [:urlencoded, :multipart, :json],
pass: ["*/*"],
json_decoder: Poison
plug Plug.MethodOverride
plug Plug.Head
# The session will be stored in the cookie and signed,
# this means its contents can be read but not tampered with.
# Set :encryption_salt if you would also like to encrypt it.
plug Plug.Session,
store: :cookie,
key: "_visualizer_key",
signing_salt: "xej2MQ2D"
plug VisualizerWeb.Router
@doc """
Callback invoked for dynamically configuring the endpoint.
It receives the endpoint configuration and checks if
configuration should be loaded from the system environment.
"""
def init(_key, config) do
if config[:load_from_system_env] do
port = System.get_env("PORT") || raise "expected the PORT environment variable to be set"
{:ok, Keyword.put(config, :http, [:inet6, port: port])}
else
{:ok, config}
end
end
end
# ==> solomon/web/models/resource.ex (repo: FoxComm/highlander, license: MIT)
defmodule Solomon.Resource do
use Solomon.Web, :model
schema "resources" do
field :name, :string
field :description, :string
field :actions, {:array, :string}
belongs_to :system, Solomon.System
end
end
# ==> mix.exs (repo: maxneuvians/nio_google_authenticator, license: MIT)
defmodule NioGoogleAuthenticator.Mixfile do
use Mix.Project
def project do
[app: :nio_google_authenticator,
version: "2.1.2",
elixir: "~> 1.2",
build_embedded: Mix.env == :prod,
start_permanent: Mix.env == :prod,
description: description(),
package: package(),
deps: deps()]
end
def application do
[]
end
defp deps do
[
{:ecto, "~> 2.0"},
{:pot, "~> 0.9.3"},
{:ex_doc, ">= 0.0.0", only: :dev}
]
end
defp description do
"""
NioGoogleAuthenticator is a collection of Elixir convenience functions
to generate secrets and validate tokens used in conjunction with Google
Authenticator. It also includes functions that automatically add a secret to
an Ecto.Changeset.
"""
end
defp package do
[# These are the default files included in the package
files: ["lib", "mix.exs", "README*", "LICENSE*"],
maintainers: ["Max Neuvians"],
licenses: ["MIT"],
links: %{"GitHub" => "https://github.com/maxneuvians/nio_google_authenticator",
"Docs" => "https://github.com/maxneuvians/nio_google_authenticator"}]
end
end
# ==> config/config.exs (repo: ScrimpyCat/Resx, license: BSD-2-Clause)
# This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
use Mix.Config
# This configuration is loaded before any dependency and is restricted
# to this project. If another project depends on this project, this
# file won't be loaded nor affect the parent project. For this reason,
# if you want to provide default values for your application for
# 3rd-party users, it should be done in your "mix.exs" file.
# You can configure your application as:
#
# config :resx, key: :value
#
# and access this configuration in your application as:
#
# Application.get_env(:resx, :key)
#
# You can also configure a 3rd-party app:
#
# config :logger, level: :info
#
# It is also possible to import configuration files, relative to this
# directory. For example, you can emulate configuration per environment
# by uncommenting the line below and defining dev.exs, test.exs and such.
# Configuration from the imported file will override the ones defined
# here (which is why it is important to import them last).
#
# import_config "#{Mix.env()}.exs"
if Mix.env == :dev do
import_config "simple_markdown_rules.exs"
config :ex_doc, :markdown_processor, ExDocSimpleMarkdown
end
# ==> template_method/report.exs (repo: joshnuss/design-patterns-in-elixir, license: MIT)
defmodule Report do
defmacro __using__(_) do
quote do
@title "Monthly Report"
@text ["Things are going", "really really well"]
def output_report do
output_start
output_head
output_body_start
output_lines
output_body_end
output_end
end
def output_start, do: nil
def output_head, do: nil
def output_body_start, do: nil
def output_lines,
do: Enum.each(@text, &output_line/1)
def output_line(_line),
do: raise(ArgumentError, "not implemented")
def output_body_end, do: nil
def output_end, do: nil
defoverridable output_start: 0,
output_head: 0,
output_body_start: 0,
output_line: 1,
output_body_end: 0,
output_end: 0
end
end
end
# ==> clients/dialogflow/lib/google_api/dialogflow/v2/model/google_cloud_dialogflow_cx_v3_create_version_operation_metadata.ex (repo: mcrumm/elixir-google-api, license: Apache-2.0)
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Dialogflow.V2.Model.GoogleCloudDialogflowCxV3CreateVersionOperationMetadata do
@moduledoc """
Metadata associated with the long running operation for Versions.CreateVersion.
## Attributes
* `version` (*type:* `String.t`, *default:* `nil`) - Name of the created version. Format: `projects//locations//agents//flows//versions/`.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:version => String.t()
}
field(:version)
end
defimpl Poison.Decoder,
for: GoogleApi.Dialogflow.V2.Model.GoogleCloudDialogflowCxV3CreateVersionOperationMetadata do
def decode(value, options) do
GoogleApi.Dialogflow.V2.Model.GoogleCloudDialogflowCxV3CreateVersionOperationMetadata.decode(
value,
options
)
end
end
defimpl Poison.Encoder,
for: GoogleApi.Dialogflow.V2.Model.GoogleCloudDialogflowCxV3CreateVersionOperationMetadata do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
# ==> priv/repo/migrations/20170709060016_add_events_fields.exs (repo: Apps-Team/conferencetools, license: Apache-2.0)
defmodule EventsTools.Repo.Migrations.AddEventsFields do
use Ecto.Migration
def change do
alter table(:events) do
add :background, :text
add :color_accent, :string
add :color_primary, :string
add :color_secondary, :string
add :location, :text
add :news, :text
add :schedule, :text
add :speakers, :text
add :sponsors, :text
add :tagline, :text
add :teaser, :text
add :team, :text
add :venue, :text
end
end
end
# ==> lib/aws/generated/organizations.ex (repo: salemove/aws-elixir, license: Apache-2.0)
# WARNING: DO NOT EDIT, AUTO-GENERATED CODE!
# See https://github.com/aws-beam/aws-codegen for more details.
defmodule AWS.Organizations do
@moduledoc """
AWS Organizations
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Organizations",
api_version: "2016-11-28",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "organizations",
global?: true,
protocol: "json",
service_id: "Organizations",
signature_version: "v4",
signing_name: "organizations",
target_prefix: "AWSOrganizationsV20161128"
}
end
@doc """
Sends a response to the originator of a handshake agreeing to the action
proposed by the handshake request.
This operation can be called only by the following principals when they also
have the relevant IAM permissions:
* **Invitation to join** or **Approve all features request**
handshakes: only a principal from the member account.
The user who calls the API for an invitation to join must have the
`organizations:AcceptHandshake` permission. If you enabled all features in the
organization, the user must also have the `iam:CreateServiceLinkedRole`
permission so that AWS Organizations can create the required service-linked role
named `AWSServiceRoleForOrganizations`. For more information, see [AWS Organizations and Service-Linked
Roles](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integration_services.html#orgs_integration_service-linked-roles)
in the *AWS Organizations User Guide*.
* **Enable all features final confirmation** handshake: only a
principal from the management account.
For more information about invitations, see [Inviting an AWS Account to Join Your
Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_invites.html)
in the *AWS Organizations User Guide.* For more information about requests to
enable all features in the organization, see [Enabling All Features in Your Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
in the *AWS Organizations User Guide.*
After you accept a handshake, it continues to appear in the results of relevant
APIs for only 30 days. After that, it's deleted.
"""
def accept_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AcceptHandshake", input, options)
end
@doc """
Attaches a policy to a root, an organizational unit (OU), or an individual
account.
How the policy affects accounts depends on the type of policy. Refer to the *AWS
Organizations User Guide* for information about each policy type:
* [AISERVICES_OPT_OUT_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html)
* [BACKUP_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html)
* [SERVICE_CONTROL_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html)
* [TAG_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
This operation can be called only from the organization's management account.
"""
def attach_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AttachPolicy", input, options)
end
@doc """
Cancels a handshake.
Canceling a handshake sets the handshake state to `CANCELED`.
This operation can be called only from the account that originated the
handshake. The recipient of the handshake can't cancel it, but can use
`DeclineHandshake` instead. After a handshake is canceled, the recipient can no
longer respond to that handshake.
After you cancel a handshake, it continues to appear in the results of relevant
APIs for only 30 days. After that, it's deleted.
"""
def cancel_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelHandshake", input, options)
end
@doc """
Creates an AWS account that is automatically a member of the organization whose
credentials made the request.
This is an asynchronous request that AWS performs in the background. Because
`CreateAccount` operates asynchronously, it can return a successful completion
message even though account initialization might still be in progress. You might
need to wait a few minutes before you can successfully access the account. To
check the status of the request, do one of the following:
* Use the `Id` member of the `CreateAccountStatus` response element
from this operation to provide as a parameter to the
`DescribeCreateAccountStatus` operation.
* Check the AWS CloudTrail log for the `CreateAccountResult` event.
For information on using AWS CloudTrail with AWS Organizations, see [Monitoring the Activity in Your
Organization](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html)
in the *AWS Organizations User Guide.*
The user who calls the API to create an account must have the
`organizations:CreateAccount` permission. If you enabled all features in the
organization, AWS Organizations creates the required service-linked role named
`AWSServiceRoleForOrganizations`. For more information, see [AWS Organizations and Service-Linked
Roles](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs)
in the *AWS Organizations User Guide*.
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
AWS Organizations preconfigures the new member account with a role (named
`OrganizationAccountAccessRole` by default) that grants users in the management
account administrator permissions in the new member account. Principals in the
management account can assume the role. AWS Organizations clones the company
name and address information for the new account from the organization's
management account.
This operation can be called only from the organization's management account.
For more information about creating accounts, see [Creating an AWS Account in Your
Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)
in the *AWS Organizations User Guide.*
When you create an account in an organization using the AWS
Organizations console, API, or CLI commands, the information required for the
account to operate as a standalone account, such as a payment method and signing
the end user license agreement (EULA) is *not* automatically collected. If you
must remove an account from your organization later, you can do so only after
you provide the missing information. Follow the steps at [ To leave an organization as a member
account](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide*.
If you get an exception that indicates that you exceeded your
account limits for the organization, contact [AWS Support](https://console.aws.amazon.com/support/home#/).
If you get an exception that indicates that the operation failed
because your organization is still initializing, wait one hour and then try
again. If the error persists, contact [AWS Support](https://console.aws.amazon.com/support/home#/).
Using `CreateAccount` to create multiple temporary accounts isn't
recommended. You can only close an account from the Billing and Cost Management
Console, and you must be signed in as the root user. For information on the
requirements and process for closing an account, see [Closing an AWS Account](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
in the *AWS Organizations User Guide*.
When you create a member account with this operation, you can choose whether to
create the account with the **IAM User and Role Access to Billing Information**
switch enabled. If you enable it, IAM users and roles that have appropriate
permissions can view billing information for the account. If you disable it,
only the account root user can access billing information. For information about
how to disable this switch for an account, see [Granting Access to Your Billing Information and
Tools](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html).
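A hedged usage sketch, assuming `client` is an already-built `%Client{}`, that
these wrappers return `{:ok, result, http_response}` on success, and that the
request and response keys follow the AWS Organizations API (the email and
account name are placeholders). The second call polls the status using the `Id`
returned by the first:
    {:ok, %{"CreateAccountStatus" => %{"Id" => request_id}}, _} =
      create_account(client, %{
        "Email" => "new-account@example.com",
        "AccountName" => "Example Account"
      })
    {:ok, status, _} =
      describe_create_account_status(client, %{
        "CreateAccountRequestId" => request_id
      })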
"""
def create_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAccount", input, options)
end
@doc """
This action is available if all of the following are true:
* You're authorized to create accounts in the AWS GovCloud (US)
Region.
For more information on the AWS GovCloud (US) Region, see the [*AWS GovCloud User
Guide*](http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/welcome.html).
* You already have an account in the AWS GovCloud (US) Region that
is paired with a management account of an organization in the commercial Region.
* You call this action from the management account of your
organization in the commercial Region.
* You have the `organizations:CreateGovCloudAccount` permission.
AWS Organizations automatically creates the required service-linked role named
`AWSServiceRoleForOrganizations`. For more information, see [AWS Organizations and Service-Linked
Roles](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs)
in the *AWS Organizations User Guide.*
AWS automatically enables AWS CloudTrail for AWS GovCloud (US) accounts, but you
should also do the following:
* Verify that AWS CloudTrail is enabled to store logs.
* Create an S3 bucket for AWS CloudTrail log storage.
For more information, see [Verifying AWS CloudTrail Is Enabled](http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/verifying-cloudtrail.html)
in the *AWS GovCloud User Guide*.
If the request includes tags, then the requester must have the
`organizations:TagResource` permission. The tags are attached to the commercial
account associated with the GovCloud account, rather than the GovCloud account
itself. To add tags to the GovCloud account, call the `TagResource` operation in
the GovCloud Region after the new GovCloud account exists.
You call this action from the management account of your organization in the
commercial Region to create a standalone AWS account in the AWS GovCloud (US)
Region. After the account is created, the management account of an organization
in the AWS GovCloud (US) Region can invite it to that organization. For more
information on inviting standalone accounts in the AWS GovCloud (US) to join an
organization, see [AWS Organizations](http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html)
in the *AWS GovCloud User Guide.*
Calling `CreateGovCloudAccount` is an asynchronous request that AWS performs in
the background. Because `CreateGovCloudAccount` operates asynchronously, it can
return a successful completion message even though account initialization might
still be in progress. You might need to wait a few minutes before you can
successfully access the account. To check the status of the request, do one of
the following:
* Pass the `OperationId` response element from this operation as a
parameter to the `DescribeCreateAccountStatus` operation.
* Check the AWS CloudTrail log for the `CreateAccountResult` event.
For information on using AWS CloudTrail with Organizations, see [Monitoring the Activity in Your
Organization](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html)
in the *AWS Organizations User Guide.*
When you call the `CreateGovCloudAccount` action, you create two accounts: a
standalone account in the AWS GovCloud (US) Region and an associated account in
the commercial Region for billing and support purposes. The account in the
commercial Region is automatically a member of the organization whose
credentials made the request. Both accounts are associated with the same email
address.
A role is created in the new account in the commercial Region that allows the
management account in the organization in the commercial Region to assume it. An
AWS GovCloud (US) account is then created and associated with the commercial
account that you just created. A role is also created in the new AWS GovCloud
(US) account that can be assumed by the AWS GovCloud (US) account that is
associated with the management account of the commercial organization. For more
information and to view a diagram that explains how account access works, see
[AWS Organizations](http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html)
in the *AWS GovCloud User Guide.*
For more information about creating accounts, see [Creating an AWS Account in Your
Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)
in the *AWS Organizations User Guide.*
When you create an account in an organization using the AWS
Organizations console, API, or CLI commands, the information required for the
account to operate as a standalone account is *not* automatically collected.
This includes a payment method and signing the end user license agreement
(EULA). If you must remove an account from your organization later, you can do
so only after you provide the missing information. Follow the steps at [To leave an organization as a member
account](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide.*
If you get an exception that indicates that you exceeded your
account limits for the organization, contact [AWS Support](https://console.aws.amazon.com/support/home#/).
If you get an exception that indicates that the operation failed
because your organization is still initializing, wait one hour and then try
again. If the error persists, contact [AWS Support](https://console.aws.amazon.com/support/home#/).
Using `CreateGovCloudAccount` to create multiple temporary accounts
isn't recommended. You can only close an account from the AWS Billing and Cost
Management console, and you must be signed in as the root user. For information
on the requirements and process for closing an account, see [Closing an AWS Account](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
in the *AWS Organizations User Guide*.
When you create a member account with this operation, you can choose whether to
create the account with the **IAM User and Role Access to Billing Information**
switch enabled. If you enable it, IAM users and roles that have appropriate
permissions can view billing information for the account. If you disable it,
only the account root user can access billing information. For information about
how to disable this switch for an account, see [Granting Access to Your Billing Information and
Tools](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html).
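A hedged sketch, assuming the same input shape as `create_account` (the values
are placeholders):
    {:ok, result, _} =
      create_gov_cloud_account(client, %{
        "Email" => "govcloud-account@example.com",
        "AccountName" => "Example GovCloud Account"
      })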
"""
def create_gov_cloud_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGovCloudAccount", input, options)
end
@doc """
Creates an AWS organization.
The account whose user is calling the `CreateOrganization` operation
automatically becomes the [management account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#account)
of the new organization.
This operation must be called using credentials from the account that is to
become the new organization's management account. The principal must also have
the relevant IAM permissions.
By default (or if you set the `FeatureSet` parameter to `ALL`), the new
organization is created with all features enabled and service control policies
automatically enabled in the root. If you instead choose to create the
organization supporting only the consolidated billing features by setting the
`FeatureSet` parameter to `CONSOLIDATED_BILLING`, no policy types are enabled
by default, and you can't use organization policies.
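A hedged sketch, assuming `client` is an already-built `%Client{}`:
    {:ok, result, _} = create_organization(client, %{"FeatureSet" => "ALL"})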
"""
def create_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOrganization", input, options)
end
@doc """
Creates an organizational unit (OU) within a root or parent OU.
An OU is a container for accounts that enables you to organize your accounts to
apply policies according to your business requirements. The number of levels
deep that you can nest OUs is dependent upon the policy types enabled for that
root. For service control policies, the limit is five.
For more information about OUs, see [Managing Organizational Units](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_ous.html)
in the *AWS Organizations User Guide.*
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
This operation can be called only from the organization's management account.
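A hedged sketch (the parent ID is a placeholder):
    {:ok, result, _} =
      create_organizational_unit(client, %{
        "ParentId" => "r-examplerootid111",
        "Name" => "Engineering"
      })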
"""
def create_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOrganizationalUnit", input, options)
end
@doc """
Creates a policy of a specified type that you can attach to a root, an
organizational unit (OU), or an individual AWS account.
For more information about policies and their use, see [Managing Organization Policies](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html).
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
This operation can be called only from the organization's management account.
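A hedged sketch that creates a service control policy; the name, description,
and policy document below are placeholders:
    {:ok, result, _} =
      create_policy(client, %{
        "Name" => "DenyLeaveOrganization",
        "Description" => "Prevents member accounts from leaving the organization",
        "Type" => "SERVICE_CONTROL_POLICY",
        "Content" =>
          ~s({"Version":"2012-10-17","Statement":[{"Effect":"Deny","Action":"organizations:LeaveOrganization","Resource":"*"}]})
      })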
"""
def create_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePolicy", input, options)
end
@doc """
Declines a handshake request.
This sets the handshake state to `DECLINED` and effectively deactivates the
request.
This operation can be called only from the account that received the handshake.
The originator of the handshake can use `CancelHandshake` instead. The
originator can't reactivate a declined request, but can reinitiate the process
with a new handshake request.
After you decline a handshake, it continues to appear in the results of relevant
APIs for only 30 days. After that, it's deleted.
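A hedged sketch (the handshake ID is a placeholder):
    {:ok, result, _} =
      decline_handshake(client, %{"HandshakeId" => "h-examplehandshakeid111"})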
"""
def decline_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeclineHandshake", input, options)
end
@doc """
Deletes the organization.
You can delete an organization only by using credentials from the management
account. The organization must be empty of member accounts.
"""
def delete_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOrganization", input, options)
end
@doc """
Deletes an organizational unit (OU) from a root or another OU.
You must first remove all accounts and child OUs from the OU that you want to
delete.
This operation can be called only from the organization's management account.
"""
def delete_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOrganizationalUnit", input, options)
end
@doc """
Deletes the specified policy from your organization.
Before you perform this operation, you must first detach the policy from all
organizational units (OUs), roots, and accounts.
This operation can be called only from the organization's management account.
"""
def delete_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePolicy", input, options)
end
@doc """
Removes the specified member AWS account as a delegated administrator for the
specified AWS service.
Deregistering a delegated administrator can have unintended impacts on the
functionality of the enabled AWS service. See the documentation for the enabled
service before you deregister a delegated administrator so that you understand
any potential impacts.
You can run this action only for AWS services that support this feature. For a
current list of services that support it, see the column *Supports Delegated
Administrator* in the table at [AWS Services that you can use with AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services_list.html)
in the *AWS Organizations User Guide.*
This operation can be called only from the organization's management account.
"""
def deregister_delegated_administrator(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterDelegatedAdministrator", input, options)
end
@doc """
Retrieves AWS Organizations-related information about the specified account.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def describe_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccount", input, options)
end
@doc """
Retrieves the current status of an asynchronous request to create an account.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def describe_create_account_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCreateAccountStatus", input, options)
end
@doc """
Returns the contents of the effective policy for a specified policy type and
account.
The effective policy is the aggregation of any policies of the specified type
that the account inherits, plus any policy of that type that is directly
attached to the account.
This operation applies only to policy types *other* than service control
policies (SCPs).
For more information about policy inheritance, see [How Policy Inheritance Works](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies-inheritance.html)
in the *AWS Organizations User Guide*.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def describe_effective_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEffectivePolicy", input, options)
end
@doc """
Retrieves information about a previously requested handshake.
The handshake ID comes from the response to the original
`InviteAccountToOrganization` operation that generated the handshake.
You can access handshakes that are `ACCEPTED`, `DECLINED`, or `CANCELED` for
only 30 days after they change to that state. They're then deleted and no longer
accessible.
This operation can be called from any account in the organization.
"""
def describe_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHandshake", input, options)
end
@doc """
Retrieves information about the organization that the user's account belongs to.
This operation can be called from any account in the organization.
Even if a policy type is shown as available in the organization, you can disable
it separately at the root level with `DisablePolicyType`. Use `ListRoots` to see
the status of policy types for a specified root.
"""
def describe_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOrganization", input, options)
end
@doc """
Retrieves information about an organizational unit (OU).
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def describe_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOrganizationalUnit", input, options)
end
@doc """
Retrieves information about a policy.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def describe_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePolicy", input, options)
end
@doc """
Detaches a policy from a target root, organizational unit (OU), or account.
If the policy being detached is a service control policy (SCP), the changes to
permissions for AWS Identity and Access Management (IAM) users and roles in
affected accounts are immediate.
Every root, OU, and account must have at least one SCP attached. If you want to
replace the default `FullAWSAccess` policy with an SCP that limits the
permissions that can be delegated, you must attach the replacement SCP before
you can remove the default SCP. This is the authorization strategy of an "[allow list](https://docs.aws.amazon.com/organizations/latest/userguide/SCP_strategies.html#orgs_policies_allowlist)".
If you instead attach a second SCP and leave the `FullAWSAccess` SCP still
attached, and specify `"Effect": "Deny"` in the second SCP to override the
`"Effect": "Allow"` in the `FullAWSAccess` policy (or any other attached SCP),
you're using the authorization strategy of a "[deny list](https://docs.aws.amazon.com/organizations/latest/userguide/SCP_strategies.html#orgs_policies_denylist)".
This operation can be called only from the organization's management account.
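A hedged sketch (both IDs are placeholders):
    {:ok, _result, _} =
      detach_policy(client, %{
        "PolicyId" => "p-examplepolicyid111",
        "TargetId" => "ou-examplerootid111-exampleouid111"
      })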
"""
def detach_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetachPolicy", input, options)
end
@doc """
Disables the integration of an AWS service (the service that is specified by
`ServicePrincipal`) with AWS Organizations.
When you disable integration, the specified service can no longer create a
[service-linked role](http://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)
in *new* accounts in your organization. This means the service can't perform
operations on your behalf on any new accounts in your organization. The service
can still perform operations in older accounts until the service completes its
clean-up from AWS Organizations.
We **strongly recommend** that you don't use this command to disable
integration between AWS Organizations and the specified AWS service. Instead,
use the console or commands that are provided by the specified service. This
lets the trusted service perform any required initialization when enabling
trusted access, such as creating any required resources and any required clean
up of resources when disabling trusted access.
For information about how to disable trusted service access to your organization
using the trusted service, see the **Learn more** link under the **Supports
Trusted Access** column at [AWS services that you can use with AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services_list.html).
If you disable access by using this command, it causes the following actions to
occur:
* The service can no longer create a service-linked role in the
accounts in your organization. This means that the service can't perform
operations on your behalf on any new accounts in your organization. The service
can still perform operations in older accounts until the service completes its
clean-up from AWS Organizations.
* The service can no longer perform tasks in the member accounts in
the organization, unless those operations are explicitly permitted by the IAM
policies that are attached to your roles. This includes any data aggregation
from the member accounts to the management account, or to a delegated
administrator account, where relevant.
* Some services detect this and clean up any remaining data or
resources related to the integration, while other services stop accessing the
organization but leave any historical data and configuration in place to support
a possible re-enabling of the integration.
Using the other service's console or commands to disable the integration ensures
that the other service is aware that it can clean up any resources that are
required only for the integration. How the service cleans up its resources in
the organization's accounts depends on that service. For more information, see
the documentation for the other AWS service.
After you perform the `DisableAWSServiceAccess` operation, the specified service
can no longer perform operations in your organization's accounts.
For more information about integrating other services with AWS Organizations,
including the list of services that work with Organizations, see [Integrating AWS Organizations with Other AWS
Services](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *AWS Organizations User Guide.*
This operation can be called only from the organization's management account.
"""
def disable_aws_service_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableAWSServiceAccess", input, options)
end
@doc """
Disables an organizational policy type in a root.
A policy of a certain type can be attached to entities in a root only if that
type is enabled in the root. After you perform this operation, you no longer can
attach policies of the specified type to that root or to any organizational unit
(OU) or account in that root. You can undo this by using the `EnablePolicyType`
operation.
This is an asynchronous request that AWS performs in the background. If you
disable a policy type for a root, it still appears enabled for the organization
if [all features](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
are enabled for the organization. AWS recommends that you first use `ListRoots`
to see the status of policy types for a specified root, and then use this
operation.
This operation can be called only from the organization's management account.
To view the status of available policy types in the organization, use
`DescribeOrganization`.
"""
def disable_policy_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisablePolicyType", input, options)
end
@doc """
Enables all features in an organization.
This enables the use of organization policies that can restrict the services and
actions that can be called in each account. Until you enable all features, you
have access only to consolidated billing, and you can't use any of the advanced
account administration features that AWS Organizations supports. For more
information, see [Enabling All Features in Your Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
in the *AWS Organizations User Guide.*
This operation is required only for organizations that were created explicitly
with only the consolidated billing features enabled. Calling this operation
sends a handshake to every invited account in the organization. The feature set
change can be finalized and the additional features enabled only after all
administrators in the invited accounts approve the change by accepting the
handshake.
After you enable all features, you can separately enable or disable individual
policy types in a root using `EnablePolicyType` and `DisablePolicyType`. To see
the status of policy types in a root, use `ListRoots`.
After all invited member accounts accept the handshake, you finalize the feature
set change by accepting the handshake that contains `"Action":
"ENABLE_ALL_FEATURES"`. This completes the change.
After you enable all features in your organization, the management account in
the organization can apply policies on all member accounts. These policies can
restrict what users and even administrators in those accounts can do. The
management account can apply policies that prevent accounts from leaving the
organization. Ensure that your account administrators are aware of this.
This operation can be called only from the organization's management account.
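A hedged sketch; the operation needs no input parameters, and the response is
assumed to carry the initiated handshake:
    {:ok, %{"Handshake" => handshake}, _} = enable_all_features(client, %{})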
"""
def enable_all_features(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableAllFeatures", input, options)
end
@doc """
Enables the integration of an AWS service (the service that is specified by
`ServicePrincipal`) with AWS Organizations.
When you enable integration, you allow the specified service to create a
[service-linked role](http://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)
in all the accounts in your organization. This allows the service to perform
operations on your behalf in your organization and its accounts.
We recommend that you enable integration between AWS Organizations and the
specified AWS service by using the console or commands that are provided by the
specified service. Doing so ensures that the service is aware that it can create
the resources that are required for the integration. How the service creates
those resources in the organization's accounts depends on that service. For more
information, see the documentation for the other AWS service.
For more information about enabling services to integrate with AWS
Organizations, see [Integrating AWS Organizations with Other AWS Services](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *AWS Organizations User Guide.*
This operation can be called only from the organization's management account and
only if the organization has [enabled all features](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html).
"""
def enable_aws_service_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableAWSServiceAccess", input, options)
end
@doc """
Enables a policy type in a root.
After you enable a policy type in a root, you can attach policies of that type
to the root, any organizational unit (OU), or account in that root. You can undo
this by using the `DisablePolicyType` operation.
This is an asynchronous request that AWS performs in the background. AWS
recommends that you first use `ListRoots` to see the status of policy types for
a specified root, and then use this operation.
This operation can be called only from the organization's management account.
You can enable a policy type in a root only if that policy type is available in
the organization. To view the status of available policy types in the
organization, use `DescribeOrganization`.
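A hedged sketch (the root ID is a placeholder):
    {:ok, result, _} =
      enable_policy_type(client, %{
        "RootId" => "r-examplerootid111",
        "PolicyType" => "SERVICE_CONTROL_POLICY"
      })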
"""
def enable_policy_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnablePolicyType", input, options)
end
@doc """
Sends an invitation to another account to join your organization as a member
account.
AWS Organizations sends email on your behalf to the email address that is
associated with the other account's owner. The invitation is implemented as a
`Handshake` whose details are in the response.
You can invite AWS accounts only from the same seller as the
management account. For example, if your organization's management account was
created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India,
you can invite only other AISPL accounts to your organization. You can't combine
accounts from AISPL and AWS or from any other AWS seller. For more information,
see [Consolidated Billing in India](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html).
If you receive an exception that indicates that you exceeded your
account limits for the organization or that the operation failed because your
organization is still initializing, wait one hour and then try again. If the
error persists after an hour, contact [AWS Support](https://console.aws.amazon.com/support/home#/).
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
This operation can be called only from the organization's management account.
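A hedged sketch that invites an account by ID (the ID is a placeholder):
    {:ok, %{"Handshake" => handshake}, _} =
      invite_account_to_organization(client, %{
        "Target" => %{"Id" => "123456789012", "Type" => "ACCOUNT"}
      })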
"""
def invite_account_to_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "InviteAccountToOrganization", input, options)
end
@doc """
Removes a member account from its parent organization.
This version of the operation is performed by the account that wants to leave.
To remove a member account as a user in the management account, use
`RemoveAccountFromOrganization` instead.
This operation can be called only from a member account in the organization.
The management account in an organization with all features enabled
can set service control policies (SCPs) that can restrict what administrators of
member accounts can do. This includes preventing them from successfully calling
`LeaveOrganization` and leaving the organization.
You can leave an organization as a member account only if the
account is configured with the information required to operate as a standalone
account. When you create an account in an organization using the AWS
Organizations console, API, or CLI commands, the information required of
standalone accounts is *not* automatically collected. For each account that you
want to make standalone, you must perform the following steps. If any of the
steps are already completed for this account, that step doesn't appear.
* Choose a support plan.
* Provide and verify the required contact information.
* Provide a current payment method.
AWS uses the payment method to charge for any billable (not free tier) AWS
activity that occurs while the account isn't attached to an organization. Follow
the steps at [To leave an organization when all required account information has not yet been
provided](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide.*
The account that you want to leave must not be a delegated
administrator account for any AWS service enabled for your organization. If the
account is a delegated administrator, you must first change the delegated
administrator account to another account that is remaining in the organization.
You can leave an organization only after you enable IAM user access
to billing in your account. For more information, see [Activating Access to the Billing and Cost Management
Console](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
in the *AWS Billing and Cost Management User Guide.*
After the account leaves the organization, all tags that were
attached to the account object in the organization are deleted. AWS accounts
outside of an organization do not support tags.
"""
def leave_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "LeaveOrganization", input, options)
end
@doc """
Lists all the accounts in the organization.
To request only the accounts in a specified root or organizational unit (OU),
use the `ListAccountsForParent` operation instead.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
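A hedged pagination sketch, written as a private helper you might define in your
own module; it assumes the `{:ok, result, http_response}` success shape and the
`"Accounts"` and `"NextToken"` response keys, and the same pattern applies to
the other `List*` operations here:
    defp all_accounts(client, token, acc) do
      # Pass NextToken only when continuing a previous page.
      input = if token, do: %{"NextToken" => token}, else: %{}
      {:ok, result, _} = list_accounts(client, input)
      acc = acc ++ Map.get(result, "Accounts", [])
      case result["NextToken"] do
        nil -> acc
        next -> all_accounts(client, next, acc)
      end
    end
    # First call: all_accounts(client, nil, [])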
"""
def list_accounts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAccounts", input, options)
end
@doc """
Lists the accounts in an organization that are contained by the specified target
root or organizational unit (OU).
If you specify the root, you get a list of all the accounts that aren't in any
OU. If you specify an OU, you get a list of all the accounts in only that OU and
not in any child OUs. To get a list of all accounts in the organization, use the
`ListAccounts` operation.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_accounts_for_parent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAccountsForParent", input, options)
end
@doc """
Returns a list of the AWS services that you enabled to integrate with your
organization.
After a service on this list creates the resources that it requires for the
integration, it can perform operations on your organization and its accounts.
For more information about integrating other services with AWS Organizations,
including the list of services that currently work with Organizations, see
[Integrating AWS Organizations with Other AWS Services](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *AWS Organizations User Guide.*
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_aws_service_access_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ListAWSServiceAccessForOrganization",
input,
options
)
end
@doc """
Lists all of the organizational units (OUs) or accounts that are contained in
the specified parent OU or root.
This operation, along with `ListParents`, enables you to traverse the tree
structure that makes up this root.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_children(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListChildren", input, options)
end
@doc """
Lists the account creation requests that are currently being tracked for the
organization and that match the specified status.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_create_account_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCreateAccountStatus", input, options)
end
@doc """
Lists the AWS accounts that are designated as delegated administrators in this
organization.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_delegated_administrators(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDelegatedAdministrators", input, options)
end
@doc """
Lists the AWS services for which the specified account is a delegated
administrator.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_delegated_services_for_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDelegatedServicesForAccount", input, options)
end
@doc """
Lists the current handshakes that are associated with the account of the
requesting user.
Handshakes that are `ACCEPTED`, `DECLINED`, or `CANCELED` appear in the results
of this API for only 30 days after changing to that state. After that, they're
deleted and no longer accessible.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called from any account in the organization.
"""
def list_handshakes_for_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHandshakesForAccount", input, options)
end
@doc """
Lists the handshakes that are associated with the organization that the
requesting user is part of.
The `ListHandshakesForOrganization` operation returns a list of handshake
structures. Each structure contains details and status about a handshake.
Handshakes that are `ACCEPTED`, `DECLINED`, or `CANCELED` appear in the results
of this API for only 30 days after changing to that state. After that, they're
deleted and no longer accessible.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_handshakes_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHandshakesForOrganization", input, options)
end
@doc """
Lists the organizational units (OUs) in a parent organizational unit or root.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_organizational_units_for_parent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListOrganizationalUnitsForParent", input, options)
end
@doc """
Lists the root or organizational units (OUs) that serve as the immediate parent
of the specified child OU or account.
This operation, along with `ListChildren`, enables you to traverse the tree
structure that makes up this root.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
In the current release, a child can have only a single parent.
"""
def list_parents(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListParents", input, options)
end
@doc """
Retrieves the list of all policies of a specified type in an organization.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_policies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPolicies", input, options)
end
@doc """
Lists the policies that are directly attached to the specified target root,
organizational unit (OU), or account.
You must specify the policy type that you want included in the returned list.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_policies_for_target(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPoliciesForTarget", input, options)
end
@doc """
Lists the roots that are defined in the current organization.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
Policy types can be enabled and disabled in roots. This is distinct from whether
they're available in the organization. When you enable all features, you make
policy types available for use in that organization. Individual policy types can
then be enabled and disabled in a root. To see the availability of a policy type
in an organization, use `DescribeOrganization`.
"""
def list_roots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRoots", input, options)
end
@doc """
Lists tags that are attached to the specified resource.
You can attach tags to the following resources in AWS Organizations.
* AWS account
* Organization root
* Organizational unit (OU)
* Policy (any type)
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Lists all the roots, organizational units (OUs), and accounts that the specified
policy is attached to.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an AWS service.
"""
def list_targets_for_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTargetsForPolicy", input, options)
end
@doc """
Moves an account from its current source parent root or organizational unit (OU)
to the specified destination parent root or OU.
This operation can be called only from the organization's management account.
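A hedged sketch (all IDs are placeholders):
    {:ok, _result, _} =
      move_account(client, %{
        "AccountId" => "123456789012",
        "SourceParentId" => "r-examplerootid111",
        "DestinationParentId" => "ou-examplerootid111-exampleouid111"
      })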
"""
def move_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MoveAccount", input, options)
end
@doc """
Enables the specified member account to administer the Organizations features of
the specified AWS service.
It grants read-only access to AWS Organizations service data. The account still
requires IAM permissions to access and administer the AWS service.
You can run this action only for AWS services that support this feature. For a
current list of services that support it, see the column *Supports Delegated
Administrator* in the table at [AWS Services that you can use with AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services_list.html)
in the *AWS Organizations User Guide.*
This operation can be called only from the organization's management account.
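A hedged sketch (the account ID and service principal are placeholders):
    {:ok, _result, _} =
      register_delegated_administrator(client, %{
        "AccountId" => "123456789012",
        "ServicePrincipal" => "config.amazonaws.com"
      })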
"""
def register_delegated_administrator(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterDelegatedAdministrator", input, options)
end
@doc """
Removes the specified account from the organization.
The removed account becomes a standalone account that isn't a member of any
organization. It's no longer subject to any policies and is responsible for its
own bill payments. The organization's management account is no longer charged
for any expenses accrued by the member account after it's removed from the
organization.
This operation can be called only from the organization's management account.
Member accounts can remove themselves with `LeaveOrganization` instead.
You can remove an account from your organization only if the
account is configured with the information required to operate as a standalone
account. When you create an account in an organization using the AWS
Organizations console, API, or CLI commands, the information required of
standalone accounts is *not* automatically collected. For an account that you
want to make standalone, you must choose a support plan, provide and verify the
required contact information, and provide a current payment method. AWS uses the
payment method to charge for any billable (not free tier) AWS activity that
occurs while the account isn't attached to an organization. To remove an account
that doesn't yet have this information, you must sign in as the member account
and follow the steps at [To leave an organization when all required account information has not yet been
provided](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *AWS Organizations User Guide.*
The account that you want to leave must not be a delegated
administrator account for any AWS service enabled for your organization. If the
account is a delegated administrator, you must first change the delegated
administrator account to another account that is remaining in the organization.
After the account leaves the organization, all tags that were
attached to the account object in the organization are deleted. AWS accounts
outside of an organization do not support tags.
"""
def remove_account_from_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveAccountFromOrganization", input, options)
end
@doc """
Adds one or more tags to the specified resource.
Currently, you can attach tags to the following resources in AWS Organizations.
* AWS account
* Organization root
* Organizational unit (OU)
* Policy (any type)
This operation can be called only from the organization's management account.
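A hedged sketch (the resource ID and tag are placeholders):
    {:ok, _result, _} =
      tag_resource(client, %{
        "ResourceId" => "123456789012",
        "Tags" => [%{"Key" => "department", "Value" => "engineering"}]
      })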
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes any tags with the specified keys from the specified resource.
You can attach tags to the following resources in AWS Organizations.
* AWS account
* Organization root
* Organizational unit (OU)
* Policy (any type)
This operation can be called only from the organization's management account.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Renames the specified organizational unit (OU).
The ID and ARN don't change. The child OUs and accounts remain in place, and any
attached policies of the OU remain attached.
This operation can be called only from the organization's management account.
"""
def update_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateOrganizationalUnit", input, options)
end
@doc """
Updates an existing policy with a new name, description, or content.
If you don't supply any parameter, that value remains unchanged. You can't
change a policy's type.
This operation can be called only from the organization's management account.
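A hedged sketch that renames a policy (the ID and name are placeholders):
    {:ok, result, _} =
      update_policy(client, %{
        "PolicyId" => "p-examplepolicyid111",
        "Name" => "RenamedPolicy"
      })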
"""
def update_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePolicy", input, options)
end
end
| 48.753527 | 194 | 0.767345 |
7331bb49769a994fc81808ce874c3f93080979f0
| 262 |
ex
|
Elixir
|
apps/ae_socket_connector/lib/contract_helper.ex
|
davidyuk/ae-channel-service
|
bfde1d3e6ca4fc6bcc4a387381d4838cd3f1ec42
|
[
"0BSD"
] | 9 |
2019-04-24T17:21:18.000Z
|
2021-07-19T16:41:33.000Z
|
apps/ae_socket_connector/lib/contract_helper.ex
|
davidyuk/ae-channel-service
|
bfde1d3e6ca4fc6bcc4a387381d4838cd3f1ec42
|
[
"0BSD"
] | 68 |
2019-08-06T09:25:13.000Z
|
2021-03-10T11:04:41.000Z
|
apps/ae_socket_connector/lib/contract_helper.ex
|
isabella232/ae-channel-service
|
bfde1d3e6ca4fc6bcc4a387381d4838cd3f1ec42
|
[
"ISC"
] | 6 |
2019-10-23T18:23:48.000Z
|
2022-03-06T07:02:45.000Z
|
defmodule ContractHelper do
def to_sophia_bytes(binary) when is_binary(binary) do
"#" <> Base.encode16(binary)
end
  def add_quotes(b) when is_binary(b), do: <<"\"", b::binary, "\"">>
  # `++/2` concatenates lists, not binaries, so charlist input is quoted with
  # charlist literals rather than string literals.
  def add_quotes(str) when is_list(str), do: '"' ++ str ++ '"'
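  # Usage sketch (values are hypothetical):
  #
  #     ContractHelper.to_sophia_bytes(<<1, 255>>) #=> "#01FF"
  #     ContractHelper.add_quotes("alice")         #=> "\"alice\""
  #     ContractHelper.add_quotes('alice')         #=> '"alice"'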
end
| 29.111111 | 68 | 0.633588 |
7331c9e73c1f78ea2739f380740af1f2bd29bdd2
| 62 |
exs
|
Elixir
|
test/snowplow_tracker/emitters/server_test.exs
|
WTTJ/snowplow-elixir-tracker
|
86b6301b4b98b04fca0d79ab2f4fca03a5e748c1
|
[
"Apache-2.0"
] | 6 |
2018-04-30T11:02:45.000Z
|
2021-06-18T19:09:39.000Z
|
test/snowplow_tracker/emitters/server_test.exs
|
WTTJ/snowplow-elixir-tracker
|
86b6301b4b98b04fca0d79ab2f4fca03a5e748c1
|
[
"Apache-2.0"
] | 15 |
2018-05-23T05:26:27.000Z
|
2020-07-12T10:12:32.000Z
|
test/snowplow_tracker/emitters/server_test.exs
|
WTTJ/snowplow-elixir-tracker
|
86b6301b4b98b04fca0d79ab2f4fca03a5e748c1
|
[
"Apache-2.0"
] | 3 |
2020-06-05T23:30:14.000Z
|
2021-06-22T08:14:35.000Z
|
defmodule Snowplow.Events.ServerTest do
use ExUnit.Case
end
| 15.5 | 39 | 0.822581 |
7331eeb9666886cd9278848331c2c4ea4b857930
| 4,542 |
ex
|
Elixir
|
lib/con_cache/lock.ex
|
pap/con_cache
|
2e13b6abd7de76dd378017ee3ef972c48916097c
|
[
"MIT"
] | null | null | null |
lib/con_cache/lock.ex
|
pap/con_cache
|
2e13b6abd7de76dd378017ee3ef972c48916097c
|
[
"MIT"
] | null | null | null |
lib/con_cache/lock.ex
|
pap/con_cache
|
2e13b6abd7de76dd378017ee3ef972c48916097c
|
[
"MIT"
] | null | null | null |
defmodule ConCache.Lock do
@moduledoc false
alias ConCache.Lock.Resource
alias ConCache.Lock.Monitors
@type key :: any
@type result :: any
@type job :: (() -> result)
defstruct resources: HashDict.new, monitors: Monitors.new
use ExActor.Tolerant
@spec start :: {:ok, pid}
@spec start_link :: {:ok, pid}
defstart start(initial_state \\ nil), gen_server_opts: :runtime
defstart start_link(initial_state \\ nil), gen_server_opts: :runtime do
initial_state(initial_state || %__MODULE__{})
end
defcast stop, do: stop_server(:normal)
@spec exec(pid | atom, key, timeout, job) :: result
@spec exec(pid | atom, key, job) :: result
def exec(server, id, timeout \\ 5000, fun) do
lock_instance = make_ref
try do
:acquired = lock(server, id, lock_instance, timeout)
fun.()
after
unlock(server, id, lock_instance, self)
end
end
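  # Usage sketch (names are hypothetical): runs the function while holding an
  # exclusive lock on the key, releasing the lock even if the function raises:
  #
  #     {:ok, lock} = ConCache.Lock.start_link()
  #     ConCache.Lock.exec(lock, :some_key, 5_000, fn -> do_critical_work() end)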
@spec try_exec(pid | atom, key, job) :: result | {:lock, :not_acquired}
@spec try_exec(pid | atom, key, timeout, job) :: result | {:lock, :not_acquired}
def try_exec(server, id, timeout \\ 5000, fun) do
lock_instance = make_ref
try do
case try_lock(server, id, lock_instance, timeout) do
:acquired -> fun.()
:not_acquired -> {:lock, :not_acquired}
end
after
unlock(server, id, lock_instance, self)
end
end
defcallp try_lock(id, lock_instance), from: {caller_pid, _} = from, state: state, timeout: timeout do
resource = resource(state, id)
if Resource.can_lock?(resource, caller_pid) do
add_resource_owner(state, id, lock_instance, resource, from)
else
reply(:not_acquired)
end
end
defcallp lock(id, lock_instance), from: from, state: state, timeout: timeout do
add_resource_owner(state, id, lock_instance, resource(state, id), from)
end
defp add_resource_owner(state, id, lock_instance, resource, {caller_pid, _} = from) do
state
|> inc_monitor_ref(caller_pid, lock_instance)
|> handle_resource_change(id, Resource.inc_lock(resource, lock_instance, caller_pid, from))
|> new_state
end
defcastp unlock(id, lock_instance, caller_pid), state: state do
state
|> dec_monitor_ref(caller_pid, lock_instance)
|> handle_resource_change(id, Resource.dec_lock(resource(state, id), lock_instance, caller_pid))
|> new_state
end
defp handle_resource_change(state, id, resource_change_result) do
resource = maybe_notify_caller(resource_change_result)
store_resource(state, id, resource)
end
defp maybe_notify_caller({:not_acquired, resource}), do: resource
defp maybe_notify_caller({{:acquired, from}, resource}) do
if Process.alive?(Resource.owner(resource)) do
GenServer.reply(from, :acquired)
resource
else
remove_caller_from_resource(resource, Resource.owner(resource))
end
end
defp remove_caller_from_resource(resource, caller_pid) do
resource
|> Resource.remove_caller(caller_pid)
|> maybe_notify_caller
end
defp remove_caller_from_all_resources(%__MODULE__{resources: resources} = state, caller_pid) do
Enum.reduce(resources, state,
fn({id, resource}, state_acc) ->
store_resource(
state_acc,
id,
remove_caller_from_resource(resource, caller_pid)
)
end
)
end
defp resource(%__MODULE__{resources: resources}, id) do
case HashDict.fetch(resources, id) do
{:ok, resource} -> resource
:error -> Resource.new
end
end
defp store_resource(%__MODULE__{resources: resources} = state, id, resource) do
if Resource.empty?(resource) do
%__MODULE__{state | resources: HashDict.delete(resources, id)}
else
%__MODULE__{state | resources: HashDict.put(resources, id, resource)}
end
end
defp inc_monitor_ref(%__MODULE__{monitors: monitors} = state, caller_pid, lock_instance) do
%__MODULE__{state | monitors: Monitors.inc_ref(monitors, caller_pid, lock_instance)}
end
defp dec_monitor_ref(%__MODULE__{monitors: monitors} = state, caller_pid, lock_instance) do
%__MODULE__{state | monitors: Monitors.dec_ref(monitors, caller_pid, lock_instance)}
end
defp unmonitor(%__MODULE__{monitors: monitors} = state, caller_pid) do
%__MODULE__{state | monitors: Monitors.remove(monitors, caller_pid)}
end
defhandleinfo {:DOWN, _, _, caller_pid, _}, state: state do
state
|> unmonitor(caller_pid)
|> remove_caller_from_all_resources(caller_pid)
|> new_state
end
defhandleinfo _, do: noreply
end
| 30.483221 | 103 | 0.696389 |