defmodule SSTable.Zip do
defmodule Index do
defstruct [:key, :offset]
end
defmodule ChunkAccum do
defstruct [:payload, :current_chunk, :chunk_key, :chunk_offset, :index, :current_offset]
def empty do
%__MODULE__{
payload: <<>>,
current_chunk: <<>>,
chunk_key: nil,
chunk_offset: nil,
index: [],
current_offset: 0
}
end
end
# wild guess at how much uncompressed data we should read in before gzipping
@uncompressed_data_chunk SSTable.Settings.unzipped_data_chunk()
def zip(kvs) do
{payload, reversed_index} =
case kvs
|> Enum.reduce(
ChunkAccum.empty(),
fn {key, value},
%ChunkAccum{
payload: payload,
current_chunk: current_chunk,
chunk_key: chunk_key,
chunk_offset: chunk_offset,
index: index,
current_offset: current_offset
} ->
kv_bin = SSTable.KV.to_binary(key, value)
{next_chunk_key, next_chunk_offset} =
case {chunk_key, chunk_offset} do
{nil, nil} -> {key, current_offset}
nck -> nck
end
if byte_size(current_chunk) + byte_size(kv_bin) >= @uncompressed_data_chunk do
gzip_chunk = :zlib.gzip(current_chunk <> kv_bin)
chunk_size = <<byte_size(gzip_chunk)::32>>
%ChunkAccum{
payload: payload <> chunk_size <> gzip_chunk,
current_chunk: <<>>,
index: [{next_chunk_key, next_chunk_offset} | index],
chunk_key: nil,
chunk_offset: nil,
current_offset:
current_offset + SSTable.Settings.gzip_length_bytes() + byte_size(gzip_chunk)
}
else
%ChunkAccum{
payload: payload,
current_chunk: current_chunk <> kv_bin,
index: index,
chunk_key: next_chunk_key,
chunk_offset: next_chunk_offset,
current_offset: current_offset
}
end
end
) do
c when byte_size(c.payload) == 0 ->
gzip_one_chunk = :zlib.gzip(c.current_chunk)
chunk_size = <<byte_size(gzip_one_chunk)::32>>
{chunk_size <> gzip_one_chunk, [{c.chunk_key, c.chunk_offset} | c.index]}
%ChunkAccum{
payload: leftover,
current_chunk: current_chunk,
index: index,
chunk_key: chunk_key,
chunk_offset: chunk_offset
}
when byte_size(current_chunk) > 0 ->
gzip_chunk = :zlib.gzip(current_chunk)
chunk_size = <<byte_size(gzip_chunk)::32>>
{leftover <> chunk_size <> gzip_chunk, [{chunk_key, chunk_offset} | index]}
c ->
{c.payload, c.index}
end
{payload, Enum.reverse(reversed_index)}
end
end
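# Usage sketch (hypothetical key/value pairs; assumes `SSTable.KV` and
# `SSTable.Settings` are configured as elsewhere in this repo):
#
#     {payload, index} = SSTable.Zip.zip([{"a", "1"}, {"b", "2"}])
#
# `payload` is a sequence of `<<gzip_size::32, gzipped_chunk::binary>>` blocks,
# and `index` lists `{first_key_in_chunk, chunk_byte_offset}` pairs in
# ascending offset order.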
# ---- source: lib/august_db/sstable/zip.ex ----
defmodule Wobserver.Page do
@moduledoc """
Page management for custom commands and pages in api and wobserver.
"""
alias Wobserver.Page
@pages_table :wobserver_pages
@typedoc ~S"""
Accepted page formats.
"""
@type data ::
Page.t
| map
| {String.t, atom, fun}
| {String.t, atom, fun, boolean}
@typedoc ~S"""
Page structure.
Fields:
- `title`, the name of the page. Is used for the web interface menu.
- `command`, single atom to associate the page with.
- `callback`, function to be evaluated when the API is called or the page is viewed.
The result is converted to JSON and displayed.
- `options`, map containing options for the page.
Options:
- `api_only` (`boolean`), if set to true the page won't show up in the web interface, but will only be available as API.
- `refresh` (`float`, 0-1), sets the refresh time factor. Used in the web interface to refresh the data on the page. Set to `0` for no refresh.
"""
@type t :: %__MODULE__{
title: String.t,
command: atom,
callback: fun,
options: keyword,
}
defstruct [
:title,
:command,
:callback,
options: %{
api_only: false,
refresh: 1,
},
]
@doc ~S"""
List all registered pages.
For every page the following information is given:
- `title`
- `command`
- `api_only`
- `refresh`
"""
@spec list :: list(map)
def list do
ensure_table()
@pages_table
|> :ets.match(:"$1")
|> Enum.map(fn [{command, %Page{title: title, options: options}}] ->
%{
title: title,
command: command,
api_only: options.api_only,
refresh: options.refresh,
}
end)
end
@doc ~S"""
Find the page for a given `command`
Returns `:page_not_found`, if no page can be found.
"""
@spec find(command :: atom) :: Page.t
def find(command) do
ensure_table()
case :ets.lookup(@pages_table, command) do
[{^command, page}] -> page
_ -> :page_not_found
end
end
@doc ~S"""
Calls the function associated with the `command`/page.
Returns the result of the function or `:page_not_found`, if the page can not be found.
"""
@spec call(Page.t | atom) :: any
def call(:page_not_found), do: :page_not_found
def call(%Page{callback: callback}), do: callback.()
def call(command) when is_atom(command), do: command |> find() |> call()
def call(_), do: :page_not_found
@doc ~S"""
Registers a `page` with `:wobserver`.
Returns `true` if successfully added, otherwise `false`.
The following inputs are accepted:
- `{title, command, callback}`
- `{title, command, callback, options}`
- a `map` with the following fields:
- `title`
- `command`
- `callback`
- `options` (optional)
The fields are used as follows:
- `title`, the name of the page. Is used for the web interface menu.
- `command`, single atom to associate the page with.
- `callback`, function to be evaluated when the API is called or the page is viewed.
The result is converted to JSON and displayed.
- `options`, options for the page.
The following options can be set:
- `api_only` (`boolean`), if set to true the page won't show up in the web interface, but will only be available as API.
- `refresh` (`float`, 0-1), sets the refresh time factor. Used in the web interface to refresh the data on the page. Set to `0` for no refresh.
"""
@spec register(page :: Page.data) :: boolean
def register(page)
def register({title, command, callback}),
do: register(title, command, callback)
def register({title, command, callback, options}),
do: register(title, command, callback, options)
def register(page = %Page{}) do
ensure_table()
:ets.insert @pages_table, {page.command, page}
end
def register(%{title: t, command: command, callback: call, options: options}),
do: register(t, command, call, options)
def register(%{title: title, command: command, callback: callback}),
do: register(title, command, callback)
def register(_), do: false
@doc ~S"""
Registers a `page` with `:wobserver`.
The arguments are used as follows:
- `title`, the name of the page. Is used for the web interface menu.
- `command`, single atom to associate the page with.
- `callback`, function to be evaluated when the API is called or the page is viewed.
The result is converted to JSON and displayed.
- `options`, options for the page.
The following options can be set:
- `api_only` (`boolean`), if set to true the page won't show up in the web interface, but will only be available as API.
- `refresh` (`float`, 0-1), sets the refresh time factor. Used in the web interface to refresh the data on the page. Set to `0` for no refresh.
"""
@spec register(
title :: String.t,
command :: atom,
callback :: fun,
options :: keyword
) :: boolean
def register(title, command, callback, options \\ []) do
register(%Page{
title: title,
command: command,
callback: callback,
options: %{
api_only: Keyword.get(options, :api_only, false),
refresh: Keyword.get(options, :refresh, 1.0)
},
})
end
@doc ~S"""
Loads custom pages from configuration and adds them to `:wobserver`.
To add custom pages set the `:pages` option.
The `:pages` option must be a list of page data.
The page data can be formatted as:
- `{title, command, callback}`
- `{title, command, callback, options}`
- a `map` with the following fields:
- `title`
- `command`
- `callback`
- `options` (optional)
For more information and types see: `Wobserver.Page.register/1`.
Example:
```elixir
config :wobserver,
pages: [
{"Example", :example, fn -> %{x: 9} end}
]
```
"""
@spec load_config :: [any]
def load_config do
ensure_table()
:wobserver
|> Application.get_env(:pages, [])
|> Enum.map(&register/1)
end
# Helpers
defp ensure_table do
case :ets.info(@pages_table) do
:undefined ->
:ets.new @pages_table, [:named_table, :public]
true
_ ->
true
end
end
end
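# Usage sketch (hypothetical page; callback results are rendered as JSON by
# the web/API layer):
#
#     Wobserver.Page.register("Example", :example, fn -> %{x: 9} end)
#     Wobserver.Page.call(:example)
#     #=> %{x: 9}
#     Wobserver.Page.call(:missing)
#     #=> :page_not_found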
# ---- source: lib/wobserver/page.ex ----
defmodule NaturalTime do
import NimbleParsec
ws = string(" ") |> repeat() |> ignore()
int2 = integer(min: 1, max: 2)
preposition =
optional(
choice([
string("in the"),
string("on the"),
string("at the"),
string("in"),
string("on"),
string("at")
])
)
with_optional_prep = fn p ->
concat(ignore(replace(preposition |> concat(ws), "")), p)
end
ampm =
with_optional_prep.(
choice([
string("am"),
string("pm"),
replace(string("a.m."), "am"),
replace(string("p.m."), "pm"),
replace(string("midnight"), "am"),
replace(string("morning"), "am"),
replace(string("noon"), "pm"),
replace(string("afternoon"), "pm"),
replace(string("evening"), "pm"),
replace(string("night"), "pm")
])
)
rel_day =
choice([
string("today"),
string("tomorrow"),
replace(string("tmr"), "tomorrow")
])
weekday =
with_optional_prep.(
choice([
string("monday"),
string("tuesday"),
string("wednesday"),
string("thursday"),
string("friday"),
string("saturday"),
string("sunday"),
replace(string("mon"), "monday"),
replace(string("tue"), "tuesday"),
replace(string("wed"), "wednesday"),
replace(string("thu"), "thursday"),
replace(string("fri"), "friday"),
replace(string("sat"), "saturday"),
replace(string("sun"), "sunday")
])
)
rel_adv =
choice([
string("this"),
times(string("next") |> ignore(ws), min: 1, max: 3)
])
time =
with_optional_prep.(
choice([
int2
|> concat(ignore(string(":")))
|> concat(int2)
|> concat(ws)
|> concat(ampm)
|> tag(:hm_ap),
int2 |> concat(ws) |> concat(ampm) |> tag(:h_ap),
int2 |> concat(ignore(string(":"))) |> concat(int2) |> tag(:hm),
int2 |> tag(:h)
])
)
day =
with_optional_prep.(
choice([
rel_day |> tag(:rel_day),
rel_adv |> concat(ws) |> concat(weekday) |> tag(:rel_weekday),
weekday |> tag(:weekday)
])
)
defparsecp(
:datetime,
choice([
day |> concat(ws) |> concat(time) |> tag(:day_time),
time |> concat(ws) |> concat(day) |> tag(:time_day),
time |> tag(:time_only)
])
)
@doc """
Parse a natural-language time string relative to a reference `DateTime`.
The timezone information in the reference time will be used for
inference. For example, if the reference time has timezone of
"UTC+1", then "2pm" will parse to 2pm in UTC+1 timezone.
Example usage:
iex> now = Timex.parse!("2019-06-02T01:04:21+08:00", "{ISO:Extended}")
iex> parse("10pm", now) == Timex.parse!("2019-06-02T22:00:00+08:00", "{ISO:Extended}")
true
"""
@spec parse(String.t(), DateTime.t()) :: nil | DateTime.t()
def parse(str, rel \\ Timex.now()) do
str = str |> String.downcase() |> String.trim()
case datetime(str) do
{:ok, result, "", _, _, _} ->
parse_datetime(rel, result)
_ ->
nil
end
end
defp parse_datetime(now, day_time: [day, time]) do
date = parse_day(now, [day])
time = parse_time(now, [time])
Timex.set(now, date: date, time: time)
end
defp parse_datetime(now, time_day: [time, day]) do
date = parse_day(now, [day])
time = parse_time(now, [time])
Timex.set(now, date: date, time: time)
end
defp parse_datetime(now, time_only: [time]) do
time = parse_time(now, [time])
Timex.set(now, time: time)
end
defp parse_day(now, rel_day: ["today"]) do
Timex.to_date(now)
end
defp parse_day(now, rel_day: ["tomorrow"]) do
now |> Timex.to_date() |> Timex.shift(days: 1)
end
defp parse_day(now, weekday: [weekday]) do
parse_day(now, rel_weekday: ["", weekday])
end
defp parse_day(now, rel_weekday: [adv, weekday]) do
curr_day = Timex.weekday(now)
target_day = Timex.day_to_num(weekday)
offset =
case adv do
"" -> rem(target_day - curr_day + 7, 7)
"this" -> target_day - curr_day
"next" -> target_day + 7 - curr_day
end
now
|> Timex.to_date()
|> Timex.shift(days: offset)
end
defp parse_day(now, rel_weekday: ["next", "next" | rest]) do
weekday = List.last(rest)
curr_day = Timex.weekday(now)
target_day = Timex.day_to_num(weekday)
next_count = Enum.count(rest) + 1
offset = target_day + next_count * 7 - curr_day
now
|> Timex.to_date()
|> Timex.shift(days: offset)
end
defp parse_time(now, h_ap: [h, ap]) do
parse_time(now, hm_ap: [h, 0, ap])
end
defp parse_time(now, hm_ap: [h, m, "am"]) do
now
|> Timex.set(hour: rem(h, 12), minute: m, second: 0)
|> to_time()
end
defp parse_time(now, hm_ap: [h, m, "pm"]) do
now
|> Timex.set(hour: rem(h, 12) + 12, minute: m, second: 0)
|> to_time()
end
defp parse_time(now, hm: [h, m]) do
now
|> Timex.set(hour: h, minute: m, second: 0)
|> to_time()
end
defp parse_time(now, h: [h]) do
now
|> Timex.set(hour: h, minute: 0, second: 0)
|> to_time()
end
defp to_time(datetime) do
{datetime.hour, datetime.minute, datetime.second}
end
end
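# Usage sketch (example input; see also the doctest on `parse/2`):
#
#     now = Timex.parse!("2019-06-02T01:04:21+08:00", "{ISO:Extended}")
#     NaturalTime.parse("tomorrow at 9am", now)
#     #=> 9:00 am on 2019-06-03, in the same +08:00 offset as `now`
#
# `parse/2` returns `nil` unless the parser consumes the whole input.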
# ---- source: lib/natural_time.ex ----
defmodule Reaper.DataExtract.Processor do
@moduledoc """
This module processes a data source and sends its data to the output topic
"""
require Logger
alias Reaper.{
Decoder,
DataSlurper,
UrlBuilder,
Persistence
}
alias Reaper.DataExtract.{ValidationStage, SchemaStage, LoadStage}
use Retry
@min_demand 500
@max_demand 1_000
@doc """
Downloads, decodes, and sends data to a topic
"""
@spec process(SmartCity.Dataset.t()) :: Redix.Protocol.redis_value() | no_return()
def process(%SmartCity.Dataset{} = unprovisioned_dataset) do
Process.flag(:trap_exit, true)
dataset =
unprovisioned_dataset
|> Providers.Helpers.Provisioner.provision()
validate_destination(dataset)
validate_cache(dataset)
generated_time_stamp = DateTime.utc_now()
{:ok, producer_stage} = create_producer_stage(dataset)
{:ok, validation_stage} = ValidationStage.start_link(cache: dataset.id, dataset: dataset)
{:ok, schema_stage} = SchemaStage.start_link(cache: dataset.id, dataset: dataset, start_time: generated_time_stamp)
{:ok, load_stage} = LoadStage.start_link(cache: dataset.id, dataset: dataset, start_time: generated_time_stamp)
GenStage.sync_subscribe(load_stage, to: schema_stage, min_demand: @min_demand, max_demand: @max_demand)
GenStage.sync_subscribe(schema_stage, to: validation_stage, min_demand: @min_demand, max_demand: @max_demand)
GenStage.sync_subscribe(validation_stage, to: producer_stage, min_demand: @min_demand, max_demand: @max_demand)
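# Demand flows load <- schema <- validation <- producer, so decoded events
# travel producer -> validation -> schema -> load in batches sized between
# @min_demand and @max_demand.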
wait_for_completion([producer_stage, validation_stage, schema_stage, load_stage])
Persistence.remove_last_processed_index(dataset.id)
rescue
error ->
Logger.error(Exception.format_stacktrace(__STACKTRACE__))
Logger.error("Unable to continue processing dataset #{inspect(unprovisioned_dataset)} - Error #{inspect(error)}")
reraise error, __STACKTRACE__
after
unprovisioned_dataset.id
|> DataSlurper.determine_filename()
|> File.rm()
end
defp create_producer_stage(dataset) do
dataset
|> UrlBuilder.build()
|> DataSlurper.slurp(dataset.id, dataset.technical.sourceHeaders, dataset.technical.protocol)
|> Decoder.decode(dataset)
|> Stream.with_index()
|> GenStage.from_enumerable()
end
defp validate_destination(dataset) do
topic = "#{topic_prefix()}-#{dataset.id}"
create_topic(topic)
start_topic_producer(topic)
end
defp validate_cache(%SmartCity.Dataset{id: id, technical: %{allow_duplicates: false}}) do
Horde.DynamicSupervisor.start_child(Reaper.Horde.Supervisor, {Reaper.Cache, name: id})
end
defp validate_cache(_dataset), do: nil
defp wait_for_completion([]), do: true
defp wait_for_completion(pids) do
receive do
{:EXIT, from, :normal} ->
wait_for_completion(pids -- [from])
{:EXIT, _from, reason} ->
raise "Stage failed reason: #{inspect(reason)}"
unknown ->
Logger.warn("Unknown message received: #{inspect(unknown)}")
wait_for_completion(pids)
end
end
defp create_topic(topic) do
retry with: exponential_backoff() |> randomize() |> cap(2_000) |> expiry(30_000), atoms: [false] do
Elsa.create_topic(endpoints(), topic)
Process.sleep(100)
Elsa.topic?(endpoints(), topic)
after
true -> true
else
_ -> raise "Topic does not exist, everything is terrible!"
end
end
defp start_topic_producer(topic) do
{:ok, _pid} =
Elsa.Supervisor.start_link(connection: :"#{topic}_producer", endpoints: endpoints(), producer: [topic: topic])
retry with: constant_backoff(100) |> Stream.take(25) do
:brod.get_producer(:"#{topic}_producer", topic, 0)
after
{:ok, _pid} -> true
else
_ -> raise "Cannot verify kafka producer for topic #{topic}"
end
end
defp endpoints(), do: Application.get_env(:reaper, :elsa_brokers)
defp topic_prefix(), do: Application.get_env(:reaper, :output_topic_prefix)
end
# ---- source: apps/reaper/lib/reaper/data_extract/processor.ex ----
defmodule Ockam.SecureChannel.XX do
@moduledoc """
Defines the XX Key Agreement protocol.
"""
alias Ockam.Vault
defstruct [:role, :vault, :s, :e, :rs, :re, :ck, :k, :n, :h, :prologue]
@protocol_name "Noise_XX_25519_AESGCM_SHA256"
defmacro zero_padded_protocol_name do
quote bind_quoted: binding() do
padding_size = (32 - byte_size(@protocol_name)) * 8
<<@protocol_name, 0::size(padding_size)>>
end
end
def initialize(role, vault, s, e \\ nil) do
state = %__MODULE__{role: role, vault: vault, s: s, e: e, prologue: ""}
with {:ok, state} <- initialize_role(state),
{:ok, state} <- initialize_vault(state),
{:ok, state} <- initialize_s(state),
{:ok, state} <- initialize_e(state),
{:ok, state} <- initialize_h(state),
{:ok, state} <- initialize_ck(state) do
mix_hash(state, state.prologue)
end
end
# initialize role - initiator or responder
defp initialize_role(%{role: role} = state) when role in [:initiator, :responder] do
{:ok, state}
end
defp initialize_role(%{role: role}),
do: {:error, {:role_argument_has_an_unexpected_value, role}}
# initialize vault
defp initialize_vault(%{vault: nil}), do: {:error, :vault_argument_is_nil}
defp initialize_vault(%{vault: _vault} = state), do: {:ok, state}
# initialize identity keypair s
defp initialize_s(%{s: %{private: _private_key, public: _public_key}} = state),
do: {:ok, state}
defp initialize_s(%{s: s}),
do: {:error, {:s_argument_does_not_have_the_expected_structure, s}}
# initialize ephemeral keypair e
defp initialize_e(%{e: nil, vault: vault} = state) do
secret_attributes = %{type: :curve25519, persistence: :ephemeral, purpose: :key_agreement}
with {:ok, private_key} <- Vault.secret_generate(vault, secret_attributes),
{:ok, public_key} <- Vault.secret_publickey_get(vault, private_key) do
e = %{private: private_key, public: public_key}
{:ok, %{state | e: e}}
else
{:error, reason} -> {:error, {:could_not_initialize_e, reason}}
end
end
defp initialize_e(%{e: %{private: _private, public: _public}} = state), do: {:ok, state}
defp initialize_e(%{e: e}), do: {:error, {:e_argument_does_not_have_the_expected_structure, e}}
# initialize h
defp initialize_h(state) do
h = zero_padded_protocol_name()
{:ok, %{state | h: h}}
end
# initialize ck
defp initialize_ck(%{vault: vault} = state) do
ck_attributes = %{type: :buffer, persistence: :ephemeral, purpose: :key_agreement}
case Vault.secret_import(vault, ck_attributes, zero_padded_protocol_name()) do
{:ok, ck} -> {:ok, %{state | ck: ck}}
{:error, reason} -> {:error, {:could_not_initialize_ck, reason}}
end
end
def encode_message_1(%__MODULE__{e: e} = state, payload) do
with {:ok, state} <- mix_hash(state, e.public),
{:ok, state} <- mix_hash(state, payload) do
{:ok, e.public <> payload, state}
end
end
def encode_message_2(%__MODULE__{e: e, s: s, re: re} = state, payload) do
with {:ok, state} <- mix_hash(state, e.public),
{:ok, shared_secret} <- dh(state, e, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, encrypted_s_and_tag} <- encrypt_and_hash(state, s.public),
{:ok, shared_secret} <- dh(state, s, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, encrypted_payload_and_tag} <- encrypt_and_hash(state, payload) do
{:ok, e.public <> encrypted_s_and_tag <> encrypted_payload_and_tag, state}
end
end
def encode_message_3(%__MODULE__{s: s, re: re} = state, payload) do
with {:ok, state, encrypted_s_and_tag} <- encrypt_and_hash(state, s.public),
{:ok, shared_secret} <- dh(state, s, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, encrypted_payload_and_tag} <- encrypt_and_hash(state, payload) do
{:ok, encrypted_s_and_tag <> encrypted_payload_and_tag, state}
end
end
def decode_message_1(state, message) do
<<re::32-bytes, payload::binary>> = message
with {:ok, state} <- mix_hash(state, re),
{:ok, state} <- mix_hash(state, payload) do
{:ok, payload, %{state | re: re}}
end
end
def decode_message_2(%__MODULE__{e: e} = state, message) do
<<re::32-bytes, encrypted_rs_and_tag::48-bytes, encrypted_payload_and_tag::binary>> = message
with {:ok, state} <- mix_hash(state, re),
{:ok, shared_secret} <- dh(state, e, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, rs} <- decrypt_and_hash(state, encrypted_rs_and_tag),
{:ok, shared_secret} <- dh(state, e, rs),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, payload} <- decrypt_and_hash(state, encrypted_payload_and_tag) do
{:ok, payload, %{state | re: re, rs: rs}}
end
end
def decode_message_3(%__MODULE__{e: e} = state, message) do
<<encrypted_rs_and_tag::48-bytes, encrypted_payload_and_tag::binary>> = message
with {:ok, state, rs} <- decrypt_and_hash(state, encrypted_rs_and_tag),
{:ok, shared_secret} <- dh(state, e, rs),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, payload} <- decrypt_and_hash(state, encrypted_payload_and_tag) do
{:ok, payload, %{state | rs: rs}}
end
end
def mix_hash(%__MODULE__{vault: vault, h: h} = state, data) do
case Vault.sha256(vault, h <> data) do
{:ok, h} -> {:ok, %{state | h: h}}
error -> {:error, error}
end
end
def mix_key(%__MODULE__{vault: vault, ck: ck} = state, input_key_material) do
with {:ok, [ck, k]} <- Vault.hkdf_sha256(vault, ck, input_key_material, 2) do
# :ok <- Vault.set_secret_type(vault, k, :aes256)
{:ok, %{state | n: 0, k: k, ck: ck}}
end
end
def dh(%__MODULE__{vault: vault}, keypair, peer_public) do
Vault.ecdh(vault, keypair.private, peer_public)
end
def encrypt_and_hash(%__MODULE__{vault: vault, k: k, n: n, h: h} = state, plaintext) do
secret_attributes = %{type: :aes256, persistence: :ephemeral, purpose: :key_agreement}
with {:ok, k} <- Vault.secret_export(vault, k),
{:ok, k} <- Vault.secret_import(vault, secret_attributes, k),
{:ok, ciphertext_and_tag} <- Vault.aead_aes_gcm_encrypt(vault, k, n, h, plaintext),
:ok <- Vault.secret_destroy(vault, k),
{:ok, state} <- mix_hash(state, ciphertext_and_tag) do
{:ok, %{state | n: n + 1}, ciphertext_and_tag}
end
end
def decrypt_and_hash(%__MODULE__{vault: vault, k: k, n: n, h: h} = state, ciphertext_and_tag) do
secret_attributes = %{type: :aes256, persistence: :ephemeral, purpose: :key_agreement}
with {:ok, k} <- Vault.secret_export(vault, k),
{:ok, k} <- Vault.secret_import(vault, secret_attributes, k),
{:ok, plaintext} <- Vault.aead_aes_gcm_decrypt(vault, k, n, h, ciphertext_and_tag),
:ok <- Vault.secret_destroy(vault, k),
{:ok, state} <- mix_hash(state, ciphertext_and_tag) do
{:ok, %{state | n: n + 1}, plaintext}
end
end
def split(%__MODULE__{vault: vault, ck: ck}) do
Vault.hkdf_sha256(vault, ck, nil, 2)
end
end
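# For reference, the Noise XX pattern implemented above exchanges three
# handshake messages (initiator on the left):
#
#     -> e
#     <- e, ee, s, es
#     -> s, se
#
# `encode_message_1/2/3` build these messages and `decode_message_1/2/3`
# consume the peer's side; `split/1` then derives the transport keys.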
# ---- source: implementations/elixir/ockam/ockam/lib/ockam/secure_channel/xx.ex ----
defmodule Nerves.Grove.OLED.Display do
@moduledoc """
Seeed Studio [Grove OLED Display 96×96](http://wiki.seeedstudio.com/wiki/Grove_-_OLED_Display_1.12%22)
## Datasheet
http://garden.seeedstudio.com/images/8/82/SSD1327_datasheet.pdf
# Example
alias Nerves.Grove.OLED
{:ok, pid} = OLED.Display.start_link(address)
OLED.Display.reset(pid)
OLED.Display.clear(pid)
OLED.Display.set_text_position(pid, 0, 0)
OLED.Display.put_string(pid, "Hello, world")
"""
alias ElixirALE.I2C
@default_address 0x3C
@command_mode 0x80
@data_mode 0x40
# 8x8 monochrome bitmap font for ASCII code points 32-128.
@default_font {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00},
{0x00, 0x14, 0x7F, 0x14, 0x7F, 0x14, 0x00, 0x00},
{0x00, 0x24, 0x2A, 0x7F, 0x2A, 0x12, 0x00, 0x00},
{0x00, 0x23, 0x13, 0x08, 0x64, 0x62, 0x00, 0x00},
{0x00, 0x36, 0x49, 0x55, 0x22, 0x50, 0x00, 0x00},
{0x00, 0x00, 0x05, 0x03, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x1C, 0x22, 0x41, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x41, 0x22, 0x1C, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x08, 0x2A, 0x1C, 0x2A, 0x08, 0x00, 0x00},
{0x00, 0x08, 0x08, 0x3E, 0x08, 0x08, 0x00, 0x00},
{0x00, 0xA0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00},
{0x00, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x20, 0x10, 0x08, 0x04, 0x02, 0x00, 0x00},
{0x00, 0x3E, 0x51, 0x49, 0x45, 0x3E, 0x00, 0x00},
{0x00, 0x00, 0x42, 0x7F, 0x40, 0x00, 0x00, 0x00},
{0x00, 0x62, 0x51, 0x49, 0x49, 0x46, 0x00, 0x00},
{0x00, 0x22, 0x41, 0x49, 0x49, 0x36, 0x00, 0x00},
{0x00, 0x18, 0x14, 0x12, 0x7F, 0x10, 0x00, 0x00},
{0x00, 0x27, 0x45, 0x45, 0x45, 0x39, 0x00, 0x00},
{0x00, 0x3C, 0x4A, 0x49, 0x49, 0x30, 0x00, 0x00},
{0x00, 0x01, 0x71, 0x09, 0x05, 0x03, 0x00, 0x00},
{0x00, 0x36, 0x49, 0x49, 0x49, 0x36, 0x00, 0x00},
{0x00, 0x06, 0x49, 0x49, 0x29, 0x1E, 0x00, 0x00},
{0x00, 0x00, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xAC, 0x6C, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x08, 0x14, 0x22, 0x41, 0x00, 0x00, 0x00},
{0x00, 0x14, 0x14, 0x14, 0x14, 0x14, 0x00, 0x00},
{0x00, 0x41, 0x22, 0x14, 0x08, 0x00, 0x00, 0x00},
{0x00, 0x02, 0x01, 0x51, 0x09, 0x06, 0x00, 0x00},
{0x00, 0x32, 0x49, 0x79, 0x41, 0x3E, 0x00, 0x00},
{0x00, 0x7E, 0x09, 0x09, 0x09, 0x7E, 0x00, 0x00},
{0x00, 0x7F, 0x49, 0x49, 0x49, 0x36, 0x00, 0x00},
{0x00, 0x3E, 0x41, 0x41, 0x41, 0x22, 0x00, 0x00},
{0x00, 0x7F, 0x41, 0x41, 0x22, 0x1C, 0x00, 0x00},
{0x00, 0x7F, 0x49, 0x49, 0x49, 0x41, 0x00, 0x00},
{0x00, 0x7F, 0x09, 0x09, 0x09, 0x01, 0x00, 0x00},
{0x00, 0x3E, 0x41, 0x41, 0x51, 0x72, 0x00, 0x00},
{0x00, 0x7F, 0x08, 0x08, 0x08, 0x7F, 0x00, 0x00},
{0x00, 0x41, 0x7F, 0x41, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x20, 0x40, 0x41, 0x3F, 0x01, 0x00, 0x00},
{0x00, 0x7F, 0x08, 0x14, 0x22, 0x41, 0x00, 0x00},
{0x00, 0x7F, 0x40, 0x40, 0x40, 0x40, 0x00, 0x00},
{0x00, 0x7F, 0x02, 0x0C, 0x02, 0x7F, 0x00, 0x00},
{0x00, 0x7F, 0x04, 0x08, 0x10, 0x7F, 0x00, 0x00},
{0x00, 0x3E, 0x41, 0x41, 0x41, 0x3E, 0x00, 0x00},
{0x00, 0x7F, 0x09, 0x09, 0x09, 0x06, 0x00, 0x00},
{0x00, 0x3E, 0x41, 0x51, 0x21, 0x5E, 0x00, 0x00},
{0x00, 0x7F, 0x09, 0x19, 0x29, 0x46, 0x00, 0x00},
{0x00, 0x26, 0x49, 0x49, 0x49, 0x32, 0x00, 0x00},
{0x00, 0x01, 0x01, 0x7F, 0x01, 0x01, 0x00, 0x00},
{0x00, 0x3F, 0x40, 0x40, 0x40, 0x3F, 0x00, 0x00},
{0x00, 0x1F, 0x20, 0x40, 0x20, 0x1F, 0x00, 0x00},
{0x00, 0x3F, 0x40, 0x38, 0x40, 0x3F, 0x00, 0x00},
{0x00, 0x63, 0x14, 0x08, 0x14, 0x63, 0x00, 0x00},
{0x00, 0x03, 0x04, 0x78, 0x04, 0x03, 0x00, 0x00},
{0x00, 0x61, 0x51, 0x49, 0x45, 0x43, 0x00, 0x00},
{0x00, 0x7F, 0x41, 0x41, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x02, 0x04, 0x08, 0x10, 0x20, 0x00, 0x00},
{0x00, 0x41, 0x41, 0x7F, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x04, 0x02, 0x01, 0x02, 0x04, 0x00, 0x00},
{0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00},
{0x00, 0x01, 0x02, 0x04, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x20, 0x54, 0x54, 0x54, 0x78, 0x00, 0x00},
{0x00, 0x7F, 0x48, 0x44, 0x44, 0x38, 0x00, 0x00},
{0x00, 0x38, 0x44, 0x44, 0x28, 0x00, 0x00, 0x00},
{0x00, 0x38, 0x44, 0x44, 0x48, 0x7F, 0x00, 0x00},
{0x00, 0x38, 0x54, 0x54, 0x54, 0x18, 0x00, 0x00},
{0x00, 0x08, 0x7E, 0x09, 0x02, 0x00, 0x00, 0x00},
{0x00, 0x18, 0xA4, 0xA4, 0xA4, 0x7C, 0x00, 0x00},
{0x00, 0x7F, 0x08, 0x04, 0x04, 0x78, 0x00, 0x00},
{0x00, 0x00, 0x7D, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x80, 0x84, 0x7D, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x7F, 0x10, 0x28, 0x44, 0x00, 0x00, 0x00},
{0x00, 0x41, 0x7F, 0x40, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x7C, 0x04, 0x18, 0x04, 0x78, 0x00, 0x00},
{0x00, 0x7C, 0x08, 0x04, 0x7C, 0x00, 0x00, 0x00},
{0x00, 0x38, 0x44, 0x44, 0x38, 0x00, 0x00, 0x00},
{0x00, 0xFC, 0x24, 0x24, 0x18, 0x00, 0x00, 0x00},
{0x00, 0x18, 0x24, 0x24, 0xFC, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x7C, 0x08, 0x04, 0x00, 0x00, 0x00},
{0x00, 0x48, 0x54, 0x54, 0x24, 0x00, 0x00, 0x00},
{0x00, 0x04, 0x7F, 0x44, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x3C, 0x40, 0x40, 0x7C, 0x00, 0x00, 0x00},
{0x00, 0x1C, 0x20, 0x40, 0x20, 0x1C, 0x00, 0x00},
{0x00, 0x3C, 0x40, 0x30, 0x40, 0x3C, 0x00, 0x00},
{0x00, 0x44, 0x28, 0x10, 0x28, 0x44, 0x00, 0x00},
{0x00, 0x1C, 0xA0, 0xA0, 0x7C, 0x00, 0x00, 0x00},
{0x00, 0x44, 0x64, 0x54, 0x4C, 0x44, 0x00, 0x00},
{0x00, 0x08, 0x36, 0x41, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x41, 0x36, 0x08, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x02, 0x01, 0x01, 0x02, 0x01, 0x00, 0x00},
{0x00, 0x02, 0x05, 0x05, 0x02, 0x00, 0x00, 0x00}}
use Bitwise
@spec start_link(byte) :: {:ok, pid} | {:error, any}
def start_link(address \\ @default_address) do
I2C.start_link("i2c-2", address)
end
@spec reset(pid) :: :ok
def reset(pid) do
send_commands(pid, <<0xFD, 0x12>>)
off(pid)
# 96 rows (register takes ratio - 1)
set_multiplex_ratio(pid, 95)
set_start_line(pid, 0)
set_display_offset(pid, 96)
set_vertical_mode(pid)
send_commands(pid, <<0xAB, 0x01>>)
# 100 nit
set_contrast_level(pid, 0x53)
send_commands(pid, <<0xB1, 0x51>>)
send_commands(pid, <<0xB3, 0x01>>)
send_commands(pid, <<0xB9>>)
send_commands(pid, <<0xBC, 0x08>>)
send_commands(pid, <<0xBE, 0x07>>)
send_commands(pid, <<0xB6, 0x01>>)
send_commands(pid, <<0xD5, 0x62>>)
set_normal_mode(pid)
set_activate_scroll(pid, false)
on(pid)
# ms
Process.sleep(100)
set_row_address(pid, 0, 95)
set_column_address(pid, 8, 8 + 47)
:ok
end
@spec on(pid) :: :ok
def on(pid) do
send_command(pid, 0xAF)
end
@spec off(pid) :: :ok
def off(pid) do
send_command(pid, 0xAE)
end
@spec clear(pid) :: :ok
def clear(pid) do
# TODO: optimize more once https://github.com/fhunleth/elixir_ale/issues/20 is fixed.
block = :erlang.list_to_binary([@data_mode, String.duplicate("\x00", 16)])
Enum.each(1..48, fn _ ->
Enum.each(1..div(96, 16), fn _ -> I2C.write(pid, block) end)
end)
end
@spec set_text_position(pid, byte, byte) :: any
def set_text_position(pid, row, column) do
set_column_address(pid, 0x08 + column * 4, 0x08 + 47)
set_row_address(pid, 0x00 + row * 8, 0x07 + row * 8)
end
# @spec put_string(pid, <<>>) :: :ok
def put_string(_pid, <<>>), do: nil
@spec put_string(pid, binary) :: any
def put_string(pid, <<head, rest::binary>>) do
put_char(pid, head)
put_string(pid, rest)
end
# @spec put_char(pid, byte) :: any
def put_char(pid, char) when is_integer(char) and char in 32..127 do
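# Glyph columns are stored as bits in the 8-byte font tuple; the SSD1327
# uses 4-bit grayscale, so each data byte written below packs two adjacent
# pixels (0xF0 lights the first, 0x0F the second).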
c = char - 32
Enum.each([0, 2, 4, 6], fn i ->
Enum.each(0..7, fn j ->
glyph = elem(@default_font, c)
bit1 = band(bsr(elem(glyph, i), j), 0x01)
bit2 = band(bsr(elem(glyph, i + 1), j), 0x01)
send_data(
pid,
bor(
if bit1 != 0 do
0xF0
else
0
end,
if bit2 != 0 do
0x0F
else
0
end
)
)
end)
end)
end
@spec put_char(pid, byte) :: any
def put_char(pid, char) when is_integer(char) and char in 0..31 do
# replace with a space
put_char(pid, ?\s)
end
@spec set_column_address(pid, byte, byte) :: :ok
def set_column_address(pid, start, end_) do
send_commands(pid, <<0x15, start, end_>>)
:ok
end
@spec set_row_address(pid, byte, byte) :: :ok
def set_row_address(pid, start, end_) do
send_commands(pid, <<0x75, start, end_>>)
:ok
end
@spec set_contrast_level(pid, byte) :: :ok
def set_contrast_level(pid, level) do
send_commands(pid, <<0x81, level>>)
:ok
end
@spec set_horizontal_mode(pid) :: :ok
def set_horizontal_mode(pid) do
send_commands(pid, <<0xA0, 0x42>>)
set_row_address(pid, 0, 95)
set_column_address(pid, 8, 8 + 47)
end
@spec set_vertical_mode(pid) :: :ok
def set_vertical_mode(pid) do
send_commands(pid, <<0xA0, 0x46>>)
:ok
end
@spec set_start_line(pid, 0..127) :: :ok
def set_start_line(pid, row) do
send_commands(pid, <<0xA1, row>>)
:ok
end
@spec set_display_offset(pid, 0..127) :: :ok
def set_display_offset(pid, row) do
send_commands(pid, <<0xA2, row>>)
:ok
end
@spec set_normal_mode(pid) :: :ok
def set_normal_mode(pid) do
send_command(pid, 0xA4)
end
@spec set_inverse_mode(pid) :: :ok
def set_inverse_mode(pid) do
send_command(pid, 0xA7)
end
@spec set_multiplex_ratio(pid, 16..128) :: :ok
def set_multiplex_ratio(pid, ratio) do
send_commands(pid, <<0xA8, ratio>>)
:ok
end
@spec set_activate_scroll(pid, false) :: :ok
def set_activate_scroll(pid, false) do
send_command(pid, 0x2E)
end
@spec set_activate_scroll(pid, true) :: :ok
def set_activate_scroll(pid, true) do
send_command(pid, 0x2F)
end
# @spec send_commands(pid, <<>>) :: :ok
defp send_commands(_pid, <<>>), do: :ok
@spec send_commands(pid, binary) :: :ok
defp send_commands(pid, <<head, rest::binary>>) do
send_command(pid, head)
send_commands(pid, rest)
end
@spec send_command(pid, byte) :: :ok
defp send_command(pid, command) do
I2C.write(pid, <<@command_mode, command>>)
end
@spec send_data(pid, byte) :: :ok
defp send_data(pid, data) do
I2C.write(pid, <<@data_mode, data>>)
end
end
# ---- source: lib/nerves_grove/oled_display.ex ----
defmodule Honeybadger.Utils do
@moduledoc """
Assorted helper functions used through out the Honeybadger package.
"""
@doc """
Internally all modules are prefixed with Elixir. This function removes the
`Elixir` prefix from the module when it is converted to a string.
# Example
iex> Honeybadger.Utils.module_to_string(Honeybadger.Utils)
"Honeybadger.Utils"
"""
def module_to_string(module) do
module
|> Module.split()
|> Enum.join(".")
end
@doc """
Transform value into a consistently cased string representation
# Example
iex> Honeybadger.Utils.canonicalize(:User_SSN)
"user_ssn"
"""
def canonicalize(val) do
val
|> to_string()
|> String.downcase()
end
@doc """
Configurable data sanitization. This currently:
- recursively truncates deep structures (to a depth of 20)
- constrains large string values (to 64k)
- filters out any map keys that might contain sensitive information.
"""
@depth_token "[DEPTH]"
@truncated_token "[TRUNCATED]"
@filtered_token "[FILTERED]"
# 64k with enough space to concat truncated_token
@default_max_string_size 64 * 1024 - 11
@default_max_depth 20
def sanitize(value, opts \\ []) do
base = %{
max_depth: @default_max_depth,
max_string_size: @default_max_string_size,
filter_keys: Honeybadger.get_env(:filter_keys)
}
opts =
Enum.into(opts, base)
|> Map.update!(:filter_keys, fn v -> MapSet.new(v, &canonicalize/1) end)
sanitize_val(value, Map.put(opts, :depth, 0))
end
defp sanitize_val(v, %{depth: depth, max_depth: depth}) when is_map(v) or is_list(v) do
@depth_token
end
defp sanitize_val(%{__struct__: _} = struct, opts) do
sanitize_val(Map.from_struct(struct), opts)
end
defp sanitize_val(v, %{depth: depth, filter_keys: filter_keys} = opts) when is_map(v) do
for {key, val} <- v, into: %{} do
if MapSet.member?(filter_keys, canonicalize(key)) do
{key, @filtered_token}
else
{key, sanitize_val(val, Map.put(opts, :depth, depth + 1))}
end
end
end
defp sanitize_val(v, %{depth: depth} = opts) when is_list(v) do
Enum.map(v, &sanitize_val(&1, Map.put(opts, :depth, depth + 1)))
end
defp sanitize_val(v, %{max_string_size: max_string_size}) when is_binary(v) do
if String.valid?(v) and String.length(v) > max_string_size do
String.slice(v, 0, max_string_size) <> @truncated_token
else
v
end
end
defp sanitize_val(v, _), do: v
end
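# Usage sketch (hypothetical keys; `filter_keys` normally comes from the
# `:honeybadger` application config):
#
#     Honeybadger.Utils.sanitize(%{password: "hunter2", id: 1}, filter_keys: [:password])
#     #=> %{password: "[FILTERED]", id: 1}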
# ---- source: lib/honeybadger/utils.ex ----
defmodule Gobstopper.Service.Auth.Identity.Credential do
@moduledoc """
Manages the interactions with credentials.
Credential implementations will implement the given callbacks to handle the
specific credential type.
##Implementing a credential
Credentials should be implemented in a module conforming to
`#{String.slice(to_string(__MODULE__), 7..-1)}.type`. Where type is the capitalized
credential type.
e.g. For a credential that should be identified using the :email atom, then the
implementation for that credential should fall under `#{String.slice(to_string(__MODULE__), 7..-1)}.Email`.
"""
alias Gobstopper.Service.Auth.Identity
@doc """
Implement the behaviour for creating a new credential and associating it with
the given identity.
An identity should only have one credential of each type. If the identity
attempts to create a second credential of the same type, an error should be returned.
If the operation was successful return `:ok`.
"""
@callback create(identity :: Identity.Model.t, credential :: term) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for changing a credential that is associated with the
given identity.
If the operation was successful return `:ok`. Otherwise return the error.
"""
@callback change(identity :: Identity.Model.t, credential :: term) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for revoking the credential associated with the given
identity.
If the identity has no credential, then it should return an error.
If the operation was successful return `:ok`.
"""
@callback revoke(identity :: Identity.Model.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for identifying if a credential exists for the given
identity.
If one exists return true, otherwise return false.
"""
@callback credential?(identity :: Identity.Model.t) :: boolean
@doc """
Implement the behaviour for retrieving the presentable information for the
credential of a given identity.
If one exists return the state of the credential (`:unverified` or `:verified`)
and the presentable string. Otherwise return `{ :none, nil }`.
Verification state is used to infer whether the given credential is guaranteed
to be owned by the identity owner.
"""
@callback info(identity :: Identity.Model.t) :: { state :: :unverified | :verified, presentable :: String.t } | { :none, nil }
@doc """
Implement the behaviour for authenticating an identity using the given credential.
If the operation was successful return `{ :ok, identity }`, where `identity` is
the identity of the authenticated credential. Otherwise return an error.
"""
@callback authenticate(credential :: term) :: { :ok, identity :: Identity.Model.t } | { :error, reason :: String.t }
@doc """
Create the credential type for the given identity.
If the credential is valid, and the identity doesn't already have a credential
of that type associated with it, then it will succeed. Otherwise returns the
reason of failure.
"""
@spec create(atom, Identity.Model.t, term) :: :ok | { :error, String.t }
def create(type, identity, credential) do
atom_to_module(type).create(identity, credential)
end
@doc """
Change the credential of the given type belonging to the identity.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec change(atom, Identity.Model.t, term) :: :ok | { :error, String.t }
def change(type, identity, credential) do
atom_to_module(type).change(identity, credential)
end
@doc """
Revoke the credential of the given type belonging to the identity.
Returns `:ok` if the operation was successful, otherwise returns an error if
there was no such credential or the operation could not be completed.
"""
@spec revoke(atom, Identity.Model.t) :: :ok | { :error, String.t }
def revoke(type, identity) do
atom_to_module(type).revoke(identity)
end
@doc """
Check if a credential of the given type exists for the given identity.
Returns true if one exists, otherwise false.
"""
@spec credential?(atom, Identity.Model.t) :: boolean
def credential?(type, identity) do
atom_to_module(type).credential?(identity)
end
@doc """
Retrieve the info for the type of credential of an identity.
If a credential exists the return value will consist of the state of the
credential (`:unverified` or `:verified`) and the presentable string. Otherwise
it will return `{ :none, nil }`.
"""
@spec info(atom, Identity.Model.t) :: { :unverified | :verified, String.t } | { :none, nil }
def info(type, identity) do
atom_to_module(type).info(identity)
end
@doc """
Authenticate the type of credential.
If credential can be successfully authenticated, then it returns the identity.
Otherwise returns the reason of failure.
"""
@spec authenticate(atom, term) :: { :ok, Identity.Model.t } | { :error, String.t }
def authenticate(type, credential) do
atom_to_module(type).authenticate(credential)
end
@spec atom_to_module(atom) :: atom
defp atom_to_module(name) do
String.to_atom(to_string(__MODULE__) <> "." <> format_as_module(to_string(name)))
end
@spec format_as_module(String.t) :: String.t
defp format_as_module(name) do
name
|> String.split(".")
|> Enum.map(fn module ->
String.split(module, "_") |> Enum.map(&String.capitalize(&1)) |> Enum.join
end)
|> Enum.join(".")
end
end
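# A minimal sketch of a credential implementation for a hypothetical
# `:example` type. The callbacks only illustrate the contract; a real
# implementation would persist and look up credentials.
defmodule Gobstopper.Service.Auth.Identity.Credential.Example do
  @behaviour Gobstopper.Service.Auth.Identity.Credential

  @impl true
  def create(_identity, _credential), do: :ok

  @impl true
  def change(_identity, _credential), do: :ok

  @impl true
  def revoke(_identity), do: {:error, "No example credential to revoke"}

  @impl true
  def credential?(_identity), do: false

  @impl true
  def info(_identity), do: {:none, nil}

  @impl true
  def authenticate(_credential), do: {:error, "Invalid credentials"}
end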
# ---- source: apps/gobstopper_service/lib/gobstopper.service/auth/identity/credential.ex ----
defmodule Grizzly.ZWave.Commands.DoorLockOperationReport do
@moduledoc """
OperationReport is used to advertise the status of a door lock.
This is the response to the `Grizzly.ZWave.Commands.OperationGet`
command.
Params:
* `:mode` - the door operating lock mode (required)
* `:outside_handles_mode` - a map of the outside door handles and if they
can or cannot open the door locally (optional)
* `:inside_handles_mode` - a map of the inside door handles and if they can
or cannot open the door locally (optional)
* `:latch_position` - the position of the latch (optional)
* `:bolt_position` - the position of the bolt (optional)
* `:door_state` - the state of the door being open or closed (optional)
* `:timeout_minutes` - how long the door has been unlocked (required)
* `:timeout_seconds` - how long the door has been unlocked (required)
* `:target_mode` - the target mode of an ongoing transition or of the most recent transition (optional - v4)
* `:duration` - the estimated remaining time before the target mode is realized (optional - v4)
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.{Command, DecodeError}
alias Grizzly.ZWave.CommandClasses.DoorLock
@typedoc """
These modes tell if the handle can open the door locally or not.
The door lock does not have to report all or any of these, so the default is
to set them all to disabled if they are not specified when building the
command.
"""
@type handles_mode :: %{non_neg_integer() => :enabled | :disabled}
@typedoc """
This param only matters if the door lock says it supports this door
component in the CapabilitiesReport.
If it isn't supported, the node receiving this report can ignore it.
If this param isn't provided when calling `new/1`, we zero this field
out by defaulting it to `:open`.
"""
@type latch_position :: :open | :closed
@typedoc """
This param only matters if the door lock says it supports this door
component in the CapabilitiesReport.
If it isn't supported, the node receiving this report can ignore it.
If this param isn't provided when calling `new/1`, we zero this field
out by defaulting it to `:locked`.
"""
@type bolt_position :: :locked | :unlocked
@typedoc """
This param only matters if the door lock says it supports this door
component in the CapabilitiesReport.
If it isn't supported, the node receiving this report can ignore it.
If this param isn't provided when calling `new/1`, we zero this field
out by defaulting it to `:open`.
"""
@type door_state :: :open | :closed
@type timeout_minutes :: 0x00..0xFD | :undefined
@type timeout_seconds :: 0x00..0x3B | :undefined
@type param ::
{:mode, DoorLock.mode()}
| {:outside_handles_mode, handles_mode()}
| {:inside_handles_mode, handles_mode()}
| {:latch_position, latch_position()}
| {:bolt_position, bolt_position()}
| {:door_state, door_state()}
| {:timeout_minutes, timeout_minutes()}
| {:timeout_seconds, timeout_seconds()}
| {:target_mode, DoorLock.mode()}
| {:duration, :unknown | non_neg_integer()}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
command = %Command{
name: :door_lock_operation_report,
command_byte: 0x03,
command_class: DoorLock,
params: params_with_defaults(params),
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
mode = Command.param!(command, :mode)
outside_door_handles = Command.param!(command, :outside_handles_mode)
inside_door_handles = Command.param!(command, :inside_handles_mode)
latch_position = Command.param!(command, :latch_position)
bolt_position = Command.param!(command, :bolt_position)
door_state = Command.param!(command, :door_state)
timeout_minutes = Command.param!(command, :timeout_minutes)
timeout_seconds = Command.param!(command, :timeout_seconds)
target_mode = Command.param(command, :target_mode)
outside_handles_byte = door_handles_modes_to_byte(outside_door_handles)
inside_handles_byte = door_handles_modes_to_byte(inside_door_handles)
door_condition_byte = door_condition_to_byte(latch_position, bolt_position, door_state)
timeout_minutes_byte = timeout_minutes_to_byte(timeout_minutes)
timeout_seconds_byte = timeout_seconds_to_byte(timeout_seconds)
<<handles_byte>> = <<outside_handles_byte::size(4), inside_handles_byte::size(4)>>
if target_mode == nil do
<<DoorLock.mode_to_byte(mode), handles_byte, door_condition_byte, timeout_minutes_byte,
timeout_seconds_byte>>
else
# version 4
duration = Command.param!(command, :duration)
target_mode_byte = DoorLock.mode_to_byte(target_mode)
duration_byte = duration_to_byte(duration)
<<DoorLock.mode_to_byte(mode), handles_byte, door_condition_byte, timeout_minutes_byte,
timeout_seconds_byte, target_mode_byte, duration_byte>>
end
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
def decode_params(
<<mode_byte, outside_handles_int::size(4), inside_handles_int::size(4),
door_condition_byte, timeout_minutes, timeout_seconds>>
) do
outside_handles = door_handles_modes_from_byte(outside_handles_int)
inside_handles = door_handles_modes_from_byte(inside_handles_int)
latch_position = latch_position_from_byte(door_condition_byte)
bolt_position = bolt_position_from_byte(door_condition_byte)
door_state = door_state_from_byte(door_condition_byte)
with {:ok, mode} <- DoorLock.mode_from_byte(mode_byte),
{:ok, timeout_minutes} <- timeout_minutes_from_byte(timeout_minutes),
{:ok, timeout_seconds} <- timeout_seconds_from_byte(timeout_seconds) do
{:ok,
[
mode: mode,
outside_handles_mode: outside_handles,
inside_handles_mode: inside_handles,
latch_position: latch_position,
bolt_position: bolt_position,
door_state: door_state,
timeout_minutes: timeout_minutes,
timeout_seconds: timeout_seconds
]}
else
{:error, %DecodeError{} = decode_error} ->
{:error, %DecodeError{decode_error | command: :door_lock_operation_report}}
end
end
# Version 4
def decode_params(
<<mode_byte, outside_handles_int::size(4), inside_handles_int::size(4),
door_condition_byte, timeout_minutes, timeout_seconds, target_mode_byte, duration_byte>>
) do
outside_handles = door_handles_modes_from_byte(outside_handles_int)
inside_handles = door_handles_modes_from_byte(inside_handles_int)
latch_position = latch_position_from_byte(door_condition_byte)
bolt_position = bolt_position_from_byte(door_condition_byte)
door_state = door_state_from_byte(door_condition_byte)
with {:ok, mode} <- DoorLock.mode_from_byte(mode_byte),
{:ok, timeout_minutes} <- timeout_minutes_from_byte(timeout_minutes),
{:ok, timeout_seconds} <- timeout_seconds_from_byte(timeout_seconds),
{:ok, target_mode} <- DoorLock.mode_from_byte(target_mode_byte),
{:ok, duration} <- duration_from_byte(duration_byte) do
{:ok,
[
mode: mode,
outside_handles_mode: outside_handles,
inside_handles_mode: inside_handles,
latch_position: latch_position,
bolt_position: bolt_position,
door_state: door_state,
timeout_minutes: timeout_minutes,
timeout_seconds: timeout_seconds,
target_mode: target_mode,
duration: duration
]}
end
end
def door_handles_modes_to_byte(handles_mode) do
handle_1_bit = door_handle_value_to_bit(Map.get(handles_mode, 1, :disabled))
handle_2_bit = door_handle_value_to_bit(Map.get(handles_mode, 2, :disabled))
handle_3_bit = door_handle_value_to_bit(Map.get(handles_mode, 3, :disabled))
handle_4_bit = door_handle_value_to_bit(Map.get(handles_mode, 4, :disabled))
<<byte>> =
<<0::size(4), handle_4_bit::size(1), handle_3_bit::size(1), handle_2_bit::size(1),
handle_1_bit::size(1)>>
byte
end
def door_condition_to_byte(latch_position, bolt_position, door_state) do
latch_bit = latch_bit_from_position(latch_position)
bolt_bit = bolt_bit_from_position(bolt_position)
door_bit = door_bit_from_state(door_state)
<<byte>> = <<0::size(5), latch_bit::size(1), bolt_bit::size(1), door_bit::size(1)>>
byte
end
defp params_with_defaults(params) do
handles_modes_default = %{1 => :disabled, 2 => :disabled, 3 => :disabled, 4 => :disabled}
defaults = [
inside_handles_mode: handles_modes_default,
outside_handles_mode: handles_modes_default,
latch_position: :open,
bolt_position: :locked,
door_state: :open,
timeout_minutes: 0,
timeout_seconds: 0
]
Keyword.merge(defaults, params)
end
defp latch_bit_from_position(:open), do: 0
defp latch_bit_from_position(:closed), do: 1
defp bolt_bit_from_position(:locked), do: 0
defp bolt_bit_from_position(:unlocked), do: 1
defp door_bit_from_state(:open), do: 0
defp door_bit_from_state(:closed), do: 1
defp timeout_seconds_to_byte(s) when s >= 0 and s <= 0x3B, do: s
defp timeout_seconds_to_byte(:undefined), do: 0xFE
defp timeout_minutes_to_byte(m) when m >= 0 and m <= 0xFC, do: m
defp timeout_minutes_to_byte(:undefined), do: 0xFE
defp duration_to_byte(secs) when secs in 0..127, do: secs
defp duration_to_byte(secs) when secs in 128..(126 * 60), do: round(secs / 60) + 0x7F
defp duration_to_byte(:unknown), do: 0xFE
defp door_handles_modes_from_byte(byte) do
<<_::size(4), handle_4::size(1), handle_3::size(1), handle_2::size(1), handle_1::size(1)>> =
<<byte>>
%{
1 => door_handle_enable_value_from_bit(handle_1),
2 => door_handle_enable_value_from_bit(handle_2),
3 => door_handle_enable_value_from_bit(handle_3),
4 => door_handle_enable_value_from_bit(handle_4)
}
end
defp door_handle_enable_value_from_bit(1), do: :enabled
defp door_handle_enable_value_from_bit(0), do: :disabled
defp door_handle_value_to_bit(:enabled), do: 1
defp door_handle_value_to_bit(:disabled), do: 0
defp latch_position_from_byte(byte) do
<<_::size(5), latch_bit::size(1), _::size(2)>> = <<byte>>
if latch_bit == 1 do
:closed
else
:open
end
end
defp bolt_position_from_byte(byte) do
<<_::size(5), _::size(1), bolt_bit::size(1), _::size(1)>> = <<byte>>
if bolt_bit == 1 do
:unlocked
else
:locked
end
end
defp door_state_from_byte(byte) do
<<_::size(5), _::size(2), door_state_bit::size(1)>> = <<byte>>
if door_state_bit == 1 do
:closed
else
:open
end
end
defp timeout_minutes_from_byte(m) when m >= 0 and m <= 0xFD, do: {:ok, m}
defp timeout_minutes_from_byte(0xFE), do: {:ok, :undefined}
defp timeout_minutes_from_byte(byte),
do: {:error, %DecodeError{value: byte, param: :timeout_minute, command: :operation_report}}
defp timeout_seconds_from_byte(s) when s >= 0 and s <= 0x3B, do: {:ok, s}
defp timeout_seconds_from_byte(0xFE), do: {:ok, :undefined}
defp timeout_seconds_from_byte(byte),
do: {:error, %DecodeError{value: byte, param: :timeout_second, command: :operation_report}}
defp duration_from_byte(byte) when byte in 0x00..0x7F, do: {:ok, byte}
defp duration_from_byte(byte) when byte in 0x80..0xFD, do: {:ok, (byte - 0x7F) * 60}
defp duration_from_byte(0xFE), do: {:ok, :unknown}
defp duration_from_byte(byte),
do: {:error, %DecodeError{value: byte, param: :duration, command: :door_lock_operation_report}}
end
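# Round-trip sketch (example values; other params take the defaults from
# `params_with_defaults/1`; assumes `alias Grizzly.ZWave.Commands.DoorLockOperationReport`):
#
#     {:ok, cmd} =
#       DoorLockOperationReport.new(
#         mode: :secured,
#         timeout_minutes: :undefined,
#         timeout_seconds: :undefined
#       )
#     bin = DoorLockOperationReport.encode_params(cmd)
#     {:ok, _params} = DoorLockOperationReport.decode_params(bin)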
# ---- source: lib/grizzly/zwave/commands/door_lock_operation_report.ex ----
defmodule Multiverses.Registry do
@moduledoc """
This module is intended to be a drop-in replacement for `Registry`, but
not all functionality is implemented.
If universes are active, keys in the Registry will be `{universe, key}`
instead of the normal `key`. A convenience `via/2` macro has been
provided, which will perform this substitution correctly.
Unimplemented functionality:
- `count_match/3,4`
- `match/3,4`
- `unregister_match/3,4`
"""
use Multiverses.Clone,
module: Registry,
except: [
count: 1,
dispatch: 3,
dispatch: 4,
keys: 2,
lookup: 2,
register: 3,
unregister: 2,
update_value: 3,
select: 2,
start_link: 3, # these two functions are deprecated.
start_link: 2, # these two functions are deprecated.
]
require Multiverses
def count(registry) do
registry
|> Registry.select([
{
{:"$1", :_, :_},
[{:==, {:element, 1, :"$1"}, {:const, Multiverses.self()}}],
[:"$1"]
}
])
|> Enum.count()
end
def dispatch(registry, key, fun, opts \\ []) do
Registry.dispatch(registry, {Multiverses.self(), key}, fun, opts)
end
def keys(registry, pid) do
universe = Multiverses.self()
registry
|> Registry.keys(pid)
|> Enum.map(fn {^universe, key} -> key end)
# NB: there shouldn't be any pids that don't match this universe.
end
def lookup(registry, key) do
Registry.lookup(registry, {Multiverses.self(), key})
end
@doc """
Registers the calling process with the Registry. Works as `Registry.register/3` does.
"""
def register(registry, key, value) do
Registry.register(registry, {Multiverses.self(), key}, value)
end
def select(registry, spec) do
universe = Multiverses.self()
new_spec = Enum.map(spec, fn {match, filters, result} ->
{new_match, match_var} =
case match do
{:_, a, b} -> {{:"$4", a, b}, :"$4"}
{a, b, c} -> {{a, b, c}, a}
end
# this adjustment function takes existing filters and results
# and intrusively changes them to select on the second part of the
# element when the match var matches the first position. This needs
# to be an arity-2 function that is passed itself, to allow
# recursion in a lambda using a y-combinator technique.
# NB: this needs to be a lambda so that Multiverses can be compile-time
# only.
adjust = fn
^match_var, _self ->
{:element, 2, match_var}
list, self when is_list(list) ->
Enum.map(list, &self.(&1, self))
tuple, self when is_tuple(tuple) ->
tuple
|> Tuple.to_list()
|> self.(self)
|> List.to_tuple()
map, self when is_map(map) ->
map
|> Enum.map(fn
{key, value} ->
{self.(key, self), self.(value, self)}
end)
|> Enum.into(%{})
any, _self ->
any
end
new_filters =
adjust.(filters, adjust) ++
[{:==, {:element, 1, match_var}, {:const, universe}}]
new_result = adjust.(result, adjust)
{new_match, new_filters, new_result}
end)
Registry.select(registry, new_spec)
end
def unregister(registry, key) do
Registry.unregister(registry, {Multiverses.self(), key})
end
def update_value(registry, key, callback) do
Registry.update_value(registry, {Multiverses.self(), key}, callback)
end
@doc """
generates the correct via term to call this registry.
if `:use_multiverses` is activated, then the via term will look like:
```elixir
{:via, Registry, {reg, {universe, key}}}
```
If it's not, the via term will look like:
```elixir
{:via, Registry, {reg, key}}
```
"""
defmacro via(reg, key) do
this_app = Multiverses.app()
use_multiverses? = __CALLER__.module
|> Module.get_attribute(:multiverse_otp_app, this_app)
|> Application.get_env(:use_multiverses, this_app == :multiverses)
if use_multiverses? do
quote do
require Multiverses
{:via, Registry, {unquote(reg), {Multiverses.self(), unquote(key)}}}
end
else
quote do
{:via, Registry, {unquote(reg), unquote(key)}}
end
end
end
end
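# Usage sketch (hypothetical registry and server names; `via/2` is a macro,
# so the caller must `require Multiverses.Registry`):
#
#     require Multiverses.Registry
#     {:ok, _} = Registry.start_link(keys: :unique, name: MyApp.Registry)
#     GenServer.start_link(MyServer, [],
#       name: Multiverses.Registry.via(MyApp.Registry, :worker))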
# ---- source: lib/multiverses/registry.ex ----
defmodule Zipper do
defstruct [:current, :former]
@type dir :: :left | :right
@type t() :: %__MODULE__{current: BinTree.t(), former: [{dir, any, BinTree.t()}]}
@doc """
Get a zipper focused on the root node.
"""
@spec from_tree(BinTree.t()) :: Zipper.t()
def from_tree(bin_tree) do
%__MODULE__{current: bin_tree, former: []}
end
@doc """
Get the complete tree from a zipper.
"""
@spec to_tree(Zipper.t()) :: BinTree.t()
def to_tree(%__MODULE__{current: bin_tree, former: []}) do
bin_tree
end
def to_tree(%__MODULE__{current: bin_tree, former: [{:left, value, right} | tail]}) do
bin_tree = %BinTree{value: value, left: bin_tree, right: right}
to_tree(%__MODULE__{current: bin_tree, former: tail})
end
def to_tree(%__MODULE__{current: bin_tree, former: [{:right, value, left} | tail]}) do
bin_tree = %BinTree{value: value, left: left, right: bin_tree}
to_tree(%__MODULE__{current: bin_tree, former: tail})
end
@doc """
Get the value of the focus node.
"""
@spec value(Zipper.t()) :: any
def value(%__MODULE__{current: %BinTree{value: value}}) do
value
end
@doc """
Get the left child of the focus node, if any.
"""
@spec left(Zipper.t()) :: Zipper.t() | nil
def left(%__MODULE__{
current: %BinTree{left: nil}
}) do
nil
end
def left(%__MODULE__{current: %BinTree{value: value, right: right, left: left}, former: former}) do
%__MODULE__{
current: left,
former: [{:left, value, right} | former]
}
end
@doc """
Get the right child of the focus node, if any.
"""
@spec right(Zipper.t()) :: Zipper.t() | nil
def right(%__MODULE__{
current: %BinTree{right: nil}
}) do
nil
end
def right(%__MODULE__{current: %BinTree{value: value, right: right, left: left}, former: former}) do
%__MODULE__{
current: right,
former: [{:right, value, left} | former]
}
end
@doc """
Get the parent of the focus node, if any.
"""
@spec up(Zipper.t()) :: Zipper.t() | nil
def up(%__MODULE__{current: _bin_tree, former: []}) do
nil
end
def up(%__MODULE__{current: bin_tree, former: [{:left, value, right} | tail]}) do
bin_tree = %BinTree{value: value, left: bin_tree, right: right}
%__MODULE__{current: bin_tree, former: tail}
end
def up(%__MODULE__{current: bin_tree, former: [{:right, value, left} | tail]}) do
bin_tree = %BinTree{value: value, left: left, right: bin_tree}
%__MODULE__{current: bin_tree, former: tail}
end
@doc """
Set the value of the focus node.
"""
@spec set_value(Zipper.t(), any) :: Zipper.t()
def set_value(%__MODULE__{current: bin_tree, former: former}, value) do
bin_tree = Map.put(bin_tree, :value, value)
%__MODULE__{current: bin_tree, former: former}
end
@doc """
Replace the left child tree of the focus node.
"""
@spec set_left(Zipper.t(), BinTree.t() | nil) :: Zipper.t()
def set_left(%__MODULE__{current: bin_tree, former: former}, left) do
bin_tree = Map.put(bin_tree, :left, left)
%__MODULE__{current: bin_tree, former: former}
end
@doc """
Replace the right child tree of the focus node.
"""
@spec set_right(Zipper.t(), BinTree.t() | nil) :: Zipper.t()
def set_right(%__MODULE__{current: bin_tree, former: former}, right) do
bin_tree = Map.put(bin_tree, :right, right)
%__MODULE__{current: bin_tree, former: former}
end
end
|
elixir/zipper/lib/zipper.ex
| 0.896935 | 0.717309 |
zipper.ex
|
starcoder
|
defmodule PixelFont.TableSource.Glyf.Simple do
alias PixelFont.RectilinearShape.Path, as: RSPath
defstruct ~w(last_points inst_size inst flags x_coords y_coords)a
@type t :: %__MODULE__{
last_points: [integer()],
inst_size: 0x0000..0xFFFF,
inst: binary(),
flags: [0x00..0xFF],
x_coords: [0x00..0xFF],
y_coords: [0x00..0xFF]
}
@spec new(RSPath.t()) :: t()
def new(path) do
{_, last_points, contours} =
path
|> make_relative()
|> Enum.reduce({0, [], []}, fn contour, {pos, last_points, coords} ->
len = length(contour)
{pos + len, [pos + len - 1 | last_points], [contour | coords]}
end)
{flags, coords} =
contours
|> Enum.reverse()
|> List.flatten()
|> Enum.map(fn {x, y} ->
x_is_positive = if x >= 0, do: 1, else: 0
y_is_positive = if y >= 0, do: 1, else: 0
flag = <<0::2, y_is_positive::1, x_is_positive::1, 7::4>>
{flag, {abs(x), abs(y)}}
end)
|> Enum.unzip()
{x_coords, y_coords} = Enum.unzip(coords)
%__MODULE__{
last_points: Enum.reverse(last_points),
inst_size: 0,
inst: "",
flags: compress_flags(flags),
x_coords: x_coords,
y_coords: y_coords
}
end
defp make_relative(path) do
path
|> Enum.reduce({{0, 0}, []}, fn contour, {last_pt, contours} ->
contour2 =
contour
|> Enum.zip([last_pt | contour])
|> Enum.map(fn {{cx, cy}, {px, py}} -> {cx - px, cy - py} end)
{List.last(contour), [contour2 | contours]}
end)
|> elem(1)
|> Enum.reverse()
end
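  # Collapses runs of identical flag bytes using the glyf table's "repeat" bit:
  # for runs longer than two, emit a single flag byte with the repeat bit set,
  # followed by a count byte holding the number of additional repetitions.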
defp compress_flags(flags) do
flags
|> Enum.chunk_by(& &1)
|> Enum.map(fn chunk ->
chunk_length = length(chunk)
if chunk_length > 2 do
<<flag1::4, _rpt::1, flag2::3>> = hd(chunk)
        [<<flag1::4, 1::1, flag2::3>>, <<chunk_length - 1::8>>]
else
chunk
end
end)
end
end
|
lib/pixel_font/table_source/glyf/simple.ex
| 0.735167 | 0.438004 |
simple.ex
|
starcoder
|
defmodule Wadm.Model.Decoder do
@capability_component_type "capability"
@actor_component_type "actor"
alias Wadm.Model.{
ActorComponent,
CapabilityComponent,
LinkDefinition,
SpreadScaler,
WeightedTarget,
AppSpec
}
@doc """
Takes a map as returned by either of `YamlElixir`'s parse functions and returns
either an error or a canonical representation of the components discovered within
  the model. Component extraction is an all-or-nothing process: if one of the components
  in the model map fails to decode, then the entire operation will fail.
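
  ## Example

  A sketch; the map keys follow the decoder clauses below, and the image
  reference is hypothetical:

      yaml = %{
        "spec" => %{
          "components" => [
            %{
              "name" => "echo",
              "type" => "actor",
              "properties" => %{"image" => "registry.example.com/echo:0.1.0"}
            }
          ]
        }
      }

      {:ok, [%ActorComponent{name: "echo"}]} = extract_components(yaml)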
"""
@spec extract_components(Map.t()) :: {:ok, [AppSpec.component()]} | {:error, String.t()}
def extract_components(yaml) do
case get_in(yaml, ["spec", "components"]) do
nil ->
{:error, "No components to extract from application specification"}
comps ->
{pass, fail} =
comps
|> Enum.map(fn comp -> from_map(comp) end)
|> Enum.split_with(fn x ->
case x do
{:ok, _c} -> true
{:error, _e} -> false
end
end)
      if fail == [] do
{:ok,
pass
|> Enum.map(fn {:ok, x} -> x end)}
else
reasons = fail |> Enum.map(fn {:error, r} -> r end) |> Enum.join(",")
{:error, "Failed to extract components: #{reasons}"}
end
end
end
defp from_map(
%{
"name" => name,
"type" => @capability_component_type,
"properties" => %{
"contract" => contract,
"image" => image
}
} = map
) do
traits =
case map["traits"] do
nil -> []
t -> t
end
link_name =
case get_in(map, ["properties", "link_name"]) do
nil -> "default"
ln -> ln
end
case extract_traits(traits) do
{:ok, traits} ->
{:ok,
%CapabilityComponent{
name: name,
image: image,
contract: contract,
link_name: link_name,
traits: traits
}}
{:error, reason} ->
{:error, reason}
end
end
defp from_map(%{
"name" => name,
"type" => @actor_component_type,
"properties" => %{
"image" => image
},
"traits" => traits
}) do
case extract_traits(traits) do
{:ok, traits} ->
{:ok,
%ActorComponent{
name: name,
image: image,
traits: traits
}}
{:error, reason} ->
{:error, reason}
end
end
defp from_map(%{
"name" => name,
"type" => @actor_component_type,
"properties" => %{
"image" => image
}
}) do
{:ok,
%ActorComponent{
name: name,
image: image,
traits: []
}}
end
defp from_map(%{"type" => @actor_component_type}) do
{:error, "Cannot extract actor component from map"}
end
defp from_map(%{"type" => @capability_component_type}) do
{:error, "Cannot extract capability component from map"}
end
defp extract_traits(traits) do
{pass, fail} =
traits
|> Enum.map(fn trait -> trait_from_map(trait) end)
|> Enum.split_with(fn x ->
case x do
{:ok, _t} -> true
{:error, _e} -> false
end
end)
    if fail == [] do
{:ok,
pass
|> Enum.map(fn {:ok, x} -> x end)}
else
{:error, "Failed to extract traits"}
end
end
defp trait_from_map(%{
"type" => "spreadscaler",
"properties" => %{
"replicas" => replicas,
"spread" => weighted_targets
}
}) do
{pass, fail} =
weighted_targets
|> Enum.map(fn target -> target_from_map(target) end)
|> Enum.split_with(fn x ->
case x do
{:ok, _t} -> true
{:error, _e} -> false
end
end)
    if fail == [] do
{:ok,
%SpreadScaler{
replicas: replicas,
spread:
pass
|> Enum.map(fn {:ok, x} -> x end)
}}
else
{:error, "Failed to extract weighted targets from spread definition"}
end
end
defp trait_from_map(%{
"type" => "linkdef",
"properties" => %{
"target" => target,
"values" => values
}
}) do
{:ok,
%LinkDefinition{
target: target,
values: values
}}
end
defp trait_from_map(%{}) do
{:error, "Unable to decode trait from map"}
end
defp target_from_map(
%{
"name" => name,
"requirements" => requirements
} = map
)
when is_map(requirements) do
weight =
case map["weight"] do
nil -> 100
w -> w
end
{:ok,
%WeightedTarget{
name: name,
requirements: requirements,
weight: weight
}}
end
defp target_from_map(%{}) do
{:error, "Unable to decode weighted target from spread specification"}
end
end
|
wadm/lib/wadm/model/decoder.ex
| 0.794544 | 0.457197 |
decoder.ex
|
starcoder
|
defmodule Dpos.Tx.MultiSig do
use Dpos.Tx, type: 4
@doc """
Sets the lifetime in seconds of the multisignature.
The lifetime must be >= 3600 and <= 259200.
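
  ## Example

      # a sketch, assuming `tx` is a %Dpos.Tx{} multisignature transaction
      tx = Dpos.Tx.MultiSig.set_lifetime(tx, 7200)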
"""
@spec set_lifetime(Dpos.Tx.t(), pos_integer) :: Dpos.Tx.t()
def set_lifetime(%Dpos.Tx{} = tx, ttl)
when is_integer(ttl) and ttl >= 3600 and ttl <= 259_200 do
ms =
tx
|> get_multi_signature()
|> Map.put(:lifetime, ttl)
Map.put(tx, :asset, %{multisignature: ms})
end
@doc """
Sets the minimum number of signatures required to validate a transaction.
The minimum possible value is 2.
"""
@spec set_min(Dpos.Tx.t(), pos_integer()) :: Dpos.Tx.t()
def set_min(%Dpos.Tx{} = tx, min) when is_integer(min) and min >= 2 do
ms =
tx
|> get_multi_signature()
|> Map.put(:min, min)
Map.put(tx, :asset, %{multisignature: ms})
end
@doc """
Adds a public key to the keysgroup field of the multisignature.
"""
@spec add_public_key(Dpos.Tx.t(), String.t()) :: Dpos.Tx.t()
def add_public_key(%Dpos.Tx{} = tx, pub_key)
when is_binary(pub_key) and byte_size(pub_key) == 64 do
ms =
tx
|> get_multi_signature()
|> add_to_keysgroup(pub_key)
Map.put(tx, :asset, %{multisignature: ms})
end
defp get_child_bytes(%{asset: %{multisignature: %{min: m, keysgroup: k, lifetime: t}}})
when is_integer(m) and is_integer(t) and is_list(k) do
keys = Enum.join(k)
<<m::size(8), t::size(8), keys::bytes>>
end
defp get_child_bytes(_) do
[
"Invalid multi signature\n" <>
"See Tx.MultiSig.set_lifetime/2\n" <>
"See Tx.MultiSig.set_min/2\n" <>
"See Tx.MultiSig.add_public_key/2"
]
|> Enum.join()
|> raise()
end
defp get_multi_signature(%{asset: %{multisignature: ms}}) when is_map(ms), do: ms
defp get_multi_signature(_), do: %{}
defp add_to_keysgroup(multi_signature, pub_key) do
keysgroup = multi_signature[:keysgroup] || []
keysgroup = ["+" <> pub_key | keysgroup]
Map.put(multi_signature, :keysgroup, keysgroup)
end
end
|
lib/tx/multi_sig.ex
| 0.739799 | 0.472075 |
multi_sig.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.ScheduleEntryTypeSupportedReport do
@moduledoc """
This command is used to report the number of supported schedule slots an Entry Lock schedule device supports for each user in the system.
Params:
  * `:number_of_slots_week_day` - A number from 0 to 255 that represents how many different schedule slots are supported each week for every user in the system for type Week Day.
  * `:number_of_slots_year_day` - A number from 0 to 255 that represents how many different schedule slots are supported for every user in the system for type Year Day.
  * `:number_of_slots_daily_repeating` - A number from 0 to 255 that represents how many different schedule slots are supported for every user in the system for type Daily Repeating Day.
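
  ## Example

  A sketch building a version 3 report (all slot counts are hypothetical):

      {:ok, command} =
        Grizzly.ZWave.Commands.ScheduleEntryTypeSupportedReport.new(
          number_of_slots_week_day: 2,
          number_of_slots_year_day: 1,
          number_of_slots_daily_repeating: 1
        )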
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.{Command, DecodeError}
alias Grizzly.ZWave.CommandClasses.ScheduleEntryLock
@type param ::
{:number_of_slots_week_day, byte()}
| {:number_of_slots_year_day, byte()}
| {:number_of_slots_daily_repeating, byte()}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
command = %Command{
name: :schedule_entry_type_supported_report,
command_byte: 0x0A,
command_class: ScheduleEntryLock,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
number_of_slots_week_day = Command.param!(command, :number_of_slots_week_day)
number_of_slots_year_day = Command.param!(command, :number_of_slots_year_day)
number_of_slots_daily_repeating = Command.param(command, :number_of_slots_daily_repeating)
# Schedule Entry Lock Command Class, Version 3
if number_of_slots_daily_repeating == nil do
<<number_of_slots_week_day, number_of_slots_year_day>>
else
<<number_of_slots_week_day, number_of_slots_year_day, number_of_slots_daily_repeating>>
end
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
def decode_params(<<number_of_slots_week_day, number_of_slots_year_day>>) do
{:ok,
[
number_of_slots_week_day: number_of_slots_week_day,
number_of_slots_year_day: number_of_slots_year_day
]}
end
# Schedule Entry Lock Command Class, Version 3
def decode_params(
<<number_of_slots_week_day, number_of_slots_year_day, number_of_slots_daily_repeating>>
) do
{:ok,
[
number_of_slots_week_day: number_of_slots_week_day,
number_of_slots_year_day: number_of_slots_year_day,
number_of_slots_daily_repeating: number_of_slots_daily_repeating
]}
end
end
|
lib/grizzly/zwave/commands/schedule_entry_type_supported_report.ex
| 0.880714 | 0.618536 |
schedule_entry_type_supported_report.ex
|
starcoder
|
defmodule Astarte.Flow.Blocks.DynamicVirtualDevicePool do
@moduledoc """
This is a consumer block that takes `data` from incoming `Message`s and publishes it as an Astarte device,
interpreting the `key` as <realm>/<device_id>/<interface><path>.
  The devices are dynamically registered when their device id is first seen. The credentials secret obtained
  with the registration is stored in the chosen CredentialsStorage.
"""
use GenStage
require Logger
alias Astarte.API.Pairing
alias Astarte.Device
alias Astarte.Flow.Compat
alias Astarte.Flow.Message
alias Astarte.Flow.Blocks.DynamicVirtualDevicePool.DETSCredentialsStorage
alias Astarte.Flow.VirtualDevicesSupervisor
defmodule State do
@moduledoc false
defstruct [
:device_base_opts,
:credentials_storage,
:pairing_agent,
:pairing_jwt_map,
:pairing_url
]
end
@doc """
Starts the `DynamicVirtualDevicePool`.
## Options
* `:pairing_url` (required) - base URL of the Astarte Pairing API instance the devices will
connect to, e.g. `https://astarte.api.example.com/pairing` or `http://localhost:4003` for
a local installation. URL containing the API version suffix (i.e. `/v1`) are *deprecated*
and will be removed in a future release.
* `:pairing_jwt_map` (required) - A map in the form `%{realm_name => jwt}` where jwt must be a JWT with the authorizations needed
to register a device in that realm.
* `:interface_provider` (required) - The `interface_provider` that will be used by the spawned devices.
  * `:ignore_ssl_errors` - A boolean to indicate whether devices have to ignore SSL errors when connecting to the broker. Defaults to `false`.
* `:credentials_storage` - The module used to store and fetch credentials secrets. Defaults to `DETSCredentialsStorage`.
* `:pairing_agent` - The module used to register the devices. Defaults to `Astarte.API.Pairing.Agent`
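
  ## Example

  A sketch, assuming a single realm whose JWT is bound to `jwt` and a
  hypothetical interface provider path:

      DynamicVirtualDevicePool.start_link(
        pairing_url: "https://astarte.api.example.com/pairing",
        pairing_jwt_map: %{"myrealm" => jwt},
        interface_provider: "priv/interfaces"
      )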
"""
@spec start_link(options) :: GenServer.on_start()
when options: [option],
option:
{:pairing_url, pairing_url :: String.t()}
| {:pairing_jwt_map,
pairing_jwt_map :: %{optional(realm :: String.t()) => jwt :: String.t()}}
| {:interface_provider, {module(), term()} | String.t()}
| {:ignore_ssl_errors, ignore_ssl_errors :: boolean()}
| {:credentials_storage, credentials_storage :: module()}
def start_link(opts) do
GenStage.start_link(__MODULE__, opts)
end
# Callbacks
@impl true
def init(opts) do
pairing_url =
Keyword.fetch!(opts, :pairing_url)
|> Compat.normalize_device_pairing_url()
pairing_jwt_map = Keyword.fetch!(opts, :pairing_jwt_map)
interface_provider = Keyword.fetch!(opts, :interface_provider)
ignore_ssl_errors = Keyword.get(opts, :ignore_ssl_errors, false)
credentials_storage = Keyword.get(opts, :credentials_storage, DETSCredentialsStorage)
pairing_agent = Keyword.get(opts, :pairing_agent, Pairing.Agent)
base_opts = [
pairing_url: pairing_url,
interface_provider: interface_provider,
ignore_ssl_errors: ignore_ssl_errors
]
state = %State{
credentials_storage: credentials_storage,
pairing_agent: pairing_agent,
pairing_url: pairing_url,
pairing_jwt_map: pairing_jwt_map,
device_base_opts: base_opts
}
{:consumer, state}
end
@impl true
def handle_events(events, _from, state) do
Enum.each(events, fn message ->
handle_message(message, state)
end)
{:noreply, [], state}
end
defp handle_message(message, state) do
%Message{
key: key,
data: data,
timestamp: timestamp_micros
} = message
with {:ok, {realm, device_id, interface, path}} <- parse_key(key),
{:ok, pid} <- fetch_device(state, realm, device_id),
{:ok, timestamp} <- DateTime.from_unix(timestamp_micros, :microsecond),
:ok <-
Device.send_datastream(pid, interface, path, data, timestamp: timestamp) do
:ok
else
{:error, reason} ->
_ = Logger.warn("Error handling message: #{inspect(reason)}", message: message)
{:error, reason}
end
end
defp parse_key(key) do
case String.split(key, "/") do
[realm, device_id, interface | path_tokens] ->
path = "/" <> Path.join(path_tokens)
{:ok, {realm, device_id, interface, path}}
_ ->
{:error, :invalid_astarte_key}
end
end
defp fetch_device(state, realm, device_id) do
case Astarte.Device.get_pid(realm, device_id) do
nil ->
start_device(state, realm, device_id)
pid when is_pid(pid) ->
{:ok, pid}
end
end
defp start_device(state, realm, device_id) do
%State{
device_base_opts: base_opts
} = state
with {:ok, credentials_secret} <-
fetch_credentials_secret(state, realm, device_id) do
device_opts = [
realm: realm,
device_id: device_id,
credentials_secret: credentials_secret
]
opts = Keyword.merge(device_opts, base_opts)
case DynamicSupervisor.start_child(VirtualDevicesSupervisor, {Device, opts}) do
{:ok, pid} ->
Device.wait_for_connection(pid)
{:ok, pid}
{:error, {:already_started, pid}} ->
{:ok, pid}
{:error, reason} ->
{:error, reason}
end
end
end
defp fetch_credentials_secret(state, realm, device_id) do
%State{
credentials_storage: credentials_storage
} = state
case credentials_storage.fetch_credentials_secret(realm, device_id) do
{:ok, credentials_secret} ->
{:ok, credentials_secret}
:error ->
register_device(state, realm, device_id)
end
end
defp register_device(state, realm, device_id) do
%State{
credentials_storage: credentials_storage,
pairing_agent: pairing_agent,
pairing_jwt_map: pairing_jwt_map,
pairing_url: pairing_url
} = state
with {:ok, jwt} <- Map.fetch(pairing_jwt_map, realm),
client = Pairing.client(pairing_url, realm, auth_token: jwt),
{:ok, %{status: 201, body: body}} <- pairing_agent.register_device(client, device_id),
%{"data" => %{"credentials_secret" => credentials_secret}} <- body,
:ok <-
credentials_storage.store_credentials_secret(realm, device_id, credentials_secret) do
{:ok, credentials_secret}
else
:error ->
Logger.warn("No JWT available for realm #{realm}")
{:error, :jwt_not_found}
{:ok, %{status: status, body: body}} ->
Logger.warn(
"Cannot register device #{device_id} in realm #{realm}: " <>
"#{inspect(status)} #{inspect(body)}"
)
{:error, :cannot_register_device}
{:error, reason} ->
Logger.warn(
"Error registering device #{inspect(device_id)} in realm #{realm}: #{inspect(reason)}"
)
{:error, reason}
end
end
end
|
lib/astarte_flow/blocks/dynamic_virtual_device_pool.ex
| 0.893184 | 0.430866 |
dynamic_virtual_device_pool.ex
|
starcoder
|
defmodule Genetic do
require Integer
alias Genetic.{
Chromosome,
SelectionStrategy,
CrossoverStrategy,
MutationStrategy,
ReinsertionStrategy
}
@spec run(Genetic.Problem.t(), Keyword.t()) :: [Chromosome.t()]
def run(problem, opts \\ []) do
problem
|> initialize(opts)
|> evolve(problem, 0, opts)
end
## Helpers
# Recursively creates new populations through selection, crossover, mutation, and reinsertion.
defp evolve(population, problem, generation, opts) do
population = evaluate(population, problem, opts)
best = Enum.at(population, 0)
IO.puts("Current best: #{inspect(best)}")
if problem.terminate?(population, generation) do
population
else
{parents, leftover} = select(population, opts)
children = crossover(parents, opts)
mutants = mutation(population, opts)
offspring = children ++ mutants
new_population = reinsertion(parents, offspring, leftover, problem.fitness_module(), opts)
evolve(new_population, problem, generation + 1, opts)
end
end
# Sets up the initial population.
defp initialize(problem, opts) do
case Keyword.fetch(opts, :initial_population) do
{:ok, genotypes} ->
genotypes
:error ->
n = Keyword.get(opts, :population_size, 100)
for _ <- 1..n do
problem.genotype()
end
end
end
# Sorts the population by fitness.
defp evaluate(population, problem, _opts) do
population
|> Enum.map(&problem.update_fitness/1)
|> Enum.sort({:desc, problem.fitness_module()})
end
# Splits the population into parents and leftovers.
defp select(population, opts) do
fun = Keyword.get(opts, :selection_type, &SelectionStrategy.natural/3)
count = Keyword.get(opts, :selection_count, nil)
n =
if count do
count
else
rate = Keyword.get(opts, :selection_rate, 0.8)
n = trunc(round(Enum.count(population) * rate))
if Integer.is_even(n), do: n, else: n + 1
end
parents = fun.(population, n, opts)
leftover =
population
|> MapSet.new()
|> MapSet.difference(MapSet.new(parents))
{parents, MapSet.to_list(leftover)}
end
# The parents produce offspring.
defp crossover(parents, opts) do
fun = Keyword.get(opts, :crossover_type, &CrossoverStrategy.uniform/3)
# Ensure each pairing has two parents.
pairings = Enum.chunk_every(parents, 2, 2, Enum.take(parents, 1))
for [p1, p2] <- pairings do
{c1, c2} = fun.(p1, p2, opts)
[c1, c2]
end
|> List.flatten()
end
# Random members of the population are mutated.
defp mutation(population, opts) do
fun = Keyword.get(opts, :mutation_type, &MutationStrategy.scramble/2)
count = Keyword.get(opts, :mutation_count, nil)
n =
if count do
count
else
rate = Keyword.get(opts, :mutation_rate, 0.05)
trunc(Enum.count(population) * rate)
end
if n > Enum.count(population) do
# Special case when you want to use mutation to increase population size.
for _ <- 1..n do
population
|> Enum.random()
|> fun.(opts)
end
else
# Otherwise, take a distinct, random selection from the population.
population
|> Enum.take_random(n)
|> Enum.map(&fun.(&1, opts))
end
end
# Creates a new population.
defp reinsertion(parents, offspring, leftover, fitness_module, opts) do
fun = Keyword.get(opts, :reinsertion_type, &ReinsertionStrategy.elitist/5)
fun.(parents, offspring, leftover, fitness_module, opts)
end
end
|
lib/genetic.ex
| 0.784154 | 0.533641 |
genetic.ex
|
starcoder
|
defmodule Toml.Test.Assertions do
@moduledoc false
import ExUnit.Assertions
alias Toml.Test.JsonConverter
@doc """
Given a path to a TOML file, asserts that parsing succeeds and
conversion to it's JSON equivalent matches the expected result.
The expected result should be contained in a .json file of the same
name as the .toml file given, in the same directory. The result is
compared with `assert_deep_equal/2`.
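
  ## Example

      # a sketch, with a hypothetical fixture path
      assert_toml_valid("test/fixtures/example.toml")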
"""
def assert_toml_valid(path) do
json = Path.join([Path.dirname(path), Path.basename(path, ".toml") <> ".json"])
case Toml.decode_file(path) do
{:error, {:invalid_toml, reason}} when is_binary(reason) ->
flunk(reason)
{:ok, decoded} ->
expected = JsonConverter.parse_json_file!(json)
typed = JsonConverter.to_typed_map(decoded)
assert_deep_equal(expected, typed)
end
end
@doc """
Asserts that two items are deeply equivalent, meaning that
all lists have the same items, all maps have the same keys, and all values
are the same. Order of items is not considered in this equality comparison.
"""
def assert_deep_equal(a, b) do
if do_deep_equal(a, b) do
assert true
else
assert a == b
end
end
  defp do_deep_equal(a, b) when is_map(a) and is_map(b) do
    asort = a |> Map.to_list() |> Enum.sort_by(&to_sort_key/1)
    bsort = b |> Map.to_list() |> Enum.sort_by(&to_sort_key/1)
    if map_size(a) == map_size(b) do
      # Use Enum.all?/2 rather than a bare `for` comprehension: a comprehension
      # always returns a (truthy) list, so element mismatches would be ignored.
      asort
      |> Enum.zip(bsort)
      |> Enum.all?(fn {{ak, av}, {bk, bv}} ->
        ak == bk and do_deep_equal(av, bv)
      end)
    else
      false
    end
  end
defp do_deep_equal(a, a), do: true
  defp do_deep_equal(a, b) when is_list(a) and is_list(b) do
    if length(a) == length(b) do
      asort = Enum.sort_by(a, &to_sort_key/1)
      bsort = Enum.sort_by(b, &to_sort_key/1)
      asort
      |> Enum.zip(bsort)
      |> Enum.all?(fn {ai, bi} -> do_deep_equal(ai, bi) end)
    else
      a == b
    end
  end
defp do_deep_equal(a, b) do
a == b
end
defp to_sort_key(v) when is_map(v), do: Enum.sort(Map.keys(v))
defp to_sort_key(v) when is_list(v), do: Enum.sort_by(v, &to_sort_key/1)
defp to_sort_key(v), do: v
end
|
test/support/assertions.ex
| 0.75985 | 0.714578 |
assertions.ex
|
starcoder
|
defmodule Prometheus.Metric.Histogram do
@moduledoc """
A Histogram tracks the size and number of events in buckets.
You can use Histograms for aggregatable calculation of quantiles.
Example use cases for Histograms:
- Response latency;
- Request size.
Histogram expects `buckets` key in a metric spec. Buckets can be:
- a list of numbers in increasing order;
- one of the generate specs (shortcuts for `Prometheus.Buckets` macros)
- `:default`;
- `{:linear, start, step, count}`;
- `{:exponential, start, step, count}`.
Example:
```
defmodule ExampleInstrumenter do
use Prometheus.Metric
## to be called at app/supervisor startup.
## to tolerate restarts use declare.
def setup do
Histogram.new([name: :http_request_duration_milliseconds,
labels: [:method],
buckets: [100, 300, 500, 750, 1000],
help: "Http Request execution time."])
end
def instrument(%{time: time, method: method}) do
Histogram.observe([name: :http_request_duration_milliseconds, labels: [method]],
time)
end
end
```
"""
use Prometheus.Erlang, :prometheus_histogram
@doc """
Creates a histogram using `spec`.
Histogram cannot have a label named "le".
Raises `Prometheus.MissingMetricSpecKeyError` if required `spec` key is missing.<br>
Raises `Prometheus.InvalidMetricNameError` if metric name is invalid.<br>
Raises `Prometheus.InvalidMetricHelpError` if help is invalid.<br>
Raises `Prometheus.InvalidMetricLabelsError` if labels isn't a list.<br>
Raises `Prometheus.InvalidMetricNameError` if label name is invalid.<br>
Raises `Prometheus.InvalidValueError` exception if duration_unit is unknown or
doesn't match metric name.<br>
Raises `Prometheus.MFAlreadyExistsError` if a histogram with the same `spec` exists.
Histogram-specific exceptions:
Raises `Prometheus.HistogramNoBucketsError` if buckets are missing, not a list,
empty list or not known buckets spec.<br>
Raises `Prometheus.HistogramInvalidBucketsError` if buckets aren't
in increasing order.<br>
Raises `Prometheus.HistogramInvalidBoundError` if bucket bound isn't a number.
"""
delegate new(spec)
@doc """
Creates a histogram using `spec`.
Histogram cannot have a label named "le".
If a histogram with the same `spec` exists returns `false`.
Raises `Prometheus.MissingMetricSpecKeyError` if required `spec` key is missing.<br>
Raises `Prometheus.InvalidMetricNameError` if metric name is invalid.<br>
Raises `Prometheus.InvalidMetricHelpError` if help is invalid.<br>
Raises `Prometheus.InvalidMetricLabelsError` if labels isn't a list.<br>
Raises `Prometheus.InvalidMetricNameError` if label name is invalid.<br>
Raises `Prometheus.InvalidValueError` exception if duration_unit is unknown or
doesn't match metric name.
Histogram-specific exceptions:
Raises `Prometheus.HistogramNoBucketsError` if buckets are missing, not a list,
empty list or not known buckets spec.<br>
Raises `Prometheus.HistogramInvalidBucketsError` if buckets aren't
in increasing order.<br>
Raises `Prometheus.HistogramInvalidBoundError` if bucket bound isn't a number.
"""
delegate declare(spec)
@doc """
Observes the given amount.
Raises `Prometheus.InvalidValueError` exception if `amount` isn't
a number.<br>
Raises `Prometheus.UnknownMetricError` exception if a histogram for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric observe(spec, amount \\ 1)
@doc """
Observes the amount of time spent executing `body`.
Raises `Prometheus.UnknownMetricError` exception if a histogram for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
Raises `Prometheus.InvalidValueError` exception if fun isn't a function or block.
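
  ## Example

  A sketch, where `handle_request/1` stands in for the instrumented work:

      Histogram.observe_duration [name: :http_request_duration_milliseconds,
                                  labels: [method]] do
        handle_request(request)
      end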
"""
defmacro observe_duration(spec, body) do
env = __CALLER__
Prometheus.Injector.inject(
fn block ->
quote do
start_time = :erlang.monotonic_time()
try do
unquote(block)
after
end_time = :erlang.monotonic_time()
Prometheus.Metric.Histogram.observe(unquote(spec), end_time - start_time)
end
end
end,
env,
body
)
end
@doc """
Removes histogram series identified by spec.
Raises `Prometheus.UnknownMetricError` exception if a histogram for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric remove(spec)
@doc """
Resets the value of the histogram identified by `spec`.
Raises `Prometheus.UnknownMetricError` exception if a histogram for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric reset(spec)
@doc """
Returns the value of the histogram identified by `spec`. If there is no histogram for
given labels combination, returns `:undefined`.
Raises `Prometheus.UnknownMetricError` exception if a histogram for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric value(spec)
end
|
astreu/deps/prometheus_ex/lib/prometheus/metric/histogram.ex
| 0.959374 | 0.946498 |
histogram.ex
|
starcoder
|
defrecord Flect.Compiler.Syntax.Node, type: nil,
location: nil,
tokens: [],
children: [],
comments: [],
data: nil do
@moduledoc """
Represents an AST (abstract syntax tree) node.
`type` is an atom indicating the kind of node. `location` is a
`Flect.Compiler.Syntax.Location` indicating the node's location in
the source code document. `tokens` is a list of the
`Flect.Compiler.Syntax.Token`s that make up this node. `children` is
a list of all children. `comments` contains a list of comment tokens
that belong to this node. `data` is an arbitrary term associated with
the node - it can have different meanings depending on which compiler
stage the node is being used in.
"""
record_type(type: atom(),
location: Flect.Compiler.Syntax.Location.t(),
tokens: [{atom(), Flect.Compiler.Syntax.Token.t()}],
children: [{atom(), t()}],
comments: [Flect.Compiler.Syntax.Token.t()],
data: term())
@doc """
Formats the node and all of its children in a user-presentable way.
Returns the resulting binary.
`self` is the node record.
"""
@spec format(t()) :: String.t()
def format(self) do
do_format(self, "")
end
@spec do_format(t(), String.t()) :: String.t()
defp do_format(node, indent) do
loc = fn(loc) -> "(#{loc.line()},#{loc.column()})" end
str = "#{indent}#{atom_to_binary(node.type())} #{loc.(node.location())} "
str = "#{str}[ " <> Enum.join((lc {_, t} inlist node.tokens(), do: "\"#{t.value()}\" #{loc.(t.location())}"), ", ") <> " ]\n"
str = "#{str}#{indent}{\n"
str = str <> Enum.join(lc {_, child} inlist node.children(), do: do_format(child, indent <> " ") <> "\n")
str = "#{str}#{indent}}"
str
end
end
|
lib/compiler/syntax/node.ex
| 0.796015 | 0.568116 |
node.ex
|
starcoder
|
defmodule Dictator do
@moduledoc """
Plug that checks if your users are authorised to access the resource.
You can use it at the router or controller level:
```
# lib/my_app_web/controllers/post_controller.ex
defmodule MyApp.PostController do
plug Dictator
def show(conn, params) do
# ...
end
end
# lib/my_app_web/router.ex
defmodule MyAppWeb.Router do
pipeline :authorised do
plug Dictator
end
end
```
Requires Phoenix (or at least `conn.private[:phoenix_action]` to be set).
To load resources from the database, requires Ecto. See `Dictator.Policies.EctoSchema`.
Dictator assumes your policies are in `lib/my_app_web/policies/` and follow
the `MyAppWeb.Policies.Name` naming convention. As an example, for posts,
`MyAppWeb.Policies.Post` would be defined in
`lib/my_app_web/policies/post.ex`.
It is also assumed the current user is loaded and available on
`conn.assigns`. By default, it is assumed to be under
  `conn.assigns[:current_user]`, although this option can be overridden.
## Plug Options
Options that you can pass to the module, when plugging it (e.g. `plug
Dictator, only: [:create, :update]`). None of the following options are
required.
* `only`: limits the actions to perform authorisation on to the provided list.
* `except`: limits the actions to perform authorisation on to exclude the provided list.
* `policy`: policy to apply. See above to understand how policies are inferred.
* `key`: key under which the current user is placed in `conn.assigns` or the
session. Defaults to `:current_user`.
* `fetch_strategy`: Strategy to be used to get the current user. Can be
either `Dictator.FetchStrategies.Assigns` to fetch it from `conn.assigns` or
`Dictator.FetchStrategies.Session` to fetch it from the session. You can also
implement your own strategy and pass it in this option or set it in the
config. Defaults to `Dictator.FetchStrategies.Assigns`.
* `unauthorized_handler`: Handler to be called when the user is not authorised to access the resource. Defaults to the option
  passed to `unauthorized_handler` in your `config/*.exs` files. See the section below for details.
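
  For example (with a hypothetical `:current_admin` assigns key):

  ```
  plug Dictator,
    only: [:update, :delete],
    policy: MyAppWeb.Policies.Post,
    key: :current_admin
  ```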
## Configuration options
Options that you can place in your `config/*.exs` files.
* `key`: Same as the `:key` parameter in the plug option section. The plug option takes precedence, meaning you can place it in a config and then override it in specific controllers or pipelines.
* `unauthorized_handler`: Handler to be called when the user is not authorised to access the resource. Defaults to `Dictator.UnauthorizedHandlers.Default`.
"""
@behaviour Plug
@impl Plug
def init(opts), do: opts
@impl Plug
def call(conn, opts) do
if should_authorize?(conn, opts) do
authorize(conn, opts)
else
conn
end
end
defp should_authorize?(conn, opts) do
action = conn.private[:phoenix_action]
cond do
opts[:only] -> action in opts[:only]
opts[:except] -> action not in opts[:except]
true -> true
end
end
defp authorize(conn, opts) do
policy = opts[:policy] || load_policy(conn)
key = opts[:key] || default_key()
fetch_strategy = opts[:fetch_strategy] || default_fetch_strategy()
user = apply(fetch_strategy, :fetch, [conn, key])
action = conn.private[:phoenix_action]
resource =
if requires_resource_load?(policy) do
apply(policy, :load_resource, [conn.params])
else
nil
end
params = %{params: conn.params, resource: resource, opts: opts}
if apply(policy, :can?, [user, action, params]) do
conn
else
unauthorized_handler = opts[:unauthorized_handler] || unauthorized_handler()
opts = unauthorized_handler.init(opts)
unauthorized_handler.call(conn, opts)
end
end
defp load_policy(conn) do
conn
|> extract_policy_module()
|> ensure_policy_loaded!()
end
defp extract_policy_module(conn) do
conn.private.phoenix_controller
|> Atom.to_string()
|> String.split(".")
|> List.update_at(-1, &String.trim(&1, "Controller"))
|> List.insert_at(2, "Policies")
|> Enum.join(".")
|> String.to_existing_atom()
end
defp ensure_policy_loaded!(mod) do
if Code.ensure_loaded?(mod) do
mod
else
nil
end
end
defp default_key do
Dictator.Config.get(:key, :current_user)
end
defp default_fetch_strategy do
Dictator.Config.get(:fetch_strategy, Dictator.FetchStrategies.Assigns)
end
defp unauthorized_handler do
Dictator.Config.get(:unauthorized_handler, Dictator.UnauthorizedHandlers.Default)
end
defp requires_resource_load?(policy) do
policy.__info__(:attributes)
|> Keyword.get_values(:behaviour)
|> List.flatten()
|> Enum.member?(Dictator.Policies.EctoSchema)
end
end
|
lib/dictator.ex
| 0.840684 | 0.695519 |
dictator.ex
|
starcoder
|
defmodule Reactivity.DSL.Signal do
alias Reactivity.Quality.Context
alias Reactivity.DSL.DoneNotifier
alias ReactiveMiddleware.Registry
alias Observables.Obs
alias Observables.GenObservable
require Logger
@doc """
Creates a signal from a plain observable, operating under the globally set consistency guarantee.
"""
def from_plain_obs(obs) do
cg = Registry.get_guarantee
Logger.debug("The guarantee set: #{inspect cg}")
cobs = Context.new_context_obs(obs, cg)
sobs =
obs
|> Obs.zip(cobs)
{:signal, sobs}
end
@doc """
Creates a signal from a signal observable, that is: an observable with output of the format {v, c}.
This is used to create signals for guarantees with non-obvious context content that can be manually attached
using the plain observable interface.
"""
def from_signal_obs(sobs) do
{:signal, sobs}
end
@doc """
Transforms a signal into a plain observable, stripping all messages from their contexts.
"""
def to_plain_obs({:signal, sobs}=_signal) do
{vobs, _cobs} =
sobs
|> Obs.unzip
vobs
end
@doc """
Transforms a signal into a signal observable, that is: an observable with output of the format {v, c}.
Thus, the messages of the signal are fully kept, no context is stripped.
"""
def to_signal_obs({:signal, sobs}=_signal) do
sobs
end
@doc """
Returns the current value of the Signal.
"""
def evaluate({:signal, sobs}=_signal) do
case Obs.last(sobs) do
nil -> nil
{v, _c} -> v
end
end
@doc """
Applies a given procedure to a signal's value and its previous result.
Works in the same way as the Enum.scan function:
Enum.scan(1..10, fn(x,y) -> x + y end)
=> [1, 3, 6, 10, 15, 21, 28, 36, 45, 55]
"""
def scan({:signal, sobs}=_signal, func, default \\ nil) do
{vobs, cobs} =
sobs
|> Obs.unzip
svobs =
vobs
|> Obs.scan(func, default)
nobs =
svobs
|> Obs.zip(cobs)
{:signal, nobs}
end
@doc """
Delays each produced item by the given interval.
"""
def delay({:signal, sobs}=_signal, interval) do
dobs =
sobs
|> Obs.delay(interval)
{:signal, dobs}
end
@doc """
Applies a procedure to the values of a signal without changing them.
Generally used for side effects.
"""
def each({:signal, sobs}=_signal, proc) do
{vobs, _cobs} =
sobs
|> Obs.unzip
vobs
|> Obs.each(proc)
{:signal, sobs}
end
@doc """
  Lifts and applies a primitive function to one or more signals.
  The values of the input signals are combined into output values using this
  function, subject to the consistency guarantees of the signals.
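
  ## Example

      # a sketch: lift addition over two hypothetical input signals
      sum = Signal.liftapp([signal_a, signal_b], fn a, b -> a + b end)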
"""
def liftapp({:signal, sobs}=_signal, func) do
new_sobs = sobs
|> Obs.map(fn {v, c} -> {func.(v), c} end)
{:signal, new_sobs}
end
def liftapp(signals, func) do
# Combine the input from the list of signals
cg = Registry.get_guarantee
new_sobs =
signals
|> Enum.map(fn {:signal, sobs} -> sobs end)
|> Obs.combinelatest_n
# Filter out input that is not of sufficient quality.
|> Obs.filter(
fn ctup ->
Tuple.to_list(ctup)
|> Enum.map(fn {_v, c} -> c end)
|> Context.combine(cg)
|> Context.sufficient_quality?(cg)
end)
# Apply the function to the input to create output.
|> Obs.map(
fn ctup ->
clist = Tuple.to_list(ctup)
vals =
clist
|> Enum.map(fn {v, _c} -> v end)
cts =
clist
|> Enum.map(fn {_v, c} -> c end)
new_cxt = Context.combine(cts, cg)
new_val = apply(func, vals)
{new_val, new_cxt}
end)
{:signal, new_sobs}
end
@doc """
Gets a signal from the registry by its name.
"""
def signal(name) do
{:ok, signal} = Registry.get_signal(name)
signal
end
@doc """
Gets the names, types and hosts of all the available signals.
"""
def signals() do
{:ok, signals} = Registry.get_signals
signals
|> Enum.map(fn {name, {host, _signal}} -> {name, host} end)
end
@doc """
Gets the node where the signal with the given name is hosted.
"""
def host(name) do
{:ok, host} = Registry.get_signal_host(name)
host
end
@doc """
Publishes a signal by registering it in the registry.
"""
def register({:signal, sobs}=signal, name) do
Registry.add_signal(signal, name)
{:ok, notifier} = DoneNotifier.start(name)
    {_cont, pid} = sobs
GenObservable.notify_done(pid, notifier)
signal
end
@doc """
Unregisters a signal from the registry by its name.
"""
def unregister(name) do
Registry.remove_signal(name)
end
@doc """
Inspects the given signal by printing its output values `v` to the console.
"""
def print({:signal, sobs}=_signal) do
{vobs, _cobs} =
sobs
|> Obs.unzip
vobs
|> Obs.inspect
{:signal, sobs}
end
@doc """
Inspects the given signal by printing its output messages `{v, c}` to the console.
"""
def print_message({:signal, sobs}=_signal) do
sobs
|> Obs.inspect
{:signal, sobs}
end
end
|
lib/reactivity/dsl/signal.ex
| 0.67971 | 0.631836 |
signal.ex
|
starcoder
|
defmodule FIFO do
@moduledoc """
A first-in-first-out queue data structure for Elixir.
With a first-in-first-out (FIFO) queue, the first item inserted is the first
item removed. A real-life analogy is the line, or queue, at the grocery store.
The first person to get in line is the first person helped, and that order is
maintained until the line is empty.
iex> queue = FIFO.new
#FIFO<[]>
iex> queue = queue |> FIFO.push(1) |> FIFO.push(2)
#FIFO<[1, 2]>
iex> {{:value, 1}, queue} = FIFO.pop(queue)
iex> queue
#FIFO<[2]>
iex> {{:value, 2}, queue} = FIFO.pop(queue)
iex> {:empty, queue} = FIFO.pop(queue)
iex> queue
#FIFO<[]>
Under the hood, this library uses the `:queue` data structure in Erlang's
standard library: https://erlang.org/doc/man/queue.html. It wraps the
  original API with a few name changes.
The reason for this library is to provide a more Elixir idiomatic queue
implementation. For example, I renamed Erlang's `is_empty/1` to `empty?/1`.
More importantly, I reordered arguments to allow piping, so the queue is the
first argument:
iex> FIFO.new |> FIFO.push(1) |> FIFO.push(2)
#FIFO<[1, 2]>
Additionally, this data structure implements three Elixir protocols: `Inspect`,
`Enumerable`, and `Collectable`. `Inspect` allows pretty printing, as you can
see in the example above. `Enumerable` and `Collectable` are useful for
working with collections.
A limitation of this implementation is that queues cannot reliably be compared
using `==/2`. That is because of the way the Erlang library implements the
queue to amortize operations. If you need to compare two queues, you can
use `FIFO.equal?/2`.
iex> queue1 = FIFO.new(1..3)
iex> queue2 = FIFO.new |> FIFO.push(1) |> FIFO.push(2) |> FIFO.push(3)
iex> queue1 == queue2
false
iex> FIFO.equal?(queue1, queue2)
true
"""
@opaque queue :: %__MODULE__{store: :queue.queue()}
@type t :: queue
defstruct store: :queue.new()
@doc """
Returns an empty queue.
## Examples
iex> FIFO.new()
#FIFO<[]>
"""
@spec new :: t
def new do
:queue.new() |> wrap_store
end
defp wrap_store(store), do: %FIFO{store: store}
@doc """
Creates a queue from an enumerable.
## Examples
iex> FIFO.new([1, 2, 3])
#FIFO<[1, 2, 3]>
"""
@spec new(Enum.t()) :: t
def new(enumerable) do
enumerable
|> Enum.to_list()
|> from_list
end
@doc """
Creates a queue from an enumerable via the transformation function.
## Examples
iex> FIFO.new([1, 2, 3], fn n -> n * n end)
#FIFO<[1, 4, 9]>
"""
@spec new(Enum.t(), (term -> term)) :: t
def new(enumerable, transform) do
enumerable
|> Enum.map(transform)
|> from_list
end
@doc """
Creates a queue from a list.
## Examples
iex> FIFO.from_list([1, 2, 3])
#FIFO<[1, 2, 3]>
"""
@spec from_list(list) :: t
def from_list(list) when is_list(list) do
list |> :queue.from_list() |> wrap_store
end
@doc """
Compares two queues. Returns `true` if they contain the same items in the same
order, returns `false` if not.
Because of the implementation of `:queue`, you cannot reliably compare two
queues using `==/2`. Use `FIFO.equal?/2` instead.
## Examples
iex> queue1 = FIFO.new([1, 2, 3])
iex> queue2 = FIFO.new([1, 2, 3])
iex> FIFO.equal?(queue1, queue2)
true
iex> queue1 = FIFO.new([1, 2, 3])
iex> queue2 = FIFO.new([1, 2])
iex> FIFO.equal?(queue1, queue2)
false
"""
@spec equal?(t, t) :: boolean
def equal?(%FIFO{} = queue1, %FIFO{} = queue2) do
to_list(queue1) == to_list(queue2)
end
@doc """
Filters a queue.
## Examples
iex> queue = FIFO.from_list([1,2,3,4])
iex> FIFO.filter(queue, fn item -> rem(item, 2) != 0 end)
#FIFO<[1, 3]>
"""
@spec filter(t, (term -> boolean)) :: t
def filter(%FIFO{store: store}, func) do
store |> do_filter(func) |> wrap_store
end
defp do_filter(store, func) do
:queue.filter(func, store)
end
@doc """
Returns a list of items in a queue.
## Examples
iex> queue = FIFO.from_list([1, 2, 3, 4])
iex> FIFO.to_list(queue)
[1, 2, 3, 4]
"""
@spec to_list(t) :: list
def to_list(%FIFO{store: store}), do: :queue.to_list(store)
@doc """
Enqueues an item at the end of the queue.
## Examples
iex> queue = FIFO.from_list([1, 2])
iex> FIFO.push(queue, 3)
#FIFO<[1, 2, 3]>
"""
@spec push(t, term) :: t
def push(%FIFO{store: store}, item) do
:queue.in(item, store) |> wrap_store
end
@doc """
Enqueues an item at the front of the queue.
## Examples
iex> queue = FIFO.from_list([1, 2])
iex> FIFO.push_r(queue, 3)
#FIFO<[3, 1, 2]>
"""
@spec push_r(t, term) :: t
def push_r(%FIFO{store: store}, item) do
:queue.in_r(item, store) |> wrap_store
end
@doc """
Returns `true` if the queue has no items. Returns `false` if the queue has items.
## Examples
iex> queue = FIFO.new
iex> FIFO.empty?(queue)
true
iex> queue = FIFO.from_list([1])
iex> FIFO.empty?(queue)
false
"""
@spec empty?(t) :: boolean
def empty?(%FIFO{store: store}), do: :queue.is_empty(store)
@doc """
Returns `true` if the given value is a queue. Returns `false` if not.
## Examples
iex> FIFO.queue?(FIFO.new)
true
iex> FIFO.queue?([])
false
"""
@spec queue?(t) :: boolean
def queue?(%FIFO{store: store}), do: :queue.is_queue(store)
def queue?(_), do: false
@doc """
Returns a new queue which is a combination of `queue1` and `queue2`. `queue1`
is in front of `queue2`.
## Examples
iex> queue1 = FIFO.from_list([1, 2])
iex> queue2 = FIFO.from_list([3, 4])
iex> FIFO.join(queue1, queue2)
#FIFO<[1, 2, 3, 4]>
"""
@spec join(t, t) :: t
def join(%FIFO{store: store1}, %FIFO{store: store2}) do
:queue.join(store1, store2) |> wrap_store
end
@doc """
Returns the length of the queue.
## Examples
iex> queue = FIFO.new
iex> FIFO.length(queue)
0
iex> queue = FIFO.from_list([1, 2, 3])
iex> FIFO.length(queue)
3
"""
@spec length(t) :: non_neg_integer
def length(%FIFO{store: store}), do: :queue.len(store)
@doc """
Returns `true` if `item` matches a value in queue. Returns `false` if not.
## Examples
iex> queue = FIFO.from_list([1, 2, 3])
iex> FIFO.member?(queue, 2)
true
iex> queue = FIFO.from_list([1, 2, 3])
iex> FIFO.member?(queue, 7)
false
"""
@spec member?(t, term) :: boolean
def member?(%FIFO{store: store}, item), do: :queue.member(item, store)
@type tagged_value(term) :: {:value, term}
@type value_out :: {tagged_value(term), t}
@type empty_out :: {:empty, t}
@doc """
Removes item from the front of the queue.
## Examples
iex> queue = FIFO.from_list([1, 2])
iex> {{:value, 1}, queue} = FIFO.pop(queue)
iex> queue
#FIFO<[2]>
iex> queue = FIFO.new
iex> {:empty, queue} = FIFO.pop(queue)
iex> queue
#FIFO<[]>
"""
@spec pop(t) :: value_out | empty_out
def pop(%FIFO{store: store}) do
store |> :queue.out() |> handle_pop
end
defp handle_pop({{:value, item}, updated_store}) do
{{:value, item}, wrap_store(updated_store)}
end
defp handle_pop({:empty, updated_store}) do
{:empty, wrap_store(updated_store)}
end
@doc """
Returns an item from the end of the queue.
## Examples
iex> queue = FIFO.from_list([1, 2, 3])
iex> {{:value, 3}, queue} = FIFO.pop_r(queue)
iex> queue
#FIFO<[1, 2]>
iex> queue = FIFO.new
iex> {:empty, queue} = FIFO.pop_r(queue)
iex> queue
#FIFO<[]>
"""
@spec pop_r(t) :: value_out | empty_out
def pop_r(%FIFO{store: store}) do
store |> :queue.out_r() |> handle_pop
end
@doc """
Reverses a queue.
## Examples
iex> queue = FIFO.from_list([1, 2, 3])
iex> FIFO.reverse(queue)
#FIFO<[3, 2, 1]>
"""
@spec reverse(t) :: t
def reverse(%FIFO{store: store}) do
store |> :queue.reverse() |> wrap_store
end
@doc """
Splits a queue into two queues, starting from the given position `n`.
## Examples
iex> queue = FIFO.from_list([1, 2, 3])
iex> {queue2, queue3} = FIFO.split(queue, 1)
iex> queue2
#FIFO<[1]>
iex> queue3
#FIFO<[2, 3]>
"""
@spec split(t, integer) :: {t, t}
def split(%FIFO{store: store}, n) when n >= 0 do
{store2, store3} = :queue.split(n, store)
{wrap_store(store2), wrap_store(store3)}
end
defimpl Enumerable do
def count(queue) do
{:ok, FIFO.length(queue)}
end
def member?(queue, val) do
{:ok, FIFO.member?(queue, val)}
end
def slice(queue) do
length = FIFO.length(queue)
{:ok, length, &Enumerable.List.slice(FIFO.to_list(queue), &1, &2, length)}
end
def reduce(queue, acc, fun) do
Enumerable.List.reduce(FIFO.to_list(queue), acc, fun)
end
end
defimpl Collectable do
def into(queue) do
fun = fn
list, {:cont, x} -> [x | list]
list, :done -> FIFO.join(queue, FIFO.from_list(Enum.reverse(list)))
_, :halt -> :ok
end
{[], fun}
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(queue, opts) do
opts = %Inspect.Opts{opts | charlists: :as_lists}
concat(["#FIFO<", Inspect.List.inspect(FIFO.to_list(queue), opts), ">"])
end
end
end
|
lib/fifo.ex
| 0.860574 | 0.533701 |
fifo.ex
|
starcoder
|
defmodule Gpio.Mock do
use GenServer
@behaviour Gpio
defmodule State do
@moduledoc false
defstruct pin: nil,
direction: nil,
callbacks: [],
value: 0
end
# Public API
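  # Example (a sketch): start a mock pin, write a value, read it back.
  #
  #     {:ok, _pid} = Gpio.Mock.start_link(17, :output)
  #     :ok = Gpio.Mock.write(17, 1)
  #     {:ok, 1} = Gpio.Mock.read(17)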
def start_link(pin, pin_direction, opts \\ []) do
opts = Keyword.put(opts, :name, Gpio.server_ref(pin))
GenServer.start_link(__MODULE__, [pin, pin_direction], opts)
end
def release(pin) do
GenServer.cast(Gpio.server_ref(pin), :release)
end
def write(pin, value) when is_integer(value) do
GenServer.call(Gpio.server_ref(pin), {:write, value})
end
def read(pin) do
GenServer.call(Gpio.server_ref(pin), :read)
end
def set_int(pin, direction) do
GenServer.call(Gpio.server_ref(pin), {:set_int, direction, self()})
end
# GenServer callbacks
def init([pin, pin_direction]) do
state = %State{pin: pin, direction: pin_direction}
{:ok, state}
end
def handle_call(:read, _from, %State{value: value} = state) do
{:reply, {:ok, value}, state}
end
def handle_call({:write, value}, _from, %State{direction: :output} = state) do
trigger_interrupt(value, state)
state = %State{state | value: value}
{:reply, :ok, state}
end
def handle_call({:write, value}, _from, %State{direction: :input} = state) do
state = %State{state | value: value}
{:reply, :ok, state}
end
def handle_call({:set_int, direction, requestor}, _from, %State{direction: :input} = state) do
true = Gpio.pin_interrupt_condition?(direction)
new_callbacks = Gpio.insert_unique(state.callbacks, requestor)
state = %State{state | callbacks: new_callbacks}
{:reply, :ok, state}
end
def handle_cast(:release, state) do
{:stop, :normal, state}
end
def handle_info(_msg, state) do
{:noreply, state}
end
defp trigger_interrupt(value, %State{value: value}) do
:ok
end
defp trigger_interrupt(new_value, state) do
condition = case new_value do
0 -> :falling
1 -> :rising
end
msg = {:gpio_interrupt, state.pin, condition}
Enum.each(state.callbacks, &(send(&1, msg)))
end
end
|
lib/gpio/mock.ex
| 0.602997 | 0.494751 |
mock.ex
|
starcoder
|
defmodule Versioning.Schema do
@moduledoc """
Defines a versioning schema.
A versioning schema is used to change data through a series of steps from a
"current" version to a "target" version. This is useful in maintaining backwards
compatability with older versions of API's without enormous complication.
## Example
defmodule MyApp.Versioning do
use Versioning.Schema, adapter: Versioning.Adapter.Semantic
version("2.0.0", do: [])
version "1.1.0" do
type "User" do
change(MyApp.Changes.SomeUserChange)
end
end
version "1.0.1" do
type "Post" do
change(MyApp.Changes.SomePostChange))
end
type "All!" do
change(MyApp.Changes.SomeAllChange)
end
end
version("1.0.0", do: [])
end
When creating a schema, an adapter must be specified. The adapter determines how
versions are parsed and compared. For more information on adapters, please see
`Versioning.Adapter`.
In the example above, we have 4 versions. Our current version is represented by
the top version - `"2.0.0"`. Our oldest version is at the bottom - `"1.0.0"`.
We define a version with the `version/2` macro. Within a version, we specify types
that have been manipulated. We define a type with the `type/2` macro. Within
a type, we specify changes that have occured. We define a change with the `change/2`
macro.
## Running Schemas
  Let's say we have a `%Post{}` struct that we would like to run through our schema.
post = %Post{status: :enabled}
Versioning.new(post, "2.0.0", "1.0.0")
We have created a new versioning of our post struct. The versioning sets the
data, current version, target version, as well as type. We can now run our
versioning through our schema.
{:ok, versioning} = MyApp.Versioning.run(versioning)
With the above, our versioning struct will first be run through our MyApp.Changes.SomePostChange
change module as the type matches our versioning type. It will then be run through
  our MyApp.Changes.SomeAllChange as it also matches on the `"All!"` type (more detail
available at the `change/2` macro).
With the above, we are transforming our data "down" through our schema. But we
can also transform it "up".
post = %{"status" => "some_status"}
Versioning.new(post, "1.0.0", "2.0.0", "Post")
If we were to run our new versioning through the schema, the same change modules
would be run, but in reverse order.
## Change Modules
At the heart of versioning schemas are change modules. You can find more information
about creating change modules at the `Versioning.Change` documentation.
## Schema attributes
Supported attributes for configuring the defined schema. They must be
set after the `use Versioning.Schema` call.
These attributes are:
* `@latest` - configures the schema latest version. By default, this will
be the version at the top of your schema. But if you do not wish to have
this behaviour, you can set it here.
## Reflection
Any schema module will generate the `__schema__` function that can be
used for runtime introspection of the schema:
* `__schema__(:down)` - Returns the data structure representing a downward versioning.
* `__schema__(:up)` - Returns the data structure representing an upward versioning.
* `__schema__(:adapter)` - Returns the versioning adapter used by the schema.
* `__schema__(:latest, :string)` - Returns the latest version in string format.
* `__schema__(:latest, :parsed)` - Returns the latest version in parsed format.
"""
@type t :: module()
@type direction :: :up | :down
@type change :: {atom(), list()}
@type type :: {binary(), [change()]}
@type version :: {binary(), [type()]}
@type schema :: [version()]
@type result :: Versioning.t() | [Versioning.t()] | no_return()
defmacro __using__(opts) do
adapter = Keyword.get(opts, :adapter)
unless adapter do
raise ArgumentError, "missing :adapter option on use Versioning.Schema"
end
quote do
@adapter unquote(adapter)
@latest nil
def run(versioning_or_versionings) do
Versioning.Schema.Executer.run(__MODULE__, versioning_or_versionings)
end
import Versioning.Schema, only: [version: 2, type: 2, change: 1, change: 2]
Module.register_attribute(__MODULE__, :_schema, accumulate: true)
@before_compile Versioning.Schema
end
end
@doc """
Defines a version in the schema.
A version must be in string format, and must adhere to requirements of the
Elixir `Version` module. This means SemVer 2.0.
A version can only be represented once within a schema. The most recent version
should be at the top of your schema, and the oldest at the bottom.
Any issue with the above will raise a `Versioning.CompileError` during schema
compilation.
## Example
version "1.0.1" do
end
version("1.0.0", do: [])
"""
defmacro version(version, do: block) do
quote do
@_schema {:version, unquote(version)}
unquote(block)
end
end
@doc """
Defines a type within a version.
A type can only be represented once within a version, and must be a string. Any
issue with this will raise a `Versioning.CompileError` during compilation.
Typically, it should be represented in `"CamelCase"` format.
Any changes within a type that matches the type on a `Versioning` struct will
be run. There is also the special case `"All!"` type, which lets you define
changes that will be run against all versionings - regardless of type.
## Example
version "1.0.1" do
type "All!" do
end
type "Foo" do
end
end
"""
defmacro type(object, do: block) do
quote do
@_schema {:type, unquote(object)}
unquote(block)
end
end
@doc """
Defines a change within a type.
A change must be represented by a module that implements the `Versioning.Change`
behaviour. You can also set options that will be passed along to the change module.
Changes are run in the order they are placed, based on the direction of the
version change. For instance, if a schema was being run "down" for the example below,
MyChangeModule would be run first, followed by MyOtherChangeModule. This would
be reversed if running "up" a schema.
## Example
version "1.0.1" do
type "Foo" do
change(MyChangeModule)
change(MyOtherChangeModule, [foo: :bar])
end
end
"""
defmacro change(change, init \\ []) do
quote do
@_schema {:change, unquote(change), unquote(init)}
end
end
defmacro __before_compile__(env) do
{schema_down, schema_up, latest} = Versioning.Schema.Compiler.build(env)
schema_down = Macro.escape(schema_down)
schema_up = Macro.escape(schema_up)
latest = Macro.escape(latest)
quote do
def __schema__(:down) do
unquote(schema_down)
end
def __schema__(:up) do
unquote(schema_up)
end
def __schema__(:latest, :parsed) do
unquote(latest)
end
def __schema__(:latest, :string) do
to_string(unquote(latest))
end
def __schema__(:adapter) do
@adapter
end
end
end
end
|
lib/versioning/schema.ex
| 0.890663 | 0.611498 |
schema.ex
|
starcoder
|
defmodule Gringotts.Gateways.Paymill do
@moduledoc """
  An API client for the [PAYMILL](https://www.paymill.com/) gateway.
  For reference, see [PAYMILL's API (v2.1) documentation](https://developers.paymill.com/API/index)
The following features of PAYMILL are implemented:
| Action | Method |
| ------ | ------ |
| Authorize | `authorize/3` |
| Capture | `capture/3` |
| Purchase | `purchase/3` |
| Void | `void/2` |
  The following config fields are required:
  | Config Parameter | PAYMILL secret       |
  | ---------------- | -------------------- |
  | private_key      | **your_private_key** |
  | public_key       | **your_public_key**  |
Your application config must include 'private_key', 'public_key'
config :gringotts, Gringotts.Gateways.Paymill,
private_key: "your_privat_key",
public_key: "your_public_key"
"""
use Gringotts.Gateways.Base
alias Gringotts.{CreditCard, Address, Response}
alias Gringotts.Gateways.Paymill.ResponseHandler, as: ResponseParser
use Gringotts.Adapter, required_config: [:private_key, :public_key]
@home_page "https://paymill.com"
@money_format :cents
@default_currency "EUR"
@live_url "https://api.paymill.com/v2.1/"
@headers [{"Content-Type", "application/x-www-form-urlencoded"}]
@doc """
  Authorize a card with a particular amount and return a token in the response
### Example
amount = 100
card = %CreditCard{
first_name: "Sagar",
last_name: "Karwande",
number: "4111111111111111",
month: 12,
year: 2018,
verification_code: 123
}
options = []
iex> Gringotts.authorize(Gringotts.Gateways.Paymill, amount, card, options)
"""
@spec authorize(number, String.t | CreditCard.t, Keyword) :: {:ok | :error, Response}
def authorize(amount, card_or_token, options) do
    options = Keyword.put(options, :money, amount)
action_with_token(:authorize, amount, card_or_token, options)
end
@doc """
Purchase with a card
### Example
amount = 100
card = %CreditCard{
first_name: "Sagar",
last_name: "Karwande",
number: "4111111111111111",
month: 12,
year: 2018,
verification_code: 123
}
options = []
iex> Gringotts.purchase(Gringotts.Gateways.Paymill, amount, card, options)
"""
@spec purchase(number, CreditCard.t, Keyword) :: {:ok | :error, Response}
def purchase(amount, card, options) do
options = Keyword.put(options, :money, amount)
action_with_token(:purchase, amount, card, options)
end
@doc """
Captures a particular amount with an authorization token
### Example
amount = 100
token = "preauth_<PASSWORD>"
options = []
iex> Gringotts.capture(Gringotts.Gateways.Paymill, token, amount, options)
"""
@spec capture(String.t, number, Keyword) :: {:ok | :error, Response}
def capture(authorization, amount, options) do
post = add_amount([], amount, options) ++ [{"preauthorization", authorization}]
commit(:post, "transactions", post, options)
end
@doc """
Voids a particular authorized amount
### Example
token = "preauth_<PASSWORD>"
options = []
iex> Gringotts.void(Gringotts.Gateways.Paymill, token, options)
"""
@spec void(String.t, Keyword) :: {:ok | :error, Response}
def void(authorization, options) do
commit(:delete, "preauthorizations/#{authorization}", [], options)
end
@doc false
@spec authorize_with_token(number, String.t, Keyword) :: term
def authorize_with_token(money, card_token, options) do
post = add_amount([], money, options) ++ [{"token", card_token}]
commit(:post, "preauthorizations", post, options)
end
@doc false
@spec purchase_with_token(number, String.t, Keyword) :: term
def purchase_with_token(money, card_token, options) do
post = add_amount([], money, options) ++ [{"token", card_token}]
commit(:post, "transactions", post, options)
end
@spec save_card(CreditCard.t, Keyword) :: Response
defp save_card(card, options) do
{:ok, %HTTPoison.Response{body: response}} = HTTPoison.get(
get_save_card_url(),
get_headers(options),
params: get_save_card_params(card, options))
parse_card_response(response)
end
@spec save(CreditCard.t, Keyword) :: Response
defp save(card, options) do
save_card(card, options)
end
defp action_with_token(action, amount, "tok_" <> id = card_token, options) do
apply(__MODULE__, String.to_atom("#{action}_with_token"), [amount, card_token , options])
end
defp action_with_token(action, amount, %CreditCard{} = card, options) do
{:ok, response} = save_card(card, options)
card_token = get_token(response)
apply(__MODULE__, String.to_atom("#{action}_with_token"), [amount, card_token, options])
end
defp get_save_card_params(card, options) do
[
{"transaction.mode" , "CONNECTOR_TEST"},
{"channel.id" , get_config(:public_key, options)},
{"jsonPFunction" , "jsonPFunction"},
{"account.number" , card.number},
{"account.expiry.month" , card.month},
{"account.expiry.year" , card.year},
{"account.verification" , card.verification_code},
{"account.holder" , "#{card.first_name} #{card.last_name}"},
{"presentation.amount3D" , get_amount(options)},
{"presentation.currency3D" , get_currency(options)}
]
end
defp get_headers(options) do
@headers ++ set_username(options)
end
defp add_amount(post, money, _options) do
post ++ [{"amount", money}, {"currency", @default_currency}]
end
defp set_username(options) do
[{"Authorization", "Basic #{Base.encode64(get_config(:private_key, options))}"}]
end
defp get_save_card_url(), do: "https://test-token.paymill.com/"
defp parse_card_response(response) do
response
|> String.replace(~r/jsonPFunction\(/, "")
|> String.replace(~r/\)/, "")
|> Poison.decode
end
defp get_currency(options), do: options[:currency] || @default_currency
defp get_amount(options), do: options[:money]
defp get_token(response) do
get_in(response, ["transaction", "identification", "uniqueId"])
end
defp commit(method, action, parameters \\ nil, options) do
method
|> HTTPoison.request(@live_url <> action, {:form, parameters}, get_headers(options), [])
|> ResponseParser.parse
end
defp get_config(key, options) do
get_in(options, [:config, key])
end
defmodule ResponseHandler do
@moduledoc false
alias Gringotts.Response
@response_code %{
10_001 => "Undefined response",
10_002 => "Waiting for something",
11_000 => "Retry request at a later time",
20_000 => "Operation successful",
20_100 => "Funds held by acquirer",
20_101 => "Funds held by acquirer because merchant is new",
20_200 => "Transaction reversed",
20_201 => "Reversed due to chargeback",
20_202 => "Reversed due to money-back guarantee",
20_203 => "Reversed due to complaint by buyer",
20_204 => "Payment has been refunded",
20_300 => "Reversal has been canceled",
22_000 => "Initiation of transaction successful",
30_000 => "Transaction still in progress",
30_100 => "Transaction has been accepted",
31_000 => "Transaction pending",
31_100 => "Pending due to address",
31_101 => "Pending due to uncleared eCheck",
31_102 => "Pending due to risk review",
31_103 => "Pending due regulatory review",
31_104 => "Pending due to unregistered/unconfirmed receiver",
31_200 => "Pending due to unverified account",
31_201 => "Pending due to non-captured funds",
31_202 => "Pending due to international account (accept manually)",
31_203 => "Pending due to currency conflict (accept manually)",
31_204 => "Pending due to fraud filters (accept manually)",
40_000 => "Problem with transaction data",
40_001 => "Problem with payment data",
40_002 => "Invalid checksum",
40_100 => "Problem with credit card data",
40_101 => "Problem with CVV",
40_102 => "Card expired or not yet valid",
40_103 => "Card limit exceeded",
40_104 => "Card is not valid",
40_105 => "Expiry date not valid",
40_106 => "Credit card brand required",
40_200 => "Problem with bank account data",
40_201 => "Bank account data combination mismatch",
40_202 => "User authentication failed",
40_300 => "Problem with 3-D Secure data",
40_301 => "Currency/amount mismatch",
40_400 => "Problem with input data",
40_401 => "Amount too low or zero",
40_402 => "Usage field too long",
40_403 => "Currency not allowed",
40_410 => "Problem with shopping cart data",
40_420 => "Problem with address data",
40_500 => "Permission error with acquirer API",
40_510 => "Rate limit reached for acquirer API",
42_000 => "Initiation of transaction failed",
42_410 => "Initiation of transaction expired",
50_000 => "Problem with back end",
50_001 => "Country blacklisted",
50_002 => "IP address blacklisted",
50_004 => "Live mode not allowed",
50_005 => "Insufficient permissions (API key)",
50_100 => "Technical error with credit card",
50_101 => "Error limit exceeded",
50_102 => "Card declined",
50_103 => "Manipulation or stolen card",
50_104 => "Card restricted",
50_105 => "Invalid configuration data",
50_200 => "Technical error with bank account",
50_201 => "Account blacklisted",
50_300 => "Technical error with 3-D Secure",
50_400 => "Declined because of risk issues",
50_401 => "Checksum was wrong",
50_402 => "Bank account number was invalid (formal check)",
50_403 => "Technical error with risk check",
50_404 => "Unknown error with risk check",
50_405 => "Unknown bank code",
50_406 => "Open chargeback",
50_407 => "Historical chargeback",
50_408 => "Institution / public bank account (NCA)",
50_409 => "KUNO/Fraud",
50_410 => "Personal Account Protection (PAP)",
50_420 => "Rejected due to acquirer fraud settings",
50_430 => "Rejected due to acquirer risk settings",
50_440 => "Failed due to restrictions with acquirer account",
50_450 => "Failed due to restrictions with user account",
50_500 => "General timeout",
50_501 => "Timeout on side of the acquirer",
50_502 => "Risk management transaction timeout",
50_600 => "Duplicate operation",
50_700 => "Cancelled by user",
50_710 => "Failed due to funding source",
50_711 => "Payment method not usable, use other payment method",
50_712 => "Limit of funding source was exceeded",
50_713 => "Means of payment not reusable (canceled by user)",
50_714 => "Means of payment not reusable (expired)",
50_720 => "Rejected by acquirer",
50_730 => "Transaction denied by merchant",
50_800 => "Preauthorisation failed",
50_810 => "Authorisation has been voided",
50_820 => "Authorisation period expired"
}
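# Illustrative composition of the helpers below, given a hypothetical
# decoded 200 body (the id is made up):
#
#     parse_body(%{"data" => %{"id" => "preauth_123", "transaction" => %{"response_code" => 20_000}}})
#     #=> {:ok, %Response{success: true, authorization: "preauth_123", message: "Operation successful", ...}}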
def parse({:ok, %HTTPoison.Response{body: body, status_code: 200}}) do
body = Poison.decode!(body)
parse_body(body)
end
def parse({:ok, %HTTPoison.Response{body: body, status_code: 400}}) do
body = Poison.decode!(body)
[]
|> set_success(body)
|> set_params(body)
|> handle_opts()
end
def parse({:ok, %HTTPoison.Response{body: body, status_code: 404}}) do
body = Poison.decode!(body)
[]
|> set_success(body)
|> set_params(body)
|> handle_opts()
end
defp set_success(opts, %{"error" => error}) do
opts ++ [message: error, success: false]
end
defp set_success(opts, %{"transaction" => %{"response_code" => 20_000}}) do
opts ++ [success: true]
end
defp parse_body(%{"data" => data}) do
[]
|> set_success(data)
|> parse_authorization(data)
|> parse_status_code(data)
|> set_params(data)
|> handle_opts()
end
defp handle_opts(opts) do
case Keyword.fetch(opts, :success) do
{:ok, true} -> {:ok, Response.success(opts)}
{:ok, false} -> {:error, Response.error(opts)}
end
end
#Status code
defp parse_status_code(opts, %{"status" => "failed"} = body) do
response_code = get_in(body, ["transaction", "response_code"])
response_msg = Map.get(@response_code, response_code, -1)
opts ++ [message: response_msg]
end
defp parse_status_code(opts, %{"transaction" => transaction}) do
response_code = Map.get(transaction, "response_code", -1)
response_msg = Map.get(@response_code, response_code, -1)
opts ++ [status_code: response_code, message: response_msg]
end
defp parse_status_code(opts, %{"response_code" => code}) do
response_msg = Map.get(@response_code, code, -1)
opts ++ [status_code: code, message: response_msg]
end
#Authorization
defp parse_authorization(opts, %{"status" => "failed"}) do
opts ++ [success: false]
end
defp parse_authorization(opts, %{"id" => id} = auth) do
opts ++ [authorization: id]
end
defp set_params(opts, body), do: opts ++ [params: body]
end
end
|
lib/gringotts/gateways/paymill.ex
| 0.863492 | 0.558628 |
paymill.ex
|
starcoder
|
defmodule Jalaali do
@moduledoc """
Jalaali module helps converting Gregorian dates to Jalaali dates.
The Jalaali calendar is widely used in Persia and Afghanistan.
This module helps you with converting Erlang and/or Elixir DateTime formats to Jalaali dates (and vice versa) and checking for leap years.
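A quick round-trip, reusing the dates from the function examples below:
    iex> ~D[2016-12-17] |> Jalaali.to_jalaali() |> Jalaali.to_gregorian()
    ~D[2016-12-17]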
"""
@days_offset 1_721_060
@breaks [
-61,
9,
38,
199,
426,
686,
756,
818,
1111,
1181,
1210,
1635,
2060,
2097,
2192,
2262,
2324,
2394,
2456,
3178
]
@doc """
Converts erlang or elixir date or dateTime from Gregorian to Jalaali format
## Parameters
- arg1: Date to convert in erlang format (a tuple with three elements)
- arg1: erlang dateTime to convert in erlang format (a tuple with two different tuples each with 3 elements)
- ex_dt: Date or DateTime to convert
## Examples
iex> Jalaali.to_jalaali {2016, 12, 17}
{1395, 9, 27}
iex> Jalaali.to_jalaali {{2016, 12, 17}, {11, 11, 11}}
{{1395, 9, 27}, {11, 11, 11}}
iex> Jalaali.to_jalaali ~D[2016-12-17]
~D[1395-09-27]
"""
@spec to_jalaali(tuple() | DateTime.t() | Date.t()) :: tuple() | DateTime.t() | Date.t()
def to_jalaali({gy, gm, gd}) do
d2j(g2d({gy, gm, gd}))
end
def to_jalaali({date, time}) do
{to_jalaali(date), time}
end
def to_jalaali(ex_dt) do
{jy, jm, jd} = to_jalaali({ex_dt.year, ex_dt.month, ex_dt.day})
%{ex_dt | year: jy, month: jm, day: jd}
end
@doc """
Converts erlang or elixir date or dateTime from Jalaali to Gregorian format
## Parameters
- arg1: Date to convert in erlang format (a tuple with three elements)
- arg1: erlang dateTime to convert in erlang format (a tuple with two different tuples each with 3 elements)
- ex_dt: Date or DateTime to convert
## Examples
iex> Jalaali.to_gregorian {1395, 9, 27}
{2016, 12, 17}
iex> Jalaali.to_gregorian {{1395, 9, 27}, {11, 11, 11}}
{{2016, 12, 17}, {11, 11, 11}}
iex> Jalaali.to_gregorian ~D[1395-09-27]
~D[2016-12-17]
"""
@spec to_gregorian(tuple() | DateTime.t() | Date.t()) :: tuple() | DateTime.t() | Date.t()
def to_gregorian({jy, jm, jd}) do
d2g(j2d({jy, jm, jd}))
end
def to_gregorian({date, time}) do
{to_gregorian(date), time}
end
def to_gregorian(ex_dt) do
{gy, gm, gd} = to_gregorian({ex_dt.year, ex_dt.month, ex_dt.day})
%{ex_dt | year: gy, month: gm, day: gd}
end
@doc """
Checks whether a Jalaali date is valid or not.
## Parameters
- arg1: a tuple in the shape of {jalaali_year, jalaali_month, jalaali_day}
## Examples
iex> Jalaali.is_valid_jalaali_date {1395, 9, 27}
true
iex> Jalaali.is_valid_jalaali_date {1395, 91, 27}
false
"""
@spec is_valid_jalaali_date?(tuple()) :: boolean()
def is_valid_jalaali_date?({jy, jm, jd}) do
year_is_valid = jy <= 3177 && -61 <= jy
month_is_valid = 1 <= jm && jm <= 12
day_is_valid = 1 <= jd && jd <= Jalaali.jalaali_month_length(jy, jm)
year_is_valid && month_is_valid && day_is_valid
end
@doc """
This function is the same as `is_valid_jalaali_date?` and is only here
because I forgot to add a question mark in `jalaali <= 0.1.1`
Please use `is_valid_jalaali_date?` instead.
"""
def is_valid_jalaali_date(jdate) do
is_valid_jalaali_date?(jdate)
end
@doc """
Checks if a Jalaali year is leap
## Parameters
- jy: Jalaali Year (-61 to 3177)
## Examples
iex> Jalaali.is_leap_jalaali_year(1395)
true
iex> Jalaali.is_leap_jalaali_year(1396)
false
iex> Jalaali.is_leap_jalaali_year(1394)
false
"""
@spec is_leap_jalaali_year(integer()) :: boolean()
def is_leap_jalaali_year(jy) do
jal_cal(jy).leap == 0
end
@doc """
Number of days in a given month in a Jalaali year.
## Examples
iex> Jalaali.jalaali_month_length(1395, 11)
30
iex> Jalaali.jalaali_month_length(1395, 6)
31
iex> Jalaali.jalaali_month_length(1394, 12)
29
iex> Jalaali.jalaali_month_length(1395, 12)
30
"""
@spec jalaali_month_length(integer(), integer()) :: integer()
def jalaali_month_length(jy, jm) do
cond do
jm <= 6 -> 31
jm <= 11 -> 30
is_leap_jalaali_year(jy) -> 30
true -> 29
end
end
@doc """
Converts jalaali date to days number
"""
@spec jalaali_to_days(integer(), integer(), integer()) :: integer()
def jalaali_to_days(jy, jm, jd) do
j2d({jy, jm, jd}) - @days_offset
end
@doc """
Converts days number to jalaali date
"""
@spec days_to_jalaali(integer()) :: {integer(), integer(), integer()}
def days_to_jalaali(days) do
d2j(days + @days_offset)
end
@doc """
This function determines whether the Jalaali (Persian) year is a leap year (366 days long) or a common year (365 days),
and finds the day in March (Gregorian calendar) of the first day of the Jalaali year (jy).
## Parameters
- jy: Jalaali Year (-61 to 3177)
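## Example
An illustrative call: `gy` follows from `jy + 621`, `leap: 0` matches the
`is_leap_jalaali_year(1395)` example above, and the `march` value assumes
Nowruz 1395 fell on March 20, 2016:
    jal_cal(1395)
    #=> %{gy: 2016, leap: 0, march: 20}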
"""
@spec jal_cal(integer()) :: map()
def jal_cal(jy) do
gy = jy + 621
if jy < -61 or jy >= 3178 do
raise "Invalid Jalaali year #{jy}"
end
{jump, jp, leap_j} = calc_jlimit(jy, {Enum.at(@breaks, 0), -14}, 1)
n = jy - jp
leap_j1 =
if mod(jump, 33) == 4 && jump - n == 4 do
leap_j + div(n, 33) * 8 + div(mod(n, 33) + 3, 4) + 1
else
leap_j + div(n, 33) * 8 + div(mod(n, 33) + 3, 4)
end
leap_g = div(gy, 4) - div((div(gy, 100) + 1) * 3, 4) - 150
march = 20 + leap_j1 - leap_g
n =
if jump - n < 6 do
n - jump + div(jump + 4, 33) * 33
else
jy - jp
end
leap_c = mod(mod(n + 1, 33) - 1, 4)
leap =
case leap_c do
-1 -> 4
_ -> leap_c
end
%{leap: leap, gy: gy, march: march}
end
@spec calc_jlimit(integer(), {integer(), integer()}, integer()) ::
{integer(), integer(), integer()}
defp calc_jlimit(jy, {jp, leap_j}, index) do
jm = Enum.at(@breaks, index)
jump = jm - jp
if jy < jm do
{jump, jp, leap_j}
else
calc_jlimit(jy, {jm, leap_j + div(jump, 33) * 8 + div(mod(jump, 33), 4)}, index + 1)
end
end
@spec j2d({integer(), integer(), integer()}) :: integer()
defp j2d({jy, jm, jd}) do
r = jal_cal(jy)
g2d({r.gy, 3, r.march}) + (jm - 1) * 31 - div(jm, 7) * (jm - 7) + jd - 1
end
@spec d2j(integer()) :: {integer(), integer(), integer()}
defp d2j(jdn) do
# calculate gregorian year (gy)
gy = elem(d2g(jdn), 0)
jy = gy - 621
r = jal_cal(jy)
jdn1f = g2d({gy, 3, r.march})
# find number of days that passed since 1 farvardin
k = jdn - jdn1f
cond do
k <= 185 && k >= 0 ->
{jy, div(k, 31) + 1, mod(k, 31) + 1}
k >= 0 ->
k = k - 186
jm = 7 + div(k, 30)
jd = mod(k, 30) + 1
{jy, jm, jd}
r.leap == 1 ->
jy = jy - 1
k = k + 180
jm = 7 + div(k, 30)
jd = mod(k, 30) + 1
{jy, jm, jd}
true ->
jy = jy - 1
k = k + 179
jm = 7 + div(k, 30)
jd = mod(k, 30) + 1
{jy, jm, jd}
end
end
@spec g2d({integer(), integer(), integer()}) :: integer()
defp g2d({gy, gm, gd}) do
d =
div((gy + div(gm - 8, 6) + 100_100) * 1461, 4) + div(153 * mod(gm + 9, 12) + 2, 5) + gd -
34_840_408
d - div(div(gy + 100_100 + div(gm - 8, 6), 100) * 3, 4) + 752
end
@spec d2g(integer()) :: {integer(), integer(), integer()}
defp d2g(jdn) do
j = 4 * jdn + 139_361_631 + div(div(4 * jdn + 183_187_720, 146_097) * 3, 4) * 4 - 3908
i = div(mod(j, 1461), 4) * 5 + 308
gd = div(mod(i, 153), 5) + 1
gm = mod(div(i, 153), 12) + 1
gy = div(j, 1461) - 100_100 + div(8 - gm, 6)
{gy, gm, gd}
end
@spec mod(integer(), integer()) :: integer()
defp mod(a, b) do
a - div(a, b) * b
end
end
|
lib/jalaali.ex
| 0.873242 | 0.417687 |
jalaali.ex
|
starcoder
|
defmodule ExDiceRoller.Compilers.Variable do
@moduledoc """
Handles compiling expressions that use variables.
Variables can be used to replace single letter characters in an expression
with a value, such as a number or an anonymous function that accepts list
arguments (`args` and `opts`, respectively).
Acceptable variable values include:
* integers
* floats
* compiled functions matching `t:Compiler.compiled_fun/2`
* strings that can be parsed by ExDiceRoller
* lists composed of any of the above
* lists of lists
Note that an error will be raised if values are not supplied for all variables
in an expression.
### Examples
iex> import ExDiceRoller.Sigil
ExDiceRoller.Sigil
iex> ExDiceRoller.roll(~a/1+x/, [x: 5])
6
iex> ExDiceRoller.roll("xdy+z", x: 5, y: 10, z: 50)
82
iex> ExDiceRoller.roll("xdy+z", [x: 5, y: 10, z: ~a/15d100/])
739
iex> ExDiceRoller.roll("xdy+z", x: [1, 2, 3], y: 1, z: 5, opts: [:keep])
[6, 6, 6, 6, 6, 6]
iex> ExDiceRoller.roll("xdy+z", [x: 1, y: [1, 10, 100], z: -6, opts: [:keep]])
[-5, -4, 66]
iex> ExDiceRoller.roll("xdy+z", x: [~a/1d2/, "1d4+1"], y: ["3,4d20/2", ~a/1d6/], z: 2, opts: [:keep])
[8, 8, 3, 3, 3, 10, 4, 7, 6, 3, 5, 4, 4, 4, 3]
iex> ExDiceRoller.roll("1+x")
** (ArgumentError) no variable 'x' was found in the arguments
"""
@behaviour ExDiceRoller.Compiler
alias ExDiceRoller.{Args, Compiler, Tokenizer, Parser}
@impl true
def compile({:var, _} = var), do: compile_var(var)
@spec compile_var({:var, charlist}) :: Compiler.compiled_fun()
defp compile_var({:var, var}), do: fn args -> var_final(var, args) end
@spec var_final(charlist, Keyword.t()) :: number
defp var_final(var, args) do
key = var |> to_string() |> String.to_atom()
args
|> Args.get_var(key)
|> var_final_arg(var, args)
end
@spec var_final_arg(any, charlist, Keyword.t()) :: number
defp var_final_arg(nil, var, _),
do: raise(ArgumentError, "no variable #{inspect(var)} was found in the arguments")
defp var_final_arg(val, _, _) when is_number(val), do: val
defp var_final_arg(val, _, args) when is_function(val), do: val.(args)
defp var_final_arg(val, var, args) when is_bitstring(val) do
{:ok, tokens} = Tokenizer.tokenize(val)
{:ok, parsed} = Parser.parse(tokens)
compiled_arg = Compiler.delegate(parsed)
var_final_arg(compiled_arg, var, args)
end
defp var_final_arg(val, var, args) when is_list(val) do
Enum.map(val, &var_final_arg(&1, var, args))
end
end
|
lib/compilers/variable.ex
| 0.864325 | 0.677407 |
variable.ex
|
starcoder
|
defmodule Tyx.Traversal do
@moduledoc false
use Boundary, deps: [Tyx], exports: [Lookup, Typemap]
alias Tyx.Traversal.Lookup
require Logger
@callback lookup(module(), atom(), [module()] | non_neg_integer()) ::
{:error, {module, atom(), non_neg_integer()}} | {:ok, atom()}
@spec validate(Macro.Env.t(), [Tyx.t()]) :: [{Tyx.t(), :ok | {:error, keyword()}}]
def validate(env, tyxes) do
tyxes_with_imports =
Enum.flat_map(env.functions, fn {mod, list} ->
for {f, a} <- list,
{:ok, %Tyx.Fn{} = tyx_fn} <- [Lookup.get(mod, f, a)],
do: %Tyx{fun: f, signature: tyx_fn}
end) ++ tyxes
Enum.map(tyxes, fn tyx ->
outcome = tyx.signature.~>
tyx.body
|> Macro.expand(env)
|> Macro.prewalk(&desugar(&1, tyx.signature, tyxes_with_imports, env))
|> Macro.postwalk([], fn ast, errors ->
case expand(ast, tyx.signature, tyxes_with_imports, env) do
{:ok, ast} -> {ast, errors}
{:error, error} -> {ast, [error | errors]}
end
end)
|> case do
{[^outcome], []} -> :ok
{[unexpected], []} -> {:error, return: [expected: outcome, got: unexpected]}
{_, errors} -> {:error, traversal: errors}
end
|> then(&{tyx, &1})
end)
end
defp desugar({:|>, _, _} = pipe_call, _mapping, _tyxes, _env) do
pipe_call
|> Macro.unpipe()
|> Enum.reduce(fn {arg, p}, {acc, pp} -> {Macro.pipe(acc, arg, pp), p} end)
|> elem(0)
end
defp desugar({{:., meta, args} = _dot_call, _no_parens, []}, mapping, tyxes, env) do
args =
case args do
[{_map, _meta, nil}, _field] -> args
_ -> desugar(args, mapping, tyxes, env)
end
{{:., meta, [{:__aliases__, [alias: false], [:Map]}, :fetch!]}, meta, args}
end
defp desugar({:__block__, _, expressions}, _mapping, _tyxes, _env) do
[return | pre] = Enum.reverse(expressions)
binding =
Enum.reduce(pre, %{}, fn
{:=, _, [{var, _, _}, result]}, ctx -> Map.put(ctx, var, result)
_some, ctx -> ctx
end)
Macro.postwalk(return, &apply_bindings(&1, binding))
end
defp desugar(not_pipe_call, _mapping, _tyxes, _env), do: not_pipe_call
@spec apply_bindings(Macro.t(), map()) :: Macro.t()
defp apply_bindings({key, _meta, nil} = var, binding) do
Map.get(binding, key, var)
end
defp apply_bindings(any, _binding), do: any
defp expand({key, _, nil}, mapping, _tyxes, _env),
do: if(mapping.<~[key], do: {:ok, mapping.<~[key]}, else: {:error, {key, :invalid}})
defp expand({:__aliases__, _, _} = alias_call, _mapping, _tyxes, _env),
do: {:ok, alias_call}
defp expand({{:., _, [{:__aliases__, _, mods}, fun]}, _, args}, _mapping, _tyxes, _env),
do: with({:ok, tyx} <- mods |> Module.concat() |> Lookup.get(fun, args), do: {:ok, tyx.~>})
defp expand({:., _, [{:__aliases__, _, _mods}, _fun]} = tail_call, _mapping, _tyxes, _env),
do: {:ok, tail_call}
for operator <- ~w|+ - *|a,
t1 <- [Tyx.BuiltIn.Integer, Tyx.BuiltIn.NonNegInteger, Tyx.BuiltIn.PosInteger],
t2 <- [Tyx.BuiltIn.Integer, Tyx.BuiltIn.NonNegInteger, Tyx.BuiltIn.PosInteger] do
# FIXME Carefully specify return types for all combinations
defp expand({unquote(operator), _, [unquote(t1), unquote(t2)]}, _mapping, _tyxes, _env),
do: {:ok, Tyx.BuiltIn.Integer}
end
defp expand({fun, _, args}, _mapping, tyxes, _env) do
Enum.reduce_while(tyxes, {:error, {:no_spec, [{fun, args}]}}, fn tyx, acc ->
with %Tyx{fun: ^fun, signature: %Tyx.Fn{<~: fargs, ~>: fret}} <- tyx,
^args <- Keyword.values(fargs),
do: {:halt, {:ok, fret}},
else: (_ -> {:cont, acc})
end)
end
defp expand({:do, any}, _mapping, _tyxes, _env), do: {:ok, any}
defp expand(any, _mapping, _tyxes, _env), do: {:ok, any}
end
|
lib/tyx/traversal.ex
| 0.736306 | 0.441793 |
traversal.ex
|
starcoder
|
defmodule TelemetryAsync.Handler do
@moduledoc """
GenServer that subscribes to the requested telemetry metrics. The handler will
randomly distribute requests to the ShardSupervisor Shards and re-execute the telemetry
metrics with `:async` prepended to the event name.
A metric like `[:test]` will become `[:async, :test]`
The metrics are detached if the Handler process exits to allow graceful shutdown.
"""
use GenServer
alias TelemetryAsync.{Shard, ShardSupervisor}
@doc """
Starts the Telemetry.Handler. Several options are available:
* metrics - (required) A list of telemetry metric names; each must be a list of atoms, as `:telemetry` expects
* pool_size - (optional) The size of the ShardSupervisor pool. This defaults to the number of schedulers
* prefix - (optional) An atom that is used to name the individual Shards. Defaults to `TelemetryAsync.Shard`
* transform_fn - (optional) A function/3 that accepts the metric name (without async prepended), measurements, metadata and
returns a tuple `{measurements, metadata}` which will be executed async. This allows smaller data to cross
the process boundary. As `:telemetry` recommends, provide a `&Module.function/3` capture
rather than an anonymous function.
The prefix and pool_size should match a ShardSupervisor started with the same options or the telemetry events will not be re-broadcast.
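## Example
A minimal sketch; the metric name and transform function are illustrative, and a
ShardSupervisor with matching `pool_size`/`prefix` is assumed to be running:
    TelemetryAsync.Handler.start_link(
      metrics: [[:my_app, :repo, :query]],
      pool_size: 4,
      transform_fn: &MyApp.Telemetry.trim/3
    )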
"""
def start_link(opts) do
GenServer.start_link(__MODULE__, opts)
end
@doc false
def init(opts) do
metrics = Keyword.fetch!(opts, :metrics)
pool_size = Keyword.get(opts, :pool_size, ShardSupervisor.default_pool_size())
prefix = Keyword.get(opts, :prefix, Shard.default_prefix())
transform_fn = Keyword.get(opts, :transform_fn)
names = attach_metrics(metrics, pool_size, prefix, transform_fn)
Process.flag(:trap_exit, true)
{:ok, %{names: names, opts: opts, transform_fn: transform_fn}}
end
@doc false
def handler(metric, measurements, metadata, config = %{transform_fn: transform_fn})
when is_function(transform_fn, 3) do
{measurements, metadata} = transform_fn.(metric, measurements, metadata)
exec_handler(metric, measurements, metadata, config)
end
def handler(metric, measurements, metadata, config) do
exec_handler(metric, measurements, metadata, config)
end
@doc false
def terminate(_reason, %{names: names}) do
Enum.each(names, fn metric ->
:telemetry.detach(metric)
end)
end
defp exec_handler(metric, measurements, metadata, %{pool_size: pool_size, prefix: prefix}) do
ShardSupervisor.random_shard(pool_size: pool_size, prefix: prefix)
|> Shard.execute(fn ->
:telemetry.execute([:async | metric], measurements, metadata)
end)
end
defp attach_metrics(metrics, pool_size, prefix, transform_fn) do
Enum.map(metrics, fn metric ->
name = [__MODULE__ | [prefix | metric]] |> Module.concat()
:ok =
:telemetry.attach(name, metric, &__MODULE__.handler/4, %{
pool_size: pool_size,
prefix: prefix,
transform_fn: transform_fn
})
name
end)
end
end
|
lib/telemetry_async/handler.ex
| 0.902177 | 0.469581 |
handler.ex
|
starcoder
|
defmodule MPEGAudioFrameParser.Impl do
alias MPEGAudioFrameParser.Frame
require Logger
@sync_word 0b11111111111
@initial_state %{leftover: <<>>, current_frame: nil, frames: []}
def init() do
{:ok, @initial_state}
end
def add_packet(state, packet) do
process_bytes(state, packet)
end
def pop_frame(%{frames: []} = state) do
{:ok, nil, state}
end
def pop_frame(state) do
{frame, rest} = List.pop_at(state.frames, -1)
{:ok, frame, %{state | frames: rest}}
end
def flush(state) do
{:ok, state.frames, @initial_state}
end
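# Illustrative flow (the MP3 bytes are hypothetical):
#
#     {:ok, state} = MPEGAudioFrameParser.Impl.init()
#     {:ok, state} = MPEGAudioFrameParser.Impl.add_packet(state, mp3_bytes)
#     {:ok, frame, state} = MPEGAudioFrameParser.Impl.pop_frame(state)
#     {:ok, frames, _state} = MPEGAudioFrameParser.Impl.flush(state)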
# Private Functions
# Synced, and the current frame is complete:
defp process_bytes(%{current_frame: %Frame{complete: true}} = state, packet) do
frames = [state.current_frame | state.frames]
process_bytes(%{state | current_frame: nil, frames: frames}, packet)
end
# No data left, or not enough to be able to validate next frame. Return:
defp process_bytes(state, packet)
when bit_size(packet) < 32
do
{:ok, %{state | leftover: packet}}
end
# Leftover from previous call available. Prepend to this packet:
defp process_bytes(%{leftover: leftover} = state, packet)
when bit_size(leftover) > 0
do
process_bytes(%{state | leftover: <<>>}, <<leftover::bits, packet::bits>>)
end
# Not synced, found a sync word. Create a new frame struct:
defp process_bytes(%{current_frame: nil} = state, <<@sync_word::size(11), header::size(21), rest::bits>>) do
header = <<@sync_word::size(11), header::size(21)>>
frame = Frame.from_header(header)
process_bytes(%{state | current_frame: frame}, rest)
end
# Not synced, no sync word found. Discard a byte:
defp process_bytes(%{current_frame: nil} = state, packet) do
<<_byte, rest::bits>> = packet
process_bytes(state, rest)
end
# Synced, but with an invalid header. Discard a byte:
defp process_bytes(%{current_frame: %Frame{valid: false}} = state, packet) do
data = <<state.current_frame.data::bits, packet::bits>>
<<_byte, rest::bits>> = data
process_bytes(%{state | current_frame: nil}, rest)
end
# Synced, current frame not complete and we have bytes available. Add bytes to frame:
defp process_bytes(%{current_frame: %Frame{complete: false}} = state, packet) do
{:ok, frame, rest} = Frame.add_bytes(state.current_frame, packet)
process_bytes(%{state | current_frame: frame}, rest)
end
end
|
lib/mpeg_audio_frame_parser/impl.ex
| 0.663778 | 0.449816 |
impl.ex
|
starcoder
|
defmodule ENHL.Report do
use GenServer
@doc """
Starts a new report.
"""
def start_link(year, game_id), do: GenServer.start_link(__MODULE__, year: year, game_id: game_id)
@doc """
Returns the year of the `report`.
Returns `{:ok, year}`.
"""
def year(report), do: GenServer.call(report, :year)
@doc """
Returns the game_id of the `report`.
Returns `{:ok, game_id}`.
"""
def game_id(report), do: GenServer.call(report, :game_id)
@doc """
Returns the url of the `report`.
Returns `{:ok, url}`.
"""
def url(report), do: GenServer.call(report, :url)
@doc """
Returns the game_info of the `report`.
Returns `{:ok, game_info}`.
"""
def game_info(report), do: GenServer.call(report, :game_info)
@doc """
Returns the events of the `report`.
Returns `{:ok, events}`.
"""
def events(report), do: GenServer.call(report, :events)
@doc """
Fetches the report from the NHL site.
Returns `:ok` if the report was fetched successfully, `:error` otherwise.
"""
def fetch(report), do: GenServer.call(report, :fetch, 30000)
@doc """
Parses the game info from the report.
Returns `:ok` if the report was parsed successfully, `:error` otherwise.
"""
def parse_game_info(report), do: GenServer.call(report, :parse_game_info)
@doc """
Parses the game events from the report.
Returns `:ok` if the report was parsed successfully, `:error` otherwise.
"""
def parse_events(report), do: GenServer.call(report, :parse_events)
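# Typical usage (year and game id are illustrative):
#
#     {:ok, report} = ENHL.Report.start_link(2016, 1)
#     :ok = ENHL.Report.fetch(report)
#     :ok = ENHL.Report.parse_game_info(report)
#     :ok = ENHL.Report.parse_events(report)
#     {:ok, events} = ENHL.Report.events(report)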
## Server Callbacks
def init(year: year, game_id: game_id) do
url = "http://www.nhl.com/scores/htmlreports/#{year}#{year + 1}/PL0#{20_000 + game_id}.HTM"
{:ok, %{year: year, game_id: game_id, url: url, game_info: nil, events: nil, html: nil}}
end
def handle_call(:year, _from, state), do: {:reply, {:ok, state.year}, state}
def handle_call(:game_id, _from, state), do: {:reply, {:ok, state.game_id}, state}
def handle_call(:url, _from, state), do: {:reply, {:ok, state.url}, state}
def handle_call(:game_info, _from, state), do: {:reply, {:ok, state.game_info}, state}
def handle_call(:events, _from, state), do: {:reply, {:ok, state.events}, state}
def handle_call(:fetch, _from, %{html: html} = state) when is_nil(html) do
{:reply, :ok, put_in(state.html, HTTPoison.get!(state.url).body)}
end
def handle_call(:fetch, _from, state) do
# TODO: add error replies
{:reply, :ok, state}
end
def handle_call(:parse_game_info, _from, state) do
# TODO: add error replies
props = Floki.find(state.html, "td[align='center']")
if state.game_id != actual_game_id(props) do
{:reply, {:error, :invalid_game_id}, state}
else
{:reply, :ok, put_in(state.game_info, parse_game_info(state.game_id, props))}
end
end
def handle_call(:parse_events, _from, state) do
events = state.html |> Floki.find("tr[class='evenColor']") |> Enum.map(&parse_event/1)
{:reply, :ok, put_in(state.events, events)}
end
## Private functions
defp actual_game_id(props) do
props
|> text_element_value(12)
|> String.split(" ")
|> List.last
|> String.to_integer
end
defp parse_game_info(game_id, props) do
%{game_id: game_id}
|> Map.merge(parse_arena_info(props))
|> Map.merge(parse_game_time(props))
|> Map.merge(%{visitor: parse_team(props, 3, :away)})
|> Map.merge(%{home: parse_team(props, 16, :home)})
end
defp parse_arena_info(props) do
[attendance_str, arena] = props
|> text_element_value(10)
|> convert_nbsp
|> String.replace("at", "@")
|> String.split("@")
|> Enum.map(&String.trim/1)
attendance = attendance_str
|> String.split(" ")
|> List.last
|> String.replace(",", "")
|> String.to_integer
%{arena: arena, attendance: attendance}
end
defp parse_game_time(props) do
date = text_element_value(props, 9)
[start_time, end_time] = props
|> text_element_value(11)
|> convert_nbsp
|> String.split(";")
|> Enum.map(fn x -> x |> String.trim
|> String.split(" ", parts: 2)
|> List.last
end)
%{start: parse_datetime(date, start_time), end: parse_datetime(date, end_time)}
end
defp convert_nbsp(input) do
input
|> String.to_charlist
|> :binary.list_to_bin
|> :binary.replace(<<160>>, <<" ">>, [:global]) # replace non-breaking spaces (byte 160)
end
defp parse_datetime(date, time) do
Timex.parse!("#{time}, #{date}", "%k:%M %Z, %A, %B %e, %Y", :strftime)
end
defp parse_team(props, score_index, game_type) do
[title, games] = props |> text_element_value(score_index + 2) |> String.split("\n")
games = String.split(games)
%{
:title => title,
:game => games |> Enum.at(1) |> String.to_integer,
game_type => games |> List.last |> String.to_integer,
:score => int_element_value(props, score_index),
}
end
defp parse_event(html) do
html
|> Floki.find("td")
|> common_event_info
|> Map.merge(parse_players(html |> Floki.find("table")))
end
defp common_event_info(props) do
[time, elapsed] = text_element_value(props, 3) |> String.split
%{
event_id: int_element_value(props, 0),
period: int_element_value(props, 1),
str: text_element_value(props, 2),
time: time,
elapsed: elapsed,
type: text_element_value(props, 4),
desc: text_element_value(props, 5),
}
end
defp parse_players(tables) when length(tables) == 0, do: %{}
defp parse_players(tables) do
visitor_players = tables |> Enum.at(0) |> parse_players_table
offset = length(visitor_players) + 1
home_players = tables |> Enum.at(offset) |> parse_players_table
%{players: %{visitor: visitor_players, home: home_players}}
end
defp parse_players_table(html) when is_nil(html), do: []
defp parse_players_table(html), do: Floki.find(html, "font") |> Enum.map(&parse_player/1)
defp parse_player(html) do
[position, name] = html
|> Floki.attribute("title")
|> List.first
|> String.split("-")
|> Enum.map(&String.trim/1)
%{
position: position,
name: name,
number: html |> Floki.text |> String.to_integer,
}
end
defp text_element_value(props, index), do: props |> Enum.fetch!(index) |> Floki.text
defp int_element_value(props, index), do: props |> text_element_value(index) |> String.to_integer
end
|
lib/enhl/report.ex
| 0.740362 | 0.469034 |
report.ex
|
starcoder
|
defmodule Okta.IdPs do
@moduledoc """
The `Okta.IdPs` module provides access methods to the [Okta Identity Providers API](https://developer.okta.com/docs/reference/api/idps/).
All methods require a Tesla Client struct created with `Okta.client(base_url, api_key)`.
## Examples
client = Okta.client("https://dev-000000.okta.com", "<PASSWORD>")
{:ok, result, _env} = Okta.IdPs.list_idps(client)
"""
alias Okta.IdPs.{IdentityProvider, Protocol, Policy}
import Okta.Utils
@idps_url "/api/v1/idps"
@type single_result() :: {:ok, IdentityProvider.t(), Tesla.Env.t()} | {:error, map(), any}
@type array_result() :: {:ok, list(IdentityProvider.t()), Tesla.Env.t()} | {:error, map(), any}
@doc """
Adds a new IdP to your organization
The function requires an `Okta.IdPs.IdentityProvider` with `type`, `name`, `protocol` and `policy` and returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#add-identity-provider
"""
@spec add_idp(Okta.client(), IdentityProvider.t()) :: single_result()
def add_idp(client, %IdentityProvider{} = idp) do
Tesla.post(client, @idps_url, transform_idp(idp)) |> Okta.result() |> idp_result()
end
@doc """
Add Generic OpenID Connect Identity Provider
The function requires a `name`, an `Okta.IdPs.Protocol` and an `Okta.IdPs.Policy`, and returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#add-generic-openid-connect-identity-provider
"""
@spec add_oidc_idp(Okta.client(), String.t(), Protocol.t(), Policy.t()) :: single_result()
def add_oidc_idp(client, name, %Protocol{} = protocol, %Policy{} = policy) do
idp = %IdentityProvider{name: name, type: "OIDC", protocol: protocol, policy: policy}
add_idp(client, idp)
end
@doc """
Get Identity Provider. Fetches an IdP by id
This function returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#get-identity-provider
"""
@spec get_idp(Okta.client(), String.t()) :: single_result()
def get_idp(client, idp_id) do
Tesla.get(client, @idps_url <> "/#{idp_id}") |> Okta.result() |> idp_result()
end
@doc """
List Identity Providers. Enumerates IdPs in your organization with pagination. A subset of IdPs can be returned that match a supported filter expression or query.
This function returns an array of `Okta.IdPs.IdentityProvider` structs in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#list-identity-providers
"""
@spec list_idps(Okta.client(), keyword()) :: array_result()
def list_idps(client, opts \\ []) do
Tesla.get(client, @idps_url, query: opts) |> Okta.result() |> idp_result()
end
@doc """
Find Identity Providers by Name. Searches for IdPs by name in your organization
This function returns an array of `Okta.IdPs.IdentityProvider` structs in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#find-identity-providers-by-name
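## Example
A minimal sketch, given a `client` from `Okta.client/2` (the IdP name is illustrative):
    {:ok, idps, _env} = Okta.IdPs.find_idps(client, "Example IdP")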
"""
@spec find_idps(Okta.client(), String.t(), keyword()) :: array_result()
def find_idps(client, query, opts \\ []) do
list_idps(client, Keyword.merge(opts, q: query))
end
@doc """
Find Identity Providers by Type. Finds all IdPs with a specific type
This function returns an array of `Okta.IdPs.IdentityProvider` structs in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#find-identity-providers-by-type
"""
@spec find_idps_by_type(Okta.client(), String.t(), keyword()) :: array_result()
def find_idps_by_type(client, type, opts \\ []) do
list_idps(client, Keyword.merge(opts, type: type))
end
@doc """
Updates the configuration for an IdP. All properties must be specified when updating IdP configuration. Partial updates are not supported by the Okta API
The function requires an `Okta.IdPs.IdentityProvider` with `type`, `name`, `issuerMode`, `status`, `protocol` and `policy` and returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#update-identity-provider
"""
@spec update_idp(Okta.client(), String.t(), IdentityProvider.t()) :: single_result()
def update_idp(client, idp_id, %IdentityProvider{} = idp) do
client
|> Tesla.put(@idps_url <> "/#{idp_id}", transform_idp(idp))
|> Okta.result()
|> idp_result()
end
@doc """
Will perform a partial update of an IdP with any supplied attributes and with partial protocol and policy.
It works by first fetching the IdP data from the API and merging the supplied data with `Okta.Utils.merge_struct(struct1, struct2)`.
This means concurrent updates *could* fail as this is not an atomic transaction.
The function requires an `Okta.IdPs.IdentityProvider` and returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#update-identity-provider
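## Example
A sketch that renames an IdP; the id and name are illustrative:
    idp = %Okta.IdPs.IdentityProvider{name: "Renamed IdP"}
    {:ok, updated, _env} = Okta.IdPs.partial_update_idp(client, "0oa1k2lmnOPQr3s4T5d6", idp)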
"""
@spec partial_update_idp(Okta.client(), String.t(), IdentityProvider.t()) :: single_result()
def partial_update_idp(client, idp_id, %IdentityProvider{} = idp) do
case get_idp(client, idp_id) do
{:ok, old_idp, _env} -> update_idp(client, idp_id, merge_struct(old_idp, idp))
res -> res
end
end
@doc """
Delete Identity Provider. Removes an IdP from your organization.
https://developer.okta.com/docs/reference/api/idps/#delete-identity-provider
"""
@spec delete_idp(Okta.client(), String.t()) :: Okta.result()
def delete_idp(client, idp_id) do
Tesla.delete(client, @idps_url <> "/#{idp_id}") |> Okta.result()
end
@doc """
Activate Identity Provider. Activates an inactive IdP.
This function returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#activate-identity-provider
"""
@spec activate_idp(Okta.client(), String.t()) :: single_result()
def activate_idp(client, idp_id) do
Tesla.post(client, @idps_url <> "/#{idp_id}/lifecycle/activate", %{})
|> Okta.result()
|> idp_result()
end
@doc """
Deactivate Identity Provider. Deactivates an active IdP
This function returns an `Okta.IdPs.IdentityProvider` in the second tuple position.
https://developer.okta.com/docs/reference/api/idps/#deactivate-identity-provider
"""
@spec deactivate_idp(Okta.client(), String.t()) :: single_result()
def deactivate_idp(client, idp_id) do
Tesla.post(client, @idps_url <> "/#{idp_id}/lifecycle/deactivate", %{})
|> Okta.result()
|> idp_result()
end
@doc """
Find Users. Finds all the users linked to an identity provider.
https://developer.okta.com/docs/reference/api/idps/#find-users
"""
@spec find_users(Okta.client(), String.t()) :: Okta.result()
def find_users(client, idp_id) do
Tesla.get(client, @idps_url <> "/#{idp_id}/users") |> Okta.result()
end
@doc """
Get a Linked Identity Provider User. Fetches a linked IdP user by ID.
https://developer.okta.com/docs/reference/api/idps/#get-a-linked-identity-provider-user
"""
@spec get_linked_user(Okta.client(), String.t(), String.t()) :: Okta.result()
def get_linked_user(client, idp_id, user_id) do
Tesla.get(client, @idps_url <> "/#{idp_id}/users/#{user_id}") |> Okta.result()
end
@doc """
Social Authentication Token Operation.
Okta doesn't import all the user information from a social provider.
If the app needs information which isn't imported, it can get the user token from this endpoint, then make an API call to the social provider with the token to request the additional information.
https://developer.okta.com/docs/reference/api/idps/#social-authentication-token-operation
"""
@spec social_tokens(Okta.client(), String.t(), String.t()) :: Okta.result()
def social_tokens(client, idp_id, user_id) do
Tesla.get(client, @idps_url <> "/#{idp_id}/users/#{user_id}/credentials/tokens")
|> Okta.result()
end
@doc """
Link a User to a Social Provider without a Transaction.
Links an Okta user to an existing social provider. This endpoint doesn't support the SAML2 Identity Provider Type.
https://developer.okta.com/docs/reference/api/idps/#link-a-user-to-a-social-provider-without-a-transaction
"""
@spec link_user(Okta.client(), String.t(), String.t(), String.t()) :: Okta.result()
def link_user(client, idp_id, user_id, external_id) do
Tesla.post(client, @idps_url <> "/#{idp_id}/users/#{user_id}", %{externalId: external_id})
|> Okta.result()
end
@doc """
Unlink User from IdP
Removes the link between the Okta user and the IdP user.
The next time the user federates into Okta via this IdP, they have to re-link their account according to the account link policy configured in Okta for this IdP.
https://developer.okta.com/docs/reference/api/idps/#unlink-user-from-idp
"""
@spec unlink_user(Okta.client(), String.t(), String.t()) :: Okta.result()
def unlink_user(client, idp_id, user_id) do
Tesla.delete(client, @idps_url <> "/#{idp_id}/users/#{user_id}") |> Okta.result()
end
defp transform_idp(%IdentityProvider{} = idp) do
idp
|> Map.from_struct()
|> Map.take([:type, :issuerMode, :name, :protocol, :policy, :status])
|> stringify_keys()
end
defp idp_result({:ok, idps, env}) when is_list(idps),
do: {:ok, Enum.map(idps, &parse_idp_return/1), env}
defp idp_result({:ok, idp, env}), do: {:ok, parse_idp_return(idp), env}
defp idp_result(result), do: result
defp parse_idp_return(idp_data) do
policy = to_struct(Policy, idp_data["policy"])
protocol = to_struct(Protocol, idp_data["protocol"])
IdentityProvider
|> to_struct(idp_data)
|> Map.put(:policy, policy)
|> Map.put(:protocol, protocol)
end
end
|
lib/okta/idps.ex
| 0.849769 | 0.410934 |
idps.ex
|
starcoder
|
defmodule AdventOfCode.Day18 do
@moduledoc ~S"""
[Advent Of Code day 18](https://adventofcode.com/2018/day/18).
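Parsing a tiny grid with `parse_input/1` (illustrative, not a doctest):
    parse_input(".#\n|.")
    #=> %{{0, 0} => ".", {0, 1} => "|", {1, 0} => "#", {1, 1} => "."}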
"""
import AdventOfCode.Utils, only: [map_increment: 2]
@open "."
@tree "|"
@lumberyard "#"
def solve("1", input) do
grid = input |> parse_input()
Enum.reduce(1..10, grid, fn _, grid -> iterate(grid) end) |> resources_value()
end
def solve("2", input) do
grid = input |> parse_input()
{grid, iterations_left} =
Enum.reduce_while(Stream.iterate(1, &(&1 + 1)), {grid, %{}}, fn i, {grid, stats} ->
new_grid = iterate(grid)
id = grid_id(new_grid)
case Map.get(stats, id) do
nil ->
{:cont, {new_grid, Map.put(stats, id, i)}}
prev_i ->
iterations_left = rem(1_000_000_000 - i, i - prev_i)
{:halt, {new_grid, iterations_left}}
end
end)
Enum.reduce(1..iterations_left, grid, fn _, grid -> iterate(grid) end) |> resources_value()
end
def iterate(grid) do
Enum.reduce(grid, %{}, fn {point, value}, acc ->
Map.put(acc, point, next_value(value, adj_stats(grid, point)))
end)
end
defp grid_id(grid), do: Enum.map(grid, fn {_, v} -> v end)
defp resources_value(grid) do
stats = Enum.reduce(grid, %{}, fn {_point, value}, acc -> map_increment(acc, value) end)
stats[@tree] * stats[@lumberyard]
end
defp adj_stats(grid, point), do: points_stats(grid, adjacent(point))
defp adjacent({px, py}) do
for x <- -1..1, y <- -1..1, x + px >= 0 && y + py >= 0 && (x != 0 || y != 0), do: {px + x, py + y}
end
defp points_stats(grid, points), do: Enum.reduce(points, %{}, &map_increment(&2, Map.get(grid, &1)))
defp next_value(@open, %{@tree => c}) when c >= 3, do: @tree
defp next_value(@tree, %{@lumberyard => c}) when c >= 3, do: @lumberyard
defp next_value(@lumberyard, %{@lumberyard => l, @tree => t}) when t >= 1 and l >= 1, do: @lumberyard
defp next_value(@lumberyard, _), do: @open
defp next_value(value, _), do: value
def parse_input(input) do
String.split(input, "\n")
|> Enum.with_index()
|> Enum.reduce(%{}, fn {line, y}, acc ->
String.codepoints(line)
|> Enum.with_index()
|> Enum.reduce(acc, fn {point, x}, acc ->
Map.put(acc, {x, y}, point)
end)
end)
end
def print(grid) do
max = :math.sqrt(Enum.count(grid)) |> Kernel.trunc()
Enum.map(0..(max - 1), fn y ->
Enum.map(0..(max - 1), fn x -> Map.get(grid, {x, y}) end) |> Enum.join()
end)
|> Enum.join("\n")
end
end
|
lib/advent_of_code/day_18.ex
| 0.672224 | 0.61257 |
day_18.ex
|
starcoder
|
defmodule Absinthe.Resolution do
@moduledoc """
Information about the current resolution. It is created by adding field-specific
information to the more general `%Absinthe.Blueprint.Execution{}` struct.
In many ways like the `%Conn{}` from `Plug`, the `%Absinthe.Resolution{}` is the
piece of information that is passed along from middleware to middleware as part of
resolution.
## Contents
- `:adapter` - The adapter used for any name conversions.
- `:definition` - The Blueprint definition for this field.
- `:context` - The context passed to `Absinthe.run`.
- `:root_value` - The root value passed to `Absinthe.run`, if any.
- `:parent_type` - The parent type for the field.
- `:private` - Operates similarly to the `:private` key on a `%Plug.Conn{}`
and is a place for libraries (and similar) to store their information.
- `:schema` - The current schema.
- `:source` - The resolved parent object; source of this field.
When a `%Resolution{}` is accessed via middleware, you may want to update the
context (e.g. to cache a dataloader instance or the result of an ecto query).
Updating the context can be done simply by using the map updating syntax (or
`Map.put/3`):
```elixir
%{resolution | context: new_context}
# OR
Map.put(resolution, :context, new_context)
```
To access the schema type for this field, see the `definition.schema_node`.
"""
@typedoc """
The arguments that are passed from the schema. (e.g. id of the record to be
fetched)
"""
@type arguments :: %{optional(atom) => any}
@type source :: any
@type t :: %__MODULE__{
value: term,
errors: [term],
adapter: Absinthe.Adapter.t(),
context: map,
root_value: any,
schema: Absinthe.Schema.t(),
definition: Absinthe.Blueprint.node_t(),
parent_type: Absinthe.Type.t(),
source: source,
state: field_state,
acc: %{any => any},
extensions: %{any => any},
arguments: arguments,
fragments: [Absinthe.Blueprint.Document.Fragment.Named.t()]
}
defstruct [
:value,
:adapter,
:context,
:parent_type,
:root_value,
:definition,
:schema,
:source,
errors: [],
middleware: [],
acc: %{},
arguments: %{},
extensions: %{},
private: %{},
path: [],
state: :unresolved,
fragments: [],
fields_cache: %{}
]
def resolver_spec(fun) do
{{__MODULE__, :call}, fun}
end
@type field_state :: :unresolved | :resolved | :suspended
@doc """
Get the child fields under the current field.
See `project/2` for details.
"""
def project(info) do
case info.definition.schema_node.type do
%Absinthe.Type.Interface{} ->
raise need_concrete_type_error()
%Absinthe.Type.Union{} ->
raise need_concrete_type_error()
schema_node ->
project(info, schema_node)
end
end
@doc """
Get the current path.
Each `Absinthe.Resolution` struct holds the current result path as a list of
blueprint nodes and indices. Usually however you don't need the full AST list
and instead just want the path that will eventually end up in the result.
For that, use this function.
## Examples
Given some query:
```
{users { email }}
```
If you called this function inside a resolver on the users email field it
returns a value like:
```elixir
resolve fn _, _, resolution ->
Absinthe.Resolution.path(resolution) #=> ["users", 5, "email"]
end
```
In this case `5` is the 0 based index in the list of users the field is currently
at.
"""
def path(%{path: path}) do
path
|> Enum.reverse()
|> Enum.drop(1)
|> Enum.map(&field_name/1)
end
defp field_name(%{alias: nil, name: name}), do: name
defp field_name(%{alias: name}), do: name
defp field_name(%{name: name}), do: name
defp field_name(index), do: index
@doc """
Get the child fields under the current field.
## Example
Given a document like:
```
{ user { id name }}
```
```
field :user, :user do
resolve fn _, info ->
child_fields = Absinthe.Resolution.project(info) |> Enum.map(&(&1.name))
# ...
end
end
```
`child_fields` will be `["id", "name"]`.
It correctly handles fragments, so for example if you had the document:
```
{
user {
... on User {
id
}
... on Named {
name
}
}
}
```
you would still get a nice and simple `child_fields` that was `["id", "name"]`.
"""
def project(
%{
definition: %{selections: selections},
path: path,
fields_cache: cache
} = info,
type
) do
type = Absinthe.Schema.lookup_type(info.schema, type)
{fields, _} = Absinthe.Resolution.Projector.project(selections, type, path, cache, info)
fields
end
defp need_concrete_type_error() do
"""
You tried to project from a field that is an abstract type without concrete type information!
Use `project/2` instead of `project/1`, and supply the type yourself please!
"""
end
def call(%{state: :unresolved} = res, resolution_function) do
result =
case resolution_function do
fun when is_function(fun, 2) ->
fun.(res.arguments, res)
fun when is_function(fun, 3) ->
fun.(res.source, res.arguments, res)
{mod, fun} ->
apply(mod, fun, [res.source, res.arguments, res])
_ ->
raise Absinthe.ExecutionError, """
Field resolve property must be a 2 arity anonymous function, 3 arity
anonymous function, or a `{Module, :function}` tuple.
Instead got: #{inspect(resolution_function)}
Resolving field:
#{res.definition.name}
Defined at:
#{res.definition.schema_node.__reference__.location.file}:#{
res.definition.schema_node.__reference__.location.line
}
Info: #{inspect(res)}
"""
end
put_result(res, result)
end
def call(res, _), do: res
def path_string(%__MODULE__{path: path}) do
Enum.map(path, fn
%{name: name, alias: alias} ->
alias || name
%{schema_node: schema_node} ->
schema_node.name
end)
end
@doc """
Handy function for applying user function result tuples to a resolution struct
User facing functions generally return one of several tuples like `{:ok, val}`
or `{:error, reason}`. This function handles applying those various tuples
to the resolution struct.
The resolution state is updated depending on the tuple returned. `:ok` and
`:error` tuples set the state to `:resolved`, whereas middleware tuples set it
to `:unresolved`.
This is useful for middleware that wants to handle user facing functions, but
does not want to duplicate this logic.
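## Example
A sketch of a middleware module using this function (the module and value are
illustrative):
    defmodule MyApp.Middleware.StaticValue do
      @behaviour Absinthe.Middleware

      def call(res, value) do
        Absinthe.Resolution.put_result(res, {:ok, value})
      end
    end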
"""
def put_result(res, {:ok, value}) do
%{res | state: :resolved, value: value}
end
def put_result(res, {:error, [{_, _} | _] = error_keyword}) do
%{res | state: :resolved, errors: [error_keyword]}
end
def put_result(res, {:error, errors}) do
%{res | state: :resolved, errors: List.wrap(errors)}
end
def put_result(res, {:plugin, module, opts}) do
put_result(res, {:middleware, module, opts})
end
def put_result(res, {:middleware, module, opts}) do
%{res | state: :unresolved, middleware: [{module, opts} | res.middleware]}
end
def put_result(res, result) do
raise result_error(result, res.definition, res.source)
end
@doc false
def result_error({:error, _} = value, field, source) do
result_error(
value,
field,
source,
"You're returning an :error tuple, but did you forget to include a `:message`\nkey in every custom error (map or keyword list)?"
)
end
def result_error(value, field, source) do
result_error(
value,
field,
source,
"Did you forget to return a valid `{:ok, any}` | `{:error, error_value}` tuple?"
)
end
@doc """
TODO: Deprecate
"""
def call(resolution_function, parent, args, field_info) do
case resolution_function do
fun when is_function(fun, 2) ->
fun.(args, field_info)
fun when is_function(fun, 3) ->
fun.(parent, args, field_info)
{mod, fun} ->
apply(mod, fun, [parent, args, field_info])
_ ->
raise Absinthe.ExecutionError, """
Field resolve property must be a 2 arity anonymous function, 3 arity
anonymous function, or a `{Module, :function}` tuple.
Instead got: #{inspect(resolution_function)}
Info: #{inspect(field_info)}
"""
end
end
def call(function, args, info) do
call(function, info.source, args, info)
end
@error_detail """
## For a data result
`{:ok, any}` result will do.
### Examples:
A simple integer result:
{:ok, 1}
Something more complex:
{:ok, %Model.Thing{some: %{complex: :data}}}
## For an error result
One or more errors for a field can be returned in a single `{:error, error_value}` tuple.
`error_value` can be:
- A simple error message string.
- A map containing `:message` key, plus any additional serializable metadata.
- A keyword list containing a `:message` key, plus any additional serializable metadata.
- A list containing multiple of any/all of these.
- Any other value compatible with `to_string/1`.
### Examples
A simple error message:
{:error, "Something bad happened"}
Multiple error messages:
{:error, ["Something bad", "Even worse"]
Single custom errors (note the required `:message` keys):
{:error, message: "Unknown user", code: 21}
{:error, %{message: "A database error occurred", details: format_db_error(some_value)}}
Three errors of mixed types:
{:error, ["Simple message", [message: "A keyword list error", code: 1], %{message: "A map error"}]}
Generic handler for interoperability with errors from other libraries:
{:error, :foo}
{:error, 1.0}
{:error, 2}
## To activate a plugin
`{:plugin, NameOfPluginModule, term}` to activate a plugin.
See `Absinthe.Resolution.Plugin` for more information.
"""
def result_error(value, field, source, guess) do
Absinthe.ExecutionError.exception("""
Invalid value returned from resolver.
Resolving field:
#{field.name}
Defined at:
#{field.schema_node.__reference__.location.file}:#{
field.schema_node.__reference__.location.line
}
Resolving on:
#{inspect(source)}
Got value:
#{inspect(value)}
...
#{guess}
...
The result must be one of the following...
#{@error_detail}
""")
end
end
defimpl Inspect, for: Absinthe.Resolution do
import Inspect.Algebra
def inspect(res, opts) do
# TODO: better inspect representation
inner =
res
|> Map.from_struct()
|> Map.update!(:fields_cache, fn _ ->
"#fieldscache<...>"
end)
|> Map.to_list()
|> Inspect.List.inspect(opts)
concat(["#Absinthe.Resolution<", inner, ">"])
end
end
|
lib/absinthe/resolution.ex
| 0.883242 | 0.875148 |
resolution.ex
|
starcoder
|
defmodule Kernel.Typespec do
@moduledoc false
@doc """
Defines a type.
This macro is responsible for handling the attribute `@type`.
## Examples
@type my_type :: atom
"""
defmacro deftype(type) do
pos = :elixir_locals.cache_env(__CALLER__)
%{line: line, file: file, module: module} = __CALLER__
quote do
Kernel.Typespec.deftype(
:type,
unquote(Macro.escape(type, unquote: true)),
unquote(line),
unquote(file),
unquote(module),
unquote(pos)
)
end
end
@doc """
Defines an opaque type.
This macro is responsible for handling the attribute `@opaque`.
## Examples
@opaque my_type :: atom
"""
defmacro defopaque(type) do
pos = :elixir_locals.cache_env(__CALLER__)
%{line: line, file: file, module: module} = __CALLER__
quote do
Kernel.Typespec.deftype(
:opaque,
unquote(Macro.escape(type, unquote: true)),
unquote(line),
unquote(file),
unquote(module),
unquote(pos)
)
end
end
@doc """
Defines a private type.
This macro is responsible for handling the attribute `@typep`.
## Examples
@typep my_type :: atom
"""
defmacro deftypep(type) do
pos = :elixir_locals.cache_env(__CALLER__)
%{line: line, file: file, module: module} = __CALLER__
quote do
Kernel.Typespec.deftype(
:typep,
unquote(Macro.escape(type, unquote: true)),
unquote(line),
unquote(file),
unquote(module),
unquote(pos)
)
end
end
@doc """
Defines a spec.
This macro is responsible for handling the attribute `@spec`.
## Examples
@spec add(number, number) :: number
"""
defmacro defspec(spec) do
pos = :elixir_locals.cache_env(__CALLER__)
%{line: line, file: file, module: module} = __CALLER__
quote do
Kernel.Typespec.defspec(
:spec,
unquote(Macro.escape(spec, unquote: true)),
unquote(line),
unquote(file),
unquote(module),
unquote(pos)
)
end
end
@doc """
Defines a callback.
This macro is responsible for handling the attribute `@callback`.
## Examples
@callback add(number, number) :: number
"""
defmacro defcallback(spec) do
pos = :elixir_locals.cache_env(__CALLER__)
%{line: line, file: file, module: module} = __CALLER__
quote do
Kernel.Typespec.defspec(
:callback,
unquote(Macro.escape(spec, unquote: true)),
unquote(line),
unquote(file),
unquote(module),
unquote(pos)
)
end
end
@doc """
Defines a macro callback.
This macro is responsible for handling the attribute `@macrocallback`.
## Examples
@macrocallback add(number, number) :: Macro.t
"""
defmacro defmacrocallback(spec) do
pos = :elixir_locals.cache_env(__CALLER__)
%{line: line, file: file, module: module} = __CALLER__
quote do
Kernel.Typespec.defspec(
:macrocallback,
unquote(Macro.escape(spec, unquote: true)),
unquote(line),
unquote(file),
unquote(module),
unquote(pos)
)
end
end
@doc """
Returns `true` if the current module defines a given type
(private, opaque or not). This function is only available
for modules being compiled.
"""
@spec defines_type?(module, atom, arity) :: boolean
def defines_type?(module, name, arity)
when is_atom(module) and is_atom(name) and arity in 0..255 do
finder = fn {_kind, expr, _caller} ->
type_to_signature(expr) == {name, arity}
end
:lists.any(finder, Module.get_attribute(module, :type)) or
:lists.any(finder, Module.get_attribute(module, :opaque))
end
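# A minimal sketch of intended use, inside a module that is still being
# compiled (names hypothetical):
#
#     defmodule MySchema do
#       @type t :: atom
#       true = Kernel.Typespec.defines_type?(__MODULE__, :t, 0)
#     end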
@doc """
Returns `true` if the current module defines a given spec.
This function is only available for modules being compiled.
"""
@spec defines_spec?(module, atom, arity) :: boolean
def defines_spec?(module, name, arity)
when is_atom(module) and is_atom(name) and arity in 0..255 do
finder = fn {_kind, expr, _caller} ->
spec_to_signature(expr) == {name, arity}
end
:lists.any(finder, Module.get_attribute(module, :spec))
end
@doc """
Returns `true` if the current module defines a callback.
This function is only available for modules being compiled.
"""
@spec defines_callback?(module, atom, arity) :: boolean
def defines_callback?(module, name, arity)
when is_atom(module) and is_atom(name) and arity in 0..255 do
finder = fn {_kind, expr, _caller} ->
spec_to_signature(expr) == {name, arity}
end
:lists.any(finder, Module.get_attribute(module, :callback))
end
@doc """
Converts a spec clause back to Elixir AST.
"""
@spec spec_to_ast(atom, tuple) :: {atom, keyword, [Macro.t()]}
def spec_to_ast(name, spec)
def spec_to_ast(name, {:type, line, :fun, [{:type, _, :product, args}, result]})
when is_atom(name) do
meta = [line: line]
body = {name, meta, Enum.map(args, &typespec_to_ast/1)}
vars =
(args ++ [result])
|> Enum.flat_map(&collect_vars/1)
|> Enum.uniq()
|> Enum.map(&{&1, {:var, meta, nil}})
spec = {:::, meta, [body, typespec_to_ast(result)]}
if vars == [] do
spec
else
{:when, meta, [spec, vars]}
end
end
def spec_to_ast(name, {:type, line, :fun, []}) when is_atom(name) do
{:::, [line: line], [{name, [line: line], []}, quote(do: term)]}
end
def spec_to_ast(name, {:type, line, :bounded_fun, [type, constrs]}) when is_atom(name) do
{:type, _, :fun, [{:type, _, :product, args}, result]} = type
guards =
for {:type, _, :constraint, [{:atom, _, :is_subtype}, [{:var, _, var}, type]]} <- constrs do
{var, typespec_to_ast(type)}
end
meta = [line: line]
vars =
(args ++ [result])
|> Enum.flat_map(&collect_vars/1)
|> Enum.uniq()
|> Kernel.--(Keyword.keys(guards))
|> Enum.map(&{&1, {:var, meta, nil}})
args = for arg <- args, do: typespec_to_ast(arg)
when_args = [
{:::, meta, [{name, [line: line], args}, typespec_to_ast(result)]},
guards ++ vars
]
{:when, meta, when_args}
end
@doc """
Converts a type clause back to Elixir AST.
"""
def type_to_ast(type)
def type_to_ast({{:record, record}, fields, args}) when is_atom(record) do
fields = for field <- fields, do: typespec_to_ast(field)
args = for arg <- args, do: typespec_to_ast(arg)
type = {:{}, [], [record | fields]}
quote(do: unquote(record)(unquote_splicing(args)) :: unquote(type))
end
def type_to_ast({name, type, args}) when is_atom(name) do
args = for arg <- args, do: typespec_to_ast(arg)
quote(do: unquote(name)(unquote_splicing(args)) :: unquote(typespec_to_ast(type)))
end
@doc false
# TODO: Remove on v2.0
def beam_typedocs(module) when is_atom(module) or is_binary(module) do
warning =
"Kernel.Typespec.beam_typedocs/1 is deprecated, please use Code.get_docs/2 instead\n" <>
Exception.format_stacktrace()
IO.write(:stderr, warning)
if docs = Code.get_docs(module, :type_docs) do
for {tuple, _, _, doc} <- docs, do: {tuple, doc}
end
end
@doc """
Returns all types available from the module's BEAM code.
The result is returned as a list of tuples where the first
element is the type (`:typep`, `:type` and `:opaque`).
The module must have a corresponding BEAM file which can be
located by the runtime system.
"""
@spec beam_types(module | binary) :: [tuple] | nil
def beam_types(module) when is_atom(module) or is_binary(module) do
case abstract_code(module) do
{:ok, abstract_code} ->
exported_types = for {:attribute, _, :export_type, types} <- abstract_code, do: types
exported_types = :lists.flatten(exported_types)
for {:attribute, _, kind, {name, _, args} = type} <- abstract_code,
kind in [:opaque, :type] do
cond do
kind == :opaque -> {:opaque, type}
{name, length(args)} in exported_types -> {:type, type}
true -> {:typep, type}
end
end
_ ->
nil
end
end
@doc """
Returns all specs available from the module's BEAM code.
The result is returned as a list of tuples where the first
element is spec name and arity and the second is the spec.
The module must have a corresponding BEAM file which can be
located by the runtime system.
"""
@spec beam_specs(module | binary) :: [tuple] | nil
def beam_specs(module) when is_atom(module) or is_binary(module) do
from_abstract_code(module, :spec)
end
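# For illustration, each returned element pairs a function signature with its
# Erlang abstract spec forms (exact AST shape illustrative):
#
#     Kernel.Typespec.beam_specs(Enum)
#     #=> [{{:count, 1}, [{:type, _line, :fun, _args}]} | _rest]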
@doc """
Returns all callbacks available from the module's BEAM code.
The result is returned as a list of tuples where the first
element is spec name and arity and the second is the spec.
The module must have a corresponding BEAM file
which can be located by the runtime system.
"""
@spec beam_callbacks(module | binary) :: [tuple] | nil
def beam_callbacks(module) when is_atom(module) or is_binary(module) do
from_abstract_code(module, :callback)
end
defp from_abstract_code(module, kind) do
case abstract_code(module) do
{:ok, abstract_code} ->
for {:attribute, _, abs_kind, value} <- abstract_code, kind == abs_kind, do: value
:error ->
nil
end
end
defp abstract_code(module) do
case :beam_lib.chunks(abstract_code_beam(module), [:abstract_code]) do
{:ok, {_, [{:abstract_code, {_raw_abstract_v1, abstract_code}}]}} ->
{:ok, abstract_code}
_ ->
:error
end
end
defp abstract_code_beam(module) when is_atom(module) do
case :code.get_object_code(module) do
{^module, beam, _filename} -> beam
:error -> module
end
end
defp abstract_code_beam(binary) when is_binary(binary) do
binary
end
## Helpers
@doc false
def spec_to_signature({:when, _, [spec, _]}), do: type_to_signature(spec)
def spec_to_signature(other), do: type_to_signature(other)
@doc false
def type_to_signature({:::, _, [{name, _, context}, _]}) when is_atom(name) and is_atom(context),
do: {name, 0}
def type_to_signature({:::, _, [{name, _, args}, _]}) when is_atom(name),
do: {name, length(args)}
def type_to_signature(_), do: :error
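# For example, type_to_signature(quote(do: t(a) :: list(a))) yields {:t, 1},
# and type_to_signature(quote(do: t :: atom)) yields {:t, 0}.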
## Macro callbacks
@doc false
def defspec(kind, expr, line, file, module, pos) when kind in [:callback, :macrocallback] do
case spec_to_signature(expr) do
{name, arity} ->
store_callbackdoc(line, file, module, kind, name, arity)
:error ->
:error
end
Module.store_typespec(module, kind, {kind, expr, pos})
end
@doc false
def defspec(kind, expr, _line, _file, module, pos) do
Module.store_typespec(module, kind, {kind, expr, pos})
end
defp store_callbackdoc(line, _file, module, kind, name, arity) do
table = :elixir_module.data_table(module)
{line, doc} = get_doc_info(table, :doc, line)
:ets.insert(table, {{:callbackdoc, {name, arity}}, line, kind, doc})
end
defp get_doc_info(table, attr, line) do
case :ets.take(table, attr) do
[{^attr, {line, doc}, _, _}] -> {line, doc}
[] -> {line, nil}
end
end
@doc false
def deftype(kind, expr, line, file, module, pos) do
case type_to_signature(expr) do
{name, arity} -> store_typedoc(line, file, module, kind, name, arity)
:error -> :error
end
Module.store_typespec(module, kind, {kind, expr, pos})
end
defp store_typedoc(line, file, module, kind, name, arity) do
table = :elixir_module.data_table(module)
{line, doc} = get_doc_info(table, :typedoc, line)
if kind == :typep && doc do
warning =
"type #{name}/#{arity} is private, @typedoc's are always discarded for private types"
:elixir_errors.warn(line, file, warning)
end
:ets.insert(table, {{:typedoc, {name, arity}}, line, kind, doc})
end
## Translation from Elixir AST to typespec AST
@doc false
def translate_type(kind, {:::, _, [{name, _, args}, definition]}, pos)
when is_atom(name) and name != ::: do
caller = :elixir_locals.get_cached_env(pos)
args =
if is_atom(args) do
[]
else
for(arg <- args, do: variable(arg))
end
vars = for {:var, _, var} <- args, do: var
spec = typespec(definition, vars, caller)
vars = for {:var, _, _} = var <- args, do: var
type = {name, spec, vars}
arity = length(vars)
{kind, export} =
case kind do
:type -> {:type, true}
:typep -> {:type, false}
:opaque -> {:opaque, true}
end
if builtin_type?(name, arity) do
compile_error(caller, "type #{name}/#{arity} is a builtin type and it cannot be redefined")
end
{kind, {name, arity}, caller.line, type, export}
end
def translate_type(_kind, other, pos) do
caller = :elixir_locals.get_cached_env(pos)
type_spec = Macro.to_string(other)
compile_error(caller, "invalid type specification: #{type_spec}")
end
defp builtin_type?(:as_boolean, 1), do: true
defp builtin_type?(:struct, 0), do: true
defp builtin_type?(:charlist, 0), do: true
# TODO: Remove char_list type by 2.0
defp builtin_type?(:char_list, 0), do: true
defp builtin_type?(:nonempty_charlist, 0), do: true
defp builtin_type?(:keyword, 0), do: true
defp builtin_type?(:keyword, 1), do: true
defp builtin_type?(name, arity), do: :erl_internal.is_type(name, arity)
@doc false
def translate_spec(kind, {:when, _meta, [spec, guard]}, pos) do
caller = :elixir_locals.get_cached_env(pos)
translate_spec(kind, spec, guard, caller)
end
def translate_spec(kind, spec, pos) do
caller = :elixir_locals.get_cached_env(pos)
translate_spec(kind, spec, [], caller)
end
defp translate_spec(kind, {:::, meta, [{name, _, args}, return]}, guard, caller)
when is_atom(name) and name != ::: do
translate_spec(kind, meta, name, args, return, guard, caller)
end
defp translate_spec(_kind, {name, _meta, _args} = spec, _guard, caller)
when is_atom(name) and name != ::: do
spec = Macro.to_string(spec)
compile_error(caller, "type specification missing return type: #{spec}")
end
defp translate_spec(_kind, spec, _guard, caller) do
spec = Macro.to_string(spec)
compile_error(caller, "invalid type specification: #{spec}")
end
defp translate_spec(kind, meta, name, args, return, guard, caller) when is_atom(args),
do: translate_spec(kind, meta, name, [], return, guard, caller)
defp translate_spec(kind, meta, name, args, return, guard, caller) do
ensure_no_defaults!(args)
unless Keyword.keyword?(guard) do
error = "expected keywords as guard in type specification, got: #{Macro.to_string(guard)}"
compile_error(caller, error)
end
vars = Keyword.keys(guard)
spec = {:type, line(meta), :fun, fn_args(meta, args, return, vars, caller)}
spec =
case guard_to_constraints(guard, vars, meta, caller) do
[] -> spec
constraints -> {:type, line(meta), :bounded_fun, [spec, constraints]}
end
arity = length(args)
{kind, {name, arity}, caller.line, spec}
end
defp ensure_no_defaults!(args) do
foreach_fun = fn
{:::, _, [left, right]} ->
ensure_not_default(left)
ensure_not_default(right)
left
other ->
ensure_not_default(other)
other
end
:lists.foreach(foreach_fun, args)
end
defp ensure_not_default({:\\, _, [_, _]}) do
raise ArgumentError, "default arguments \\\\ not supported in type spec"
end
defp ensure_not_default(_), do: :ok
defp guard_to_constraints(guard, vars, meta, caller) do
line = line(meta)
foldl_fun = fn
{_name, {:var, _, context}}, acc when is_atom(context) ->
acc
{name, type}, acc ->
constraint = [
{:atom, line, :is_subtype},
[{:var, line, name}, typespec(type, vars, caller)]
]
type = {:type, line, :constraint, constraint}
[type | acc]
end
:lists.reverse(:lists.foldl(foldl_fun, [], guard))
end
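# For example, the guard in `@spec f(a) :: a when a: integer` becomes one
# :is_subtype constraint pairing the variable `a` with the integer type.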
## To AST conversion
defp collect_vars({:ann_type, _line, args}) when is_list(args) do
[]
end
defp collect_vars({:type, _line, _kind, args}) when is_list(args) do
Enum.flat_map(args, &collect_vars/1)
end
defp collect_vars({:remote_type, _line, args}) when is_list(args) do
Enum.flat_map(args, &collect_vars/1)
end
defp collect_vars({:typed_record_field, _line, type}) do
collect_vars(type)
end
defp collect_vars({:paren_type, _line, [type]}) do
collect_vars(type)
end
defp collect_vars({:var, _line, var}) do
[erl_to_ex_var(var)]
end
defp collect_vars(_) do
[]
end
defp typespec_to_ast({:user_type, line, name, args}) do
typespec_to_ast({:type, line, name, args})
end
defp typespec_to_ast({:type, line, :tuple, :any}) do
{:tuple, [line: line], []}
end
defp typespec_to_ast({:type, line, :tuple, args}) do
args = for arg <- args, do: typespec_to_ast(arg)
{:{}, [line: line], args}
end
defp typespec_to_ast({:type, _line, :list, [{:type, _, :union, unions} = arg]}) do
case unpack_typespec_kw(unions, []) do
{:ok, ast} -> ast
:error -> [typespec_to_ast(arg)]
end
end
defp typespec_to_ast({:type, line, :list, []}) do
{:list, [line: line], []}
end
defp typespec_to_ast({:type, _line, :list, [arg]}) do
[typespec_to_ast(arg)]
end
defp typespec_to_ast({:type, line, :nonempty_list, []}) do
[{:..., [line: line], nil}]
end
defp typespec_to_ast({:type, line, :nonempty_list, [arg]}) do
[typespec_to_ast(arg), {:..., [line: line], nil}]
end
defp typespec_to_ast({:type, line, :map, :any}) do
{:map, [line: line], []}
end
defp typespec_to_ast({:type, line, :map, fields}) do
fields =
Enum.map(fields, fn
{:type, _, :map_field_assoc, :any} ->
{{:optional, [], [{:any, [], []}]}, {:any, [], []}}
{:type, _, :map_field_exact, [{:atom, _, k}, v]} ->
{k, typespec_to_ast(v)}
{:type, _, :map_field_exact, [k, v]} ->
{{:required, [], [typespec_to_ast(k)]}, typespec_to_ast(v)}
{:type, _, :map_field_assoc, [k, v]} ->
{{:optional, [], [typespec_to_ast(k)]}, typespec_to_ast(v)}
end)
{struct, fields} = Keyword.pop(fields, :__struct__)
map = {:%{}, [line: line], fields}
if struct do
{:%, [line: line], [struct, map]}
else
map
end
end
defp typespec_to_ast({:type, line, :binary, [arg1, arg2]}) do
[arg1, arg2] = for arg <- [arg1, arg2], do: typespec_to_ast(arg)
case {arg1, arg2} do
{arg1, 0} ->
quote(line: line, do: <<_::unquote(arg1)>>)
{0, arg2} ->
quote(line: line, do: <<_::_*unquote(arg2)>>)
{arg1, arg2} ->
quote(line: line, do: <<_::unquote(arg1), _::_*unquote(arg2)>>)
end
end
defp typespec_to_ast({:type, line, :union, args}) do
args = for arg <- args, do: typespec_to_ast(arg)
Enum.reduce(Enum.reverse(args), fn arg, expr -> {:|, [line: line], [arg, expr]} end)
end
defp typespec_to_ast({:type, line, :fun, [{:type, _, :product, args}, result]}) do
args = for arg <- args, do: typespec_to_ast(arg)
[{:->, [line: line], [args, typespec_to_ast(result)]}]
end
defp typespec_to_ast({:type, line, :fun, [args, result]}) do
[{:->, [line: line], [[typespec_to_ast(args)], typespec_to_ast(result)]}]
end
defp typespec_to_ast({:type, line, :fun, []}) do
typespec_to_ast({:type, line, :fun, [{:type, line, :any}, {:type, line, :any, []}]})
end
defp typespec_to_ast({:type, line, :range, [left, right]}) do
{:.., [line: line], [typespec_to_ast(left), typespec_to_ast(right)]}
end
defp typespec_to_ast({:type, _line, nil, []}) do
[]
end
defp typespec_to_ast({:type, line, name, args}) do
args = for arg <- args, do: typespec_to_ast(arg)
{name, [line: line], args}
end
defp typespec_to_ast({:var, line, var}) do
{erl_to_ex_var(var), line, nil}
end
defp typespec_to_ast({:op, line, op, arg}) do
{op, [line: line], [typespec_to_ast(arg)]}
end
# Special shortcut(s)
# TODO: Remove char_list type by 2.0
defp typespec_to_ast({:remote_type, line, [{:atom, _, :elixir}, {:atom, _, type}, []]})
when type in [:charlist, :char_list] do
typespec_to_ast({:type, line, :charlist, []})
end
defp typespec_to_ast({
:remote_type,
line,
[{:atom, _, :elixir}, {:atom, _, :nonempty_charlist}, []]
}) do
typespec_to_ast({:type, line, :nonempty_charlist, []})
end
defp typespec_to_ast({:remote_type, line, [{:atom, _, :elixir}, {:atom, _, :struct}, []]}) do
typespec_to_ast({:type, line, :struct, []})
end
defp typespec_to_ast({:remote_type, line, [{:atom, _, :elixir}, {:atom, _, :as_boolean}, [arg]]}) do
typespec_to_ast({:type, line, :as_boolean, [arg]})
end
defp typespec_to_ast({:remote_type, line, [{:atom, _, :elixir}, {:atom, _, :keyword}, args]}) do
typespec_to_ast({:type, line, :keyword, args})
end
defp typespec_to_ast({:remote_type, line, [mod, name, args]}) do
args = for arg <- args, do: typespec_to_ast(arg)
dot = {:., [line: line], [typespec_to_ast(mod), typespec_to_ast(name)]}
{dot, [line: line], args}
end
defp typespec_to_ast({:ann_type, line, [var, type]}) do
{:::, [line: line], [typespec_to_ast(var), typespec_to_ast(type)]}
end
defp typespec_to_ast({:typed_record_field, {:record_field, line, {:atom, line1, name}}, type}) do
typespec_to_ast({:ann_type, line, [{:var, line1, name}, type]})
end
defp typespec_to_ast({:type, _, :any}) do
quote(do: ...)
end
defp typespec_to_ast({:paren_type, _, [type]}) do
typespec_to_ast(type)
end
defp typespec_to_ast({t, _line, atom}) when is_atom(t) do
atom
end
defp typespec_to_ast(other), do: other
defp erl_to_ex_var(var) do
case Atom.to_string(var) do
<<"_", c::binary-1, rest::binary>> ->
String.to_atom("_#{String.downcase(c)}#{rest}")
<<c::binary-1, rest::binary>> ->
String.to_atom("#{String.downcase(c)}#{rest}")
end
end
## To typespec conversion
defp line(meta) do
case :lists.keyfind(:line, 1, meta) do
{:line, line} -> line
false -> 0
end
end
# Handle unions
defp typespec({:|, meta, [_, _]} = exprs, vars, caller) do
exprs = collect_union(exprs)
union = for e <- exprs, do: typespec(e, vars, caller)
{:type, line(meta), :union, union}
end
# Handle binaries
defp typespec({:<<>>, meta, []}, _, _) do
{:type, line(meta), :binary, [{:integer, line(meta), 0}, {:integer, line(meta), 0}]}
end
defp typespec(
{:<<>>, meta, [{:::, unit_meta, [{:_, _, ctx1}, {:*, _, [{:_, _, ctx2}, unit]}]}]},
_,
_
)
when is_atom(ctx1) and is_atom(ctx2) and is_integer(unit) do
{:type, line(meta), :binary, [{:integer, line(meta), 0}, {:integer, line(unit_meta), unit}]}
end
defp typespec({:<<>>, meta, [{:::, size_meta, [{:_, _, ctx}, size]}]}, _, _)
when is_atom(ctx) and is_integer(size) do
{:type, line(meta), :binary, [{:integer, line(size_meta), size}, {:integer, line(meta), 0}]}
end
defp typespec(
{
:<<>>,
meta,
[
{:::, size_meta, [{:_, _, ctx1}, size]},
{:::, unit_meta, [{:_, _, ctx2}, {:*, _, [{:_, _, ctx3}, unit]}]}
]
},
_,
_
)
when is_atom(ctx1) and is_atom(ctx2) and is_atom(ctx3) and is_integer(size) and
is_integer(unit) do
args = [{:integer, line(size_meta), size}, {:integer, line(unit_meta), unit}]
{:type, line(meta), :binary, args}
end
## Handle maps and structs
defp typespec({:map, meta, args}, _vars, _caller) when args == [] or is_atom(args) do
{:type, line(meta), :map, :any}
end
defp typespec({:%{}, meta, fields} = map, vars, caller) do
map_fun = fn
{k, v} when is_atom(k) ->
args = [typespec(k, vars, caller), typespec(v, vars, caller)]
{:type, line(meta), :map_field_exact, args}
{{:required, meta2, [k]}, v} ->
args = [typespec(k, vars, caller), typespec(v, vars, caller)]
{:type, line(meta2), :map_field_exact, args}
{{:optional, meta2, [k]}, v} ->
args = [typespec(k, vars, caller), typespec(v, vars, caller)]
{:type, line(meta2), :map_field_assoc, args}
{k, v} ->
# TODO: Warn on Elixir v1.8 (since v1.6 is the first version to drop support for 18 and
# older)
# warning =
# "invalid map specification. %{foo => bar} is deprecated in favor of " <>
# "%{required(foo) => bar} and %{optional(foo) => bar}."
# :elixir_errors.warn(caller.line, caller.file, warning)
args = [typespec(k, vars, caller), typespec(v, vars, caller)]
{:type, line(meta), :map_field_assoc, args}
{:|, _, [_, _]} ->
error =
"invalid map specification. When using the | operator in the map key, " <>
"make sure to wrap the key type in parentheses: #{Macro.to_string(map)}"
compile_error(caller, error)
_ ->
compile_error(caller, "invalid map specification: #{Macro.to_string(map)}")
end
fields = :lists.map(map_fun, fields)
{:type, line(meta), :map, fields}
end
defp typespec({:%, _, [name, {:%{}, meta, fields}]}, vars, caller) do
# We cannot set a function name to avoid tracking
# as a compile time dependency, because for structs it actually is one.
module = Macro.expand(name, caller)
struct =
if module == caller.module do
Module.get_attribute(module, :struct) ||
compile_error(caller, "struct is not defined for #{Macro.to_string(name)}")
else
module.__struct__
end
struct = struct |> Map.from_struct() |> Map.to_list()
unless Keyword.keyword?(fields) do
compile_error(caller, "expected key-value pairs in struct #{Macro.to_string(name)}")
end
map_fun = fn {field, _} -> {field, Keyword.get(fields, field, quote(do: term()))} end
types = :lists.map(map_fun, struct)
foreach_fun = fn {field, _} ->
unless Keyword.has_key?(struct, field) do
compile_error(caller, "undefined field #{field} on struct #{Macro.to_string(name)}")
end
end
:lists.foreach(foreach_fun, fields)
typespec({:%{}, meta, [__struct__: module] ++ types}, vars, caller)
end
# Handle records
defp typespec({:record, meta, [atom]}, vars, caller) do
typespec({:record, meta, [atom, []]}, vars, caller)
end
defp typespec({:record, meta, [atom, fields]}, vars, caller) do
# We cannot set a function name to avoid tracking
# as a compile time dependency because for records it actually is one.
case Macro.expand({atom, [], [{atom, [], []}]}, caller) do
keyword when is_list(keyword) ->
map_fun = fn {field, _} -> Keyword.get(fields, field, quote(do: term())) end
types = :lists.map(map_fun, keyword)
foreach_fun = fn {field, _} ->
unless Keyword.has_key?(keyword, field) do
compile_error(caller, "undefined field #{field} on record #{inspect(atom)}")
end
end
:lists.foreach(foreach_fun, fields)
typespec({:{}, meta, [atom | types]}, vars, caller)
_ ->
compile_error(caller, "unknown record #{inspect(atom)}")
end
end
# Handle ranges
defp typespec({:.., meta, args}, vars, caller) do
args = for arg <- args, do: typespec(arg, vars, caller)
{:type, line(meta), :range, args}
end
# Handle special forms
defp typespec({:__MODULE__, _, atom}, vars, caller) when is_atom(atom) do
typespec(caller.module, vars, caller)
end
defp typespec({:__aliases__, _, _} = alias, vars, caller) do
# We set a function name to avoid tracking
# aliases in typespecs as compile time dependencies.
atom = Macro.expand(alias, %{caller | function: {:typespec, 0}})
typespec(atom, vars, caller)
end
# Handle funs
defp typespec([{:->, meta, [arguments, return]}], vars, caller) when is_list(arguments) do
args = fn_args(meta, arguments, return, vars, caller)
{:type, line(meta), :fun, args}
end
# Handle type operator
defp typespec({:::, meta, [var, expr]}, vars, caller) do
left = typespec(var, [elem(var, 0) | vars], caller)
right = typespec(expr, vars, caller)
{:ann_type, line(meta), [left, right]}
end
# Handle unary ops
defp typespec({op, meta, [integer]}, _, _) when op in [:+, :-] and is_integer(integer) do
{:op, line(meta), op, {:integer, line(meta), integer}}
end
# Handle remote calls in the form of @module_attribute.type.
# These are not handled by the general remote type clause as calling
# Macro.expand/2 on the remote does not expand module attributes (but expands
# things like __MODULE__).
defp typespec({{:., meta, [{:@, _, [{attr, _, _}]}, name]}, _, args} = orig, vars, caller) do
remote = Module.get_attribute(caller.module, attr)
unless is_atom(remote) and remote != nil do
message =
"invalid remote in typespec: #{Macro.to_string(orig)} (@#{attr} is #{inspect(remote)})"
compile_error(caller, message)
end
type = {typespec(remote, vars, caller), meta, typespec(name, vars, caller), args}
remote_type(type, vars, caller)
end
# Handle remote calls
defp typespec({{:., meta, [remote, name]}, _, args} = orig, vars, caller) do
# We set a function name to avoid tracking
# aliases in typespecs as compile time dependencies.
remote = Macro.expand(remote, %{caller | function: {:typespec, 0}})
unless is_atom(remote) do
compile_error(caller, "invalid remote in typespec: #{Macro.to_string(orig)}")
end
type = {typespec(remote, vars, caller), meta, typespec(name, vars, caller), args}
remote_type(type, vars, caller)
end
# Handle tuples
defp typespec({:tuple, meta, []}, _vars, _caller) do
{:type, line(meta), :tuple, :any}
end
defp typespec({:{}, meta, t}, vars, caller) when is_list(t) do
args = for e <- t, do: typespec(e, vars, caller)
{:type, line(meta), :tuple, args}
end
defp typespec({left, right}, vars, caller) do
typespec({:{}, [], [left, right]}, vars, caller)
end
# Handle blocks
defp typespec({:__block__, _meta, [arg]}, vars, caller) do
typespec(arg, vars, caller)
end
# Handle variables or local calls
defp typespec({name, meta, atom}, vars, caller) when is_atom(atom) do
if :lists.member(name, vars) do
{:var, line(meta), name}
else
typespec({name, meta, []}, vars, caller)
end
end
# Handle local calls
defp typespec({:string, meta, arguments}, vars, caller) do
warning =
"string() type use is discouraged. " <>
"For character lists, use charlist() type, for strings, String.t()\n" <>
Exception.format_stacktrace(Macro.Env.stacktrace(caller))
:elixir_errors.warn(caller.line, caller.file, warning)
arguments = for arg <- arguments, do: typespec(arg, vars, caller)
{:type, line(meta), :string, arguments}
end
defp typespec({:nonempty_string, meta, arguments}, vars, caller) do
warning =
"nonempty_string() type use is discouraged. " <>
"For non-empty character lists, use nonempty_charlist() type, for strings, String.t()\n" <>
Exception.format_stacktrace(Macro.Env.stacktrace(caller))
:elixir_errors.warn(caller.line, caller.file, warning)
arguments = for arg <- arguments, do: typespec(arg, vars, caller)
{:type, line(meta), :nonempty_string, arguments}
end
# TODO: Remove char_list type by 2.0
defp typespec({type, _meta, []}, vars, caller) when type in [:charlist, :char_list] do
if type == :char_list do
warning = "the char_list() type is deprecated, use charlist()"
:elixir_errors.warn(caller.line, caller.file, warning)
end
typespec(quote(do: :elixir.charlist()), vars, caller)
end
defp typespec({:nonempty_charlist, _meta, []}, vars, caller) do
typespec(quote(do: :elixir.nonempty_charlist()), vars, caller)
end
defp typespec({:struct, _meta, []}, vars, caller) do
typespec(quote(do: :elixir.struct()), vars, caller)
end
defp typespec({:as_boolean, _meta, [arg]}, vars, caller) do
typespec(quote(do: :elixir.as_boolean(unquote(arg))), vars, caller)
end
defp typespec({:keyword, _meta, args}, vars, caller) when length(args) <= 1 do
typespec(quote(do: :elixir.keyword(unquote_splicing(args))), vars, caller)
end
defp typespec({:fun, meta, args}, vars, caller) do
args = for arg <- args, do: typespec(arg, vars, caller)
{:type, line(meta), :fun, args}
end
defp typespec({name, meta, arguments}, vars, caller) do
arguments = for arg <- arguments, do: typespec(arg, vars, caller)
arity = length(arguments)
type = if :erl_internal.is_type(name, arity), do: :type, else: :user_type
{type, line(meta), name, arguments}
end
# Handle literals
defp typespec(atom, _, _) when is_atom(atom) do
{:atom, 0, atom}
end
defp typespec(integer, _, _) when is_integer(integer) do
{:integer, 0, integer}
end
defp typespec([], vars, caller) do
typespec({nil, [], []}, vars, caller)
end
defp typespec([{:..., _, atom}], vars, caller) when is_atom(atom) do
typespec({:nonempty_list, [], []}, vars, caller)
end
defp typespec([spec, {:..., _, atom}], vars, caller) when is_atom(atom) do
typespec({:nonempty_list, [], [spec]}, vars, caller)
end
defp typespec([spec], vars, caller) do
typespec({:list, [], [spec]}, vars, caller)
end
defp typespec(list, vars, caller) when is_list(list) do
[h | t] = :lists.reverse(list)
foldl_fun = fn x, acc -> {:|, [], [validate_kw(x, list, caller), acc]} end
union = :lists.foldl(foldl_fun, validate_kw(h, list, caller), t)
typespec({:list, [], [union]}, vars, caller)
end
defp typespec(other, _vars, caller) do
compile_error(caller, "unexpected expression in typespec: #{Macro.to_string(other)}")
end
## Helpers
defp compile_error(caller, desc) do
raise CompileError, file: caller.file, line: caller.line, description: desc
end
defp remote_type({remote, meta, name, arguments}, vars, caller) do
arguments = for arg <- arguments, do: typespec(arg, vars, caller)
{:remote_type, line(meta), [remote, name, arguments]}
end
defp collect_union({:|, _, [a, b]}), do: [a | collect_union(b)]
defp collect_union(v), do: [v]
defp validate_kw({key, _} = t, _, _caller) when is_atom(key), do: t
defp validate_kw(_, original, caller) do
compile_error(caller, "unexpected list in typespec: #{Macro.to_string(original)}")
end
defp fn_args(meta, args, return, vars, caller) do
case [fn_args(meta, args, vars, caller), typespec(return, vars, caller)] do
[{:type, _, :any}, {:type, _, :any, []}] -> []
x -> x
end
end
defp fn_args(meta, [{:..., _, _}], _vars, _caller) do
{:type, line(meta), :any}
end
defp fn_args(meta, args, vars, caller) do
args = for arg <- args, do: typespec(arg, vars, caller)
{:type, line(meta), :product, args}
end
defp variable({name, meta, _}) do
{:var, line(meta), name}
end
defp unpack_typespec_kw([{:type, _, :tuple, [{:atom, _, atom}, type]} | t], acc) do
unpack_typespec_kw(t, [{atom, typespec_to_ast(type)} | acc])
end
defp unpack_typespec_kw([], acc) do
{:ok, :lists.reverse(acc)}
end
defp unpack_typespec_kw(_, _acc) do
:error
end
end
|
lib/elixir/lib/kernel/typespec.ex
| 0.890109 | 0.501953 |
typespec.ex
|
starcoder
|
defmodule Phoenix.LiveView.Controller do
@moduledoc """
The Controller for LiveView rendering.
"""
@behaviour Plug
alias Phoenix.LiveView
@doc """
Renders a live view from a Plug request and sends an HTML response.
## Options
* `:session` - the map of session data to sign and send
to the client. When connecting from the client, the live view
will receive the signed session from the client and verify
the contents before proceeding with `mount/2`.
Before rendering, the `@live_view_module` assign will be added to the
connection assigns for reference.
## Examples
defmodule ThermostatController do
...
import Phoenix.LiveView.Controller
def show(conn, %{"id" => thermostat_id}) do
live_render(conn, ThermostatLive, session: %{
thermostat_id: thermostat_id,
current_user_id: get_session(conn, :user_id),
})
end
end
"""
def live_render(%Plug.Conn{} = conn, view, opts) do
endpoint = Phoenix.Controller.endpoint_module(conn)
case LiveView.View.static_render(endpoint, view, opts) do
{:ok, content} ->
conn
|> Plug.Conn.assign(:live_view_module, view)
|> Phoenix.Controller.put_view(__MODULE__)
|> Phoenix.Controller.render("template.html", %{
conn: conn,
content: content
})
{:stop, {:redirect, opts}} ->
Phoenix.Controller.redirect(conn, to: Map.fetch!(opts, :to))
end
end
@doc false
@impl Plug
def init(opts), do: opts
@doc false
@impl Plug
def call(%Plug.Conn{private: %{phoenix_live_view: phx_opts}} = conn, view) do
session_opts = phx_opts[:session] || [:path_params]
opts = Keyword.merge(phx_opts, session: session(conn, session_opts))
live_render(conn, view, opts)
end
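# Builds the session map for live_render/3 from the configured keys:
# :path_params is copied from the connection, any other key is read from
# the Plug session.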
defp session(conn, session_opts) do
Enum.reduce(session_opts, %{}, fn
:path_params, acc -> Map.put(acc, :path_params, conn.path_params)
key, acc -> Map.put(acc, key, Plug.Conn.get_session(conn, key))
end)
end
@doc false
# acts as a view via put_view to maintain the
# controller render + instrumentation stack
def render("template.html", %{content: content}) do
content
end
def render(_other, _assigns), do: nil
end
|
lib/phoenix_live_view/controller.ex
| 0.845592 | 0.515376 |
controller.ex
|
starcoder
|
defmodule FarmbotOS.SysCalls.PinControl do
@moduledoc false
alias FarmbotOS.{Asset, Leds}
alias FarmbotOS.Firmware.Command
alias FarmbotOS.Asset.{
BoxLed,
Peripheral,
Sensor
}
require FarmbotOS.Logger
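# Returns the last value reported for a pin from the cached bot state,
# avoiding a round-trip to the firmware.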
def read_cached_pin(%_{pin: number}) do
read_cached_pin(number)
end
def read_cached_pin(pin_number) do
FarmbotOS.BotState.fetch().pins[pin_number][:value]
end
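# Toggles a digital pin: the pin is first forced into output mode, its
# current digital value is read, and the opposite value is written back.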
def toggle_pin(pin_number) when is_number(pin_number) do
peripheral = Asset.get_peripheral_by_pin(pin_number)
with {:ok, _} <- Command.set_pin_io_mode(pin_number, :output) do
case Command.read_pin(pin_number, :digital) do
{:ok, 1} -> do_toggle_pin(peripheral || pin_number, 0)
{:ok, 0} -> do_toggle_pin(peripheral || pin_number, 1)
reason -> FarmbotOS.SysCalls.give_firmware_reason("toggle_pin", reason)
end
else
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason("toggle_pin", reason)
end
end
def toggle_pin(pin_number) do
{:error, "Unknown pin data: #{inspect(pin_number)}"}
end
def set_servo_angle(pin, angle) do
case Command.move_servo(pin, angle) do
{:ok, _} ->
:ok
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason("set_servo_angle", reason)
end
end
defp do_toggle_pin(%Peripheral{pin: pin_number} = data, value) do
with {:ok, _} <- Command.write_pin(pin_number, value, :digital),
value when is_number(value) <- do_read_pin(data, 0) do
:ok
else
reason ->
FarmbotOS.SysCalls.give_firmware_reason(
"do_toggle_pin:Peripheral",
reason
)
end
end
defp do_toggle_pin(pin_number, value) do
result = Command.write_pin(pin_number, value, 0)
with {:ok, _} <- result,
value when is_number(value) <- do_read_pin(pin_number, 0) do
:ok
else
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason("do_toggle_pin:int", reason)
end
end
def read_pin(%Peripheral{pin: _} = data, mode) do
do_read_pin(data, mode)
end
def read_pin(%Sensor{pin: pin} = data, mode) do
case do_read_pin(data, mode) do
{:error, _} = error ->
error
value ->
position = FarmbotOS.SysCalls.get_position()
params = %{
pin: pin,
mode: mode,
value: value,
x: position[:x],
y: position[:y],
z: position[:z]
}
_ = Asset.new_sensor_reading!(params)
value
end
end
def read_pin(%BoxLed{}, _mode) do
# {:error, "cannot read values of BoxLed"}
1
end
def read_pin(pin_number, mode) when is_number(pin_number) do
sensor = Asset.get_sensor_by_pin(pin_number)
peripheral = Asset.get_peripheral_by_pin(pin_number)
cond do
is_map(sensor) ->
read_pin(sensor, mode)
is_map(peripheral) ->
read_pin(peripheral, mode)
true ->
do_read_pin(pin_number, mode)
end
end
# digital peripheral
defp do_read_pin(%Peripheral{pin: pin_number, label: label}, 0)
when is_number(pin_number) do
case Command.read_pin(pin_number, 0) do
{:ok, 1} ->
FarmbotOS.Logger.info(
2,
"The #{label} peripheral value is ON (digital)"
)
1
{:ok, 0} ->
FarmbotOS.Logger.info(
2,
"The #{label} peripheral value is OFF (digital)"
)
0
{:ok, value} ->
FarmbotOS.Logger.info(
2,
"The #{label} peripheral value is #{value} (analog)"
)
value
reason ->
FarmbotOS.SysCalls.give_firmware_reason("do_read_pin", reason)
end
end
# analog peripheral
defp do_read_pin(%Peripheral{pin: pin_number, label: label}, 1)
when is_number(pin_number) do
case Command.read_pin(pin_number, 1) do
{:ok, value} ->
msg = "The #{label} peripheral value is #{value} (analog)"
FarmbotOS.Logger.info(2, msg)
value
{:error, reason} ->
place = "do_read_pin:Peripheral"
FarmbotOS.SysCalls.give_firmware_reason(place, reason)
end
end
# digital sensor
defp do_read_pin(%Sensor{pin: pin_number, label: label}, 0)
when is_number(pin_number) do
case Command.read_pin(pin_number, 0) do
{:ok, 1} ->
FarmbotOS.Logger.info(2, "The #{label} sensor value is 1 (digital)")
1
{:ok, 0} ->
FarmbotOS.Logger.info(2, "The #{label} sensor value is 0 (digital)")
0
{:ok, value} ->
msg = "The #{label} sensor value is #{value} (analog)"
FarmbotOS.Logger.info(2, msg)
value
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason("do_read_pin(%Sensor)", reason)
end
end
# analog sensor
defp do_read_pin(%Sensor{pin: pin_number, label: label}, 1)
when is_number(pin_number) do
case Command.read_pin(pin_number, 1) do
{:ok, value} ->
msg = "The #{label} sensor value is #{value} (analog)"
FarmbotOS.Logger.info(2, msg)
value
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason("do_read_pin(%Sensor)", reason)
end
end
# Catches unsupplied `mode`
defp do_read_pin(%type{mode: mode} = peripheral, nil)
when type in [Peripheral, Sensor] do
do_read_pin(peripheral, mode)
end
# Generic pin digital
defp do_read_pin(pin_number, 0) when is_number(pin_number) do
case Command.read_pin(pin_number, 0) do
{:ok, 0} ->
FarmbotOS.Logger.info(2, "Pin #{pin_number} value is OFF (digital)")
0
{:ok, 1} ->
FarmbotOS.Logger.info(2, "Pin #{pin_number} value is ON (digital)")
1
{:ok, value} ->
FarmbotOS.Logger.info(2, "Pin #{pin_number} is #{value} (analog)")
value
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason(
"do_read_pin(pin_number, 0)",
reason
)
end
end
# Generic pin digital
defp do_read_pin(pin_number, 1) when is_number(pin_number) do
case Command.read_pin(pin_number, 1) do
{:ok, value} ->
FarmbotOS.Logger.info(2, "Pin #{pin_number} is #{value} (analog)")
value
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason(
"do_read_pin(pin_number, 1)",
reason
)
end
end
# Peripheral digital
def write_pin(%Peripheral{pin: pin, label: _label}, 0, 1) do
do_write_pin(pin, 0, 1)
end
def write_pin(%Peripheral{pin: pin, label: _label}, 0, 0) do
do_write_pin(pin, 0, 0)
end
# Peripheral analog
def write_pin(%Peripheral{pin: pin, label: _label}, 1, value) do
do_write_pin(pin, 1, value)
end
def write_pin(%Sensor{pin: _pin}, _mode, _value) do
{:error, "cannot write Sensor value. Use a Peripheral"}
end
def write_pin(%BoxLed{id: 3}, 0, 1) do
FarmbotOS.Logger.info(2, "Turning Boxled3 ON")
Leds.white4(:solid)
:ok
end
def write_pin(%BoxLed{id: 3}, 0, 0) do
FarmbotOS.Logger.info(2, "Turning Boxled3 OFF")
Leds.white4(:off)
:ok
end
def write_pin(%BoxLed{id: 4}, 0, 1) do
FarmbotOS.Logger.info(2, "Turning Boxled4 ON")
Leds.white5(:solid)
:ok
end
def write_pin(%BoxLed{id: 4}, 0, 0) do
FarmbotOS.Logger.info(2, "Turning Boxled4 OFF")
Leds.white5(:off)
:ok
end
def write_pin(%BoxLed{id: id}, _mode, _) do
{:error, "cannot write Boxled#{id} in analog mode"}
end
# Generic pin digital
def write_pin(pin, 0, 1) do
do_write_pin(pin, 0, 1)
end
def write_pin(pin, 0, 0) do
do_write_pin(pin, 0, 0)
end
def write_pin(pin, 1, value) do
do_write_pin(pin, 1, value)
end
def do_write_pin(pin_number, mode, value) do
case Command.write_pin(pin_number, value, mode) do
{:ok, _} ->
FarmbotOS.BotState.set_pin_value(pin_number, value / 1, mode)
:ok
{:error, reason} ->
FarmbotOS.SysCalls.give_firmware_reason("do_write_pin/3", reason)
end
end
end
|
lib/os/sys_calls/pin_control.ex
| 0.560854 | 0.400456 |
pin_control.ex
|
starcoder
|
defmodule ExPixBRCode.JWS.Models.JWKS do
@moduledoc """
A JWKS result following RFC https://tools.ietf.org/html/rfc7517
"""
use ExPixBRCode.ValueObject
@key_required [:kty, :kid, :x5t, :x5c, :key_ops]
@key_optional [:use, :alg, :"x5t#S256", :x5u, :n, :e, :crv, :x, :y]
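# JWS algorithms supported by JOSE, excluding the symmetric "HS*" family and
# "none", which are not acceptable for verifying asymmetric signatures here.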
@supported_algs JOSE.JWA.supports()
|> Keyword.get(:jws)
|> elem(1)
|> Enum.reject(&(String.starts_with?(&1, "HS") or &1 == "none"))
embedded_schema do
embeds_many :keys, Key do
field :kty, :string
field :use, :string
field :key_ops, {:array, :string}
field :alg, :string
field :kid, :string
field :x5u, :string
field :x5t, :string
field :"x5t#S256", :string
field :x5c, {:array, :string}
# RSA fields
field :n, :string
field :e, :string
# EC fields
field :crv, :string
field :x, :string
field :y, :string
end
end
@doc false
def changeset(model \\ %__MODULE__{}, params) do
model
|> cast(params, [])
|> cast_embed(:keys, with: &key_changeset/2, required: true)
end
defp key_changeset(model, params) do
model
|> cast(params, @key_required ++ @key_optional)
|> validate_required(@key_required)
|> validate_inclusion(:alg, @supported_algs)
|> validate_inclusion(:kty, ["EC", "RSA"])
|> validate_subset(:key_ops, ["verify"])
|> validate_length(:x5c, min: 1)
|> validate_per_kty()
end
defp validate_per_kty(%{valid?: false} = c), do: c
defp validate_per_kty(changeset) do
case get_field(changeset, :kty) do
"EC" ->
crv = get_field(changeset, :crv)
x = get_field(changeset, :x)
y = get_field(changeset, :y)
validate_curve_key(changeset, crv, x, y)
"RSA" ->
n = get_field(changeset, :n)
e = get_field(changeset, :e)
validate_rsa_key(changeset, n, e)
end
end
defp validate_curve_key(changeset, crv, x, y)
when is_nil(crv) or is_nil(x) or is_nil(y),
do: add_error(changeset, :kty, "Missing EC params `crv`, `x` or `y`")
defp validate_curve_key(changeset, _, _, _), do: changeset
defp validate_rsa_key(changeset, n, e)
when is_nil(n) or is_nil(e),
do: add_error(changeset, :kty, "Missing RSA params `e` or `n`")
defp validate_rsa_key(changeset, _, _), do: changeset
end
|
lib/ex_pix_brcode/jws/models/jwks.ex
| 0.815159 | 0.436982 |
jwks.ex
|
starcoder
|
defmodule RoutePatterns.RoutePattern do
@moduledoc """
Route patterns are used to describe the subsets of a route, representing different
possible patterns of where trips may serve. For example, a bus route may have multiple
branches, and each branch may be modeled as a separate route pattern per direction.
Hierarchically, the route pattern level may be considered to be larger than the trip
level and smaller than the route level.
For most MBTA modes, a route pattern will typically represent a unique set of stops
that may be served on a route-trip combination. Seasonal schedule changes may result
in trips within a route pattern having different routings. In simple changes, such a
single bus stop removed or added between one schedule rating and the next (for example,
between the Summer and Fall schedules), trips will be maintained on the same
route_pattern_id. If the changes are significant, a new route_pattern_id may be introduced.
For Commuter Rail, express or skip-stop trips use the same route pattern as local trips.
Some branches do have multiple route patterns when the train takes a different path.
For example, CR-Providence has two route patterns per direction, one for the Wickford
Junction branch and the other for the Stoughton branch.
"""
alias JsonApi.Item
alias Routes.Route
alias Schedules.Trip
alias Stops.Stop
defstruct [
:direction_id,
:id,
:name,
:representative_trip_id,
:representative_trip_polyline,
:shape_id,
:shape_priority,
:headsign,
:stop_ids,
:route_id,
:time_desc,
:typicality
]
@type id_t :: String.t()
@type typicality_t :: 0 | 1 | 2 | 3 | 4
@type t :: %__MODULE__{
direction_id: 0 | 1,
id: id_t(),
name: String.t(),
representative_trip_id: Trip.id_t(),
representative_trip_polyline: String.t(),
shape_id: String.t(),
shape_priority: number,
headsign: String.t(),
stop_ids: [Stop.id_t()],
route_id: Route.id_t(),
time_desc: String.t(),
typicality: typicality_t()
}
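# Builds a RoutePattern from a JSON:API item. The first clause handles
# responses that include the representative trip with its nested shape
# (polyline, priority and stops); the second handles responses without
# those nested resources.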
def new(%Item{
id: id,
attributes: %{
"direction_id" => direction_id,
"name" => name,
"time_desc" => time_desc,
"typicality" => typicality
},
relationships: %{
"representative_trip" => [
%Item{
attributes: %{
"headsign" => headsign
},
id: representative_trip_id,
relationships: %{
"shape" => [
%Item{
attributes: %{
"polyline" => representative_trip_polyline,
"priority" => shape_priority
},
id: shape_id,
relationships: %{
"stops" => stops
}
}
]
}
}
],
"route" => [%Item{id: route_id}]
}
}) do
%__MODULE__{
direction_id: direction_id,
id: id,
name: name,
representative_trip_id: representative_trip_id,
representative_trip_polyline: representative_trip_polyline,
shape_id: shape_id,
shape_priority: shape_priority,
headsign: headsign,
stop_ids: Enum.map(stops, fn %JsonApi.Item{id: id} -> id end),
route_id: route_id,
time_desc: time_desc,
typicality: typicality
}
end
def new(%Item{
id: id,
attributes: %{
"direction_id" => direction_id,
"name" => name,
"time_desc" => time_desc,
"typicality" => typicality
},
relationships: %{
"representative_trip" => [%Item{id: representative_trip_id}],
"route" => [%Item{id: route_id}]
}
}) do
%__MODULE__{
direction_id: direction_id,
id: id,
name: name,
representative_trip_id: representative_trip_id,
route_id: route_id,
time_desc: time_desc,
typicality: typicality
}
end
end
|
apps/route_patterns/lib/route_pattern.ex
| 0.817101 | 0.630329 |
route_pattern.ex
|
starcoder
|
defmodule RoboticaCommon.Strings do
@moduledoc """
Provides String parsing and evaluation functions.
"""
@doc """
Substitute {xyz} values from a dictionary in a string.
iex> import RoboticaCommon.Strings
iex> replace_values("{i}-{j}", %{"i" => "hello", "j" => "world"})
{:ok, "hello-world"}
iex> import RoboticaCommon.Strings
iex> replace_values("!{i}-{j}$", %{"i" => "hello", "j" => "world"})
{:ok, "!hello-world$"}
iex> import RoboticaCommon.Strings
iex> replace_values("!{i-{j}$", %{"i" => "hello", "j" => "world"})
{:error, "Missing closing bracket on 'i-'."}
iex> import RoboticaCommon.Strings
iex> replace_values("!{i}-}-{j}$", %{"i" => "hello", "j" => "world"})
{:ok, "!hello-}-world$"}
iex> import RoboticaCommon.Strings
iex> replace_values("goodbye", %{"i" => "hello", "j" => "world"})
{:ok, "goodbye"}
iex> import RoboticaCommon.Strings
iex> replace_values("{x}-{y}", %{"i" => "hello", "j" => "world"})
{:error, "Cannot find x in lookup table of i=hello, j=world."}
"""
@spec replace_values(String.t(), %{required(String.t()) => String.t()}) ::
{:ok, String.t()} | {:error, String.t()}
def replace_values(string, values) do
case String.split(string, "{") do
[value] -> {:ok, value}
[str | list] -> replace_values_internal(list, values, [str])
end
end
defp replace_values_internal([], _values, result) do
result =
result
|> Enum.reverse()
|> Enum.join("")
{:ok, result}
end
defp replace_values_internal([str | rest], values, result) do
case String.split(str, "}", parts: 2) do
[value] ->
{:error, "Missing closing bracket on '#{value}'."}
[name, str] ->
case Map.fetch(values, name) do
{:ok, value} ->
result = [value | result]
result = [str | result]
replace_values_internal(rest, values, result)
:error ->
map =
values
# credo:disable-for-next-line Credo.Check.Refactor.Nesting
|> Enum.map(fn {a, b} -> "#{a}=#{b}" end)
|> Enum.join(", ")
{:error, "Cannot find #{name} in lookup table of #{map}."}
end
end
end
@doc """
Solve (simple) maths in a string.
iex> import RoboticaCommon.Strings
iex> solve_string("1")
{:ok, 1}
iex> import RoboticaCommon.Strings
iex> solve_string("1+2")
{:ok, 3}
iex> import RoboticaCommon.Strings
iex> solve_string("1+-2")
{:ok, -1}
iex> import RoboticaCommon.Strings
iex> solve_string("-1+2")
{:ok, 1}
iex> import RoboticaCommon.Strings
iex> solve_string("2*3")
{:ok, 6}
iex> import RoboticaCommon.Strings
iex> solve_string("2*1.5")
{:ok, 3}
iex> import RoboticaCommon.Strings
iex> solve_string("1+2*3")
{:ok, 7}
iex> solve_string("2*3+1")
{:ok, 7}
iex> import RoboticaCommon.Strings
iex> solve_string("2*3+1*2*1")
{:ok, 8}
iex> import RoboticaCommon.Strings
iex> solve_string("1+")
{:error, "Cannot parse '' as float."}
iex> import RoboticaCommon.Strings
iex> solve_string("1*")
{:error, "Cannot parse '' as float."}
iex> import RoboticaCommon.Strings
iex> solve_string("n")
{:error, "Cannot parse 'n' as float."}
iex> import RoboticaCommon.Strings
iex> solve_string("10n")
{:error, "Cannot parse '10n' as float."}
"""
@spec solve_string(String.t()) :: {:ok, integer()} | {:error, String.t()}
def solve_string(string) do
sum_parts = String.split(string, "+")
solve_sums(sum_parts, 0)
end
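# Solves the "+"-separated terms one at a time: each term's "*" products are
# reduced first, then added to the running total. A float total is rounded
# to the nearest integer once the list is exhausted.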
defp solve_sums([], result) when is_integer(result), do: {:ok, result}
defp solve_sums([], result) when is_float(result), do: {:ok, Float.round(result) |> trunc}
defp solve_sums([head | tail], result) do
this_result =
head
|> String.split("*")
|> solve_multiplications(1)
case this_result do
{:ok, value} -> solve_sums(tail, result + value)
{:error, error} -> {:error, error}
end
end
defp parse_float(value) do
case Float.parse(value) do
{value, ""} ->
{:ok, value}
{_, _} ->
{:error, "Cannot parse '#{value}' as float."}
:error ->
{:error, "Cannot parse '#{value}' as float."}
end
end
defp parse_integer(value) do
case Integer.parse(value) do
{value, ""} ->
{:ok, value}
{_, _} ->
{:error, "Cannot parse '#{value}' as integer."}
:error ->
{:error, "Cannot parse '#{value}' as integer."}
end
end
defp parse_integer_or_float(value) do
case parse_integer(value) do
{:ok, value} -> {:ok, value}
{:error, _} -> parse_float(value)
end
end
defp solve_multiplications([], result), do: {:ok, result}
defp solve_multiplications([head | tail], result) do
case parse_integer_or_float(head) do
{:ok, value} ->
solve_multiplications(tail, result * value)
{:error, error} ->
{:error, error}
end
end
@doc """
Substitute values in string and solve simple maths.
iex> import RoboticaCommon.Strings
iex> eval_string(10, %{"i" => "10", "j" => "20"})
{:ok, 10}
iex> import RoboticaCommon.Strings
iex> eval_string("{i}+{j}", %{"i" => "10", "j" => "20"})
{:ok, 30}
iex> import RoboticaCommon.Strings
iex> eval_string("{x}+{y}", %{"i" => "10", "j" => "20"})
{:error, "Cannot find x in lookup table of i=10, j=20."}
iex> import RoboticaCommon.Strings
iex> eval_string("{i}+{j}", %{"i" => "10a", "j" => "20b"})
{:error, "Cannot parse '10a' as float."}
"""
@spec eval_string(String.t() | integer(), %{required(String.t()) => String.t()}) ::
{:ok, integer()} | {:error, String.t()}
def eval_string(string, values) do
cond do
is_integer(string) ->
{:ok, string}
is_float(string) ->
{:ok, round(string)}
true ->
with {:ok, string} <- replace_values(string, values),
{:ok, result} <- solve_string(string) do
{:ok, result}
else
{:error, error} -> {:error, error}
end
end
end
@doc """
Substitute values in string. Solve simple maths if prefixed with =.
iex> import RoboticaCommon.Strings
iex> solve_string("{i}+{j}", %{"i" => "10", "j" => "20"})
{:ok, "10+20"}
iex> import RoboticaCommon.Strings
iex> solve_string("={i}+{j}", %{"i" => "10", "j" => "20"})
{:ok, "30"}
iex> import RoboticaCommon.Strings
iex> solve_string("={x}+{y}", %{"i" => "10", "j" => "20"})
{:error, "Cannot find x in lookup table of i=10, j=20."}
iex> import RoboticaCommon.Strings
iex> solve_string("={i}+{j}", %{"i" => "10a", "j" => "20"})
{:error, "Cannot parse '10a' as float."}
"""
@spec solve_string(String.t(), %{required(String.t()) => String.t()}) ::
{:ok, String.t()} | {:error, String.t()}
def solve_string(string, values) do
case replace_values(string, values) do
{:ok, "=" <> remain} ->
case solve_string(remain) do
{:ok, result} -> {:ok, Integer.to_string(result)}
{:error, error} -> {:error, error}
end
{:ok, string} ->
{:ok, string}
{:error, error} ->
{:error, error}
end
end
defp solve_string_combined_list([], _, result) do
{:ok, result |> Enum.reverse() |> Enum.join("")}
end
defp solve_string_combined_list([head | tail], values, result) do
case solve_string(head, values) do
{:ok, value} -> solve_string_combined_list(tail, values, [value | result])
{:error, error} -> {:error, error}
end
end
@doc """
Substitute values in string with ++ to separate items.
iex> import RoboticaCommon.Strings
iex> solve_string_combined("{i}+{j}++1+2", %{"i" => "10", "j" => "20"})
{:ok, "10+201+2"}
iex> import RoboticaCommon.Strings
iex> solve_string_combined("={i}+{j}++1+2", %{"i" => "10", "j" => "20"})
{:ok, "301+2"}
iex> import RoboticaCommon.Strings
iex> solve_string_combined("={x}+{y}++1+2", %{"i" => "10", "j" => "20"})
{:error, "Cannot find x in lookup table of i=10, j=20."}
iex> import RoboticaCommon.Strings
iex> solve_string_combined("={i}+{j}++1+2", %{"i" => "10a", "j" => "20"})
{:error, "Cannot parse '10a' as float."}
"""
@spec solve_string_combined(String.t(), %{required(String.t()) => String.t()}) ::
{:ok, String.t()} | {:error, String.t()}
def solve_string_combined(string, values) do
String.split(string, "++")
|> solve_string_combined_list(values, [])
end
end
|
robotica_common/lib/strings.ex
| 0.803868 | 0.471406 |
strings.ex
|
starcoder
|
defmodule AWS.Glacier do
@moduledoc """
Amazon Glacier is a storage solution for "cold data."
Amazon Glacier is an extremely low-cost storage service that provides
secure, durable, and easy-to-use storage for data backup and archival. With
Amazon Glacier, customers can store their data cost effectively for months,
years, or decades. Amazon Glacier also enables customers to offload the
administrative burdens of operating and scaling storage to AWS, so they
don't have to worry about capacity planning, hardware provisioning, data
replication, hardware failure and recovery, or time-consuming hardware
migrations.
Amazon Glacier is a great storage choice when low storage cost is paramount
and your data is rarely retrieved. If your application requires fast or
frequent access to your data, consider using Amazon S3. For more
information, see [Amazon Simple Storage Service (Amazon
S3)](http://aws.amazon.com/s3/).
You can store any kind of data in any format. There is no maximum limit on
the total amount of data you can store in Amazon Glacier.
If you are a first-time user of Amazon Glacier, we recommend that you begin
by reading the following sections in the *Amazon Glacier Developer Guide*:
<ul> <li> [What is Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html)
- This section of the Developer Guide describes the underlying data model,
the operations it supports, and the AWS SDKs that you can use to interact
with the service.
</li> <li> [Getting Started with Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html)
- The Getting Started section walks you through the process of creating a
vault, uploading archives, creating jobs to download archives, retrieving
the job output, and deleting archives.
</li> </ul>
"""
@doc """
This operation aborts a multipart upload identified by the upload ID.
After the Abort Multipart Upload request succeeds, you cannot upload any
more parts to the multipart upload or complete the multipart upload.
Aborting a completed upload fails. However, aborting an already-aborted
upload will succeed, for a short time. For more information about uploading
a part and completing a multipart upload, see `UploadMultipartPart` and
`CompleteMultipartUpload`.
This operation is idempotent.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Working with
Archives in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [Abort Multipart
Upload](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def abort_multipart_upload(client, account_id, upload_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
headers = []
request(client, :delete, url, headers, input, options, 204)
end
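# For illustration (identifiers hypothetical; Glacier accepts "-" as the
# account ID to mean the account that owns the credentials):
#
#     AWS.Glacier.abort_multipart_upload(client, "-", "upload-id", "my-vault", %{})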
@doc """
This operation aborts the vault locking process if the vault lock is not in
the `Locked` state. If the vault lock is in the `Locked` state when this
operation is requested, the operation returns an `AccessDeniedException`
error. Aborting the vault locking process removes the vault lock policy
from the specified vault.
A vault lock is put into the `InProgress` state by calling
`InitiateVaultLock`. A vault lock is put into the `Locked` state by calling
`CompleteVaultLock`. You can get the state of a vault lock by calling
`GetVaultLock`. For more information about the vault locking process, see
[Amazon Glacier Vault
Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
For more information about vault lock policies, see [Amazon Glacier Access
Control with Vault Lock
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
This operation is idempotent. You can successfully invoke this operation
multiple times, if the vault lock is in the `InProgress` state or if there
is no policy associated with the vault.
"""
def abort_vault_lock(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
headers = []
request(client, :delete, url, headers, input, options, 204)
end
@doc """
This operation adds the specified tags to a vault. Each tag is composed of
a key and a value. Each vault can have up to 10 tags. If your request would
cause the tag limit for the vault to be exceeded, the operation throws the
`LimitExceededException` error. If a tag already exists on the vault under
a specified key, the existing key value will be overwritten. For more
information about tags, see [Tagging Amazon Glacier
Resources](http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def add_tags_to_vault(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags?operation=add"
headers = []
request(client, :post, url, headers, input, options, 204)
end
@doc """
You call this operation to inform Amazon Glacier that all the archive parts
have been uploaded and that Amazon Glacier can now assemble the archive
from the uploaded parts. After assembling and saving the archive to the
vault, Amazon Glacier returns the URI path of the newly created archive
resource. Using the URI path, you can then access the archive. After you
upload an archive, you should save the archive ID returned to retrieve the
archive at a later point. You can also get the vault inventory to obtain a
list of archive IDs in a vault. For more information, see `InitiateJob`.
In the request, you must include the computed SHA256 tree hash of the
entire archive you have uploaded. For information about computing a SHA256
tree hash, see [Computing
Checksums](http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
On the server side, Amazon Glacier also constructs the SHA256 tree hash of
the assembled archive. If the values match, Amazon Glacier saves the
archive to the vault; otherwise, it returns an error, and the operation
fails. The `ListParts` operation returns a list of parts uploaded for a
specific multipart upload. It includes checksum information for each
uploaded part that can be used to debug a bad checksum issue.
Additionally, Amazon Glacier checks for any missing content ranges when
assembling the archive; if missing content ranges are found, Amazon
Glacier returns an error and the operation fails.
Complete Multipart Upload is an idempotent operation. After your first
successful complete multipart upload, if you call the operation again
within a short period, the operation will succeed and return the same
archive ID. This is useful in the event you experience a network issue that
causes an aborted connection or receive a 500 server error, in which case
you can repeat your Complete Multipart Upload request and get the same
archive ID without creating duplicate archives. Note, however, that after
the multipart upload completes, you cannot call the List Parts operation
and the multipart upload will not appear in List Multipart Uploads
response, even if idempotent complete is possible.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large
Archives in Parts (Multipart
Upload)](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Complete Multipart
Upload](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def complete_multipart_upload(client, account_id, upload_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
headers = []
if Dict.has_key?(input, "archiveSize") do
headers = [{"x-amz-archive-size", input["archiveSize"]}|headers]
input = Dict.delete(input, "archiveSize")
end
if Dict.has_key?(input, "checksum") do
headers = [{"x-amz-sha256-tree-hash", input["checksum"]}|headers]
input = Dict.delete(input, "checksum")
end
case request(client, :post, url, headers, input, options, 201) do
  {:ok, body, response} ->
    resp_headers = Map.new(response.headers)

    body =
      Enum.reduce(
        [{"x-amz-archive-id", "archiveId"},
         {"x-amz-sha256-tree-hash", "checksum"},
         {"Location", "location"}],
        body || %{},
        fn {header, key}, acc ->
          case resp_headers[header] do
            nil -> acc
            value -> Map.put(acc, key, value)
          end
        end
      )

    {:ok, body, response}

  result ->
    result
end
end
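# Illustrative sketch (hypothetical helpers, not part of the generated API):
# computing the SHA256 tree hash that the documentation above requires.
# Glacier hashes each 1 MiB chunk, then repeatedly hashes concatenated
# digest pairs until a single root digest remains.
@doc false
def example_tree_hash(data) when is_binary(data) do
  data
  |> example_chunks()
  |> Enum.map(&:crypto.hash(:sha256, &1))
  |> example_reduce_level()
  |> Base.encode16(case: :lower)
end
defp example_chunks(<<chunk::binary-size(1_048_576), rest::binary>>) when byte_size(rest) > 0 do
  [chunk | example_chunks(rest)]
end
defp example_chunks(chunk), do: [chunk]
defp example_reduce_level([digest]), do: digest
defp example_reduce_level(digests) do
  digests
  |> Enum.chunk_every(2)
  |> Enum.map(fn
    [a, b] -> :crypto.hash(:sha256, a <> b)
    [a] -> a
  end)
  |> example_reduce_level()
end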
@doc """
This operation completes the vault locking process by transitioning the
vault lock from the `InProgress` state to the `Locked` state, which causes
the vault lock policy to become unchangeable. A vault lock is put into the
`InProgress` state by calling `InitiateVaultLock`. You can obtain the state
of the vault lock by calling `GetVaultLock`. For more information about the
vault locking process, see [Amazon Glacier Vault
Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
This operation is idempotent. This request is always successful if the
vault lock is in the `Locked` state and the provided lock ID matches the
lock ID originally used to lock the vault.
If an invalid lock ID is passed in the request when the vault lock is in
the `Locked` state, the operation returns an `AccessDeniedException` error.
If an invalid lock ID is passed in the request when the vault lock is in
the `InProgress` state, the operation throws an `InvalidParameter` error.
"""
def complete_vault_lock(client, account_id, lock_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy/#{URI.encode(lock_id)}"
headers = []
request(client, :post, url, headers, input, options, 204)
end
@doc """
This operation creates a new vault with the specified name. The name of the
vault must be unique within a region for an AWS account. You can create up
to 1,000 vaults per account. If you need to create more vaults, contact
Amazon Glacier.
You must use the following guidelines when naming a vault.
<ul> <li> Names can be between 1 and 255 characters long.
</li> <li> Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
(hyphen), and '.' (period).
</li> </ul> This operation is idempotent.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Creating a Vault
in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html)
and [Create Vault
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html)
in the *Amazon Glacier Developer Guide*.
"""
def create_vault(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
headers = []
case request(client, :put, url, headers, input, options, 201) do
  {:ok, body, response} ->
    body =
      case Map.new(response.headers)["Location"] do
        nil -> body
        location -> Map.put(body || %{}, "location", location)
      end

    {:ok, body, response}

  result ->
    result
end
end
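# Illustrative sketch (hypothetical helper, not part of the generated API):
# validating a vault name against the guidelines above, i.e. 1 to 255
# characters drawn from a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen),
# and '.' (period).
@doc false
def example_valid_vault_name?(name) when is_binary(name) do
  Regex.match?(~r/\A[a-zA-Z0-9_.\-]{1,255}\z/, name)
end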
@doc """
This operation deletes an archive from a vault. Subsequent requests to
initiate a retrieval of this archive will fail. Archive retrievals that are
in progress for this archive ID may or may not succeed according to the
following scenarios:
<ul> <li> If the archive retrieval job is actively preparing the data for
download when Amazon Glacier receives the delete archive request, the
archival retrieval operation might fail.
</li> <li> If the archive retrieval job has successfully prepared the
archive for download when Amazon Glacier receives the delete archive
request, you will be able to download the output.
</li> </ul> This operation is idempotent. Attempting to delete an
already-deleted archive does not result in an error.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Deleting an
Archive in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html)
and [Delete
Archive](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html)
in the *Amazon Glacier Developer Guide*.
"""
def delete_archive(client, account_id, archive_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/archives/#{URI.encode(archive_id)}"
headers = []
request(client, :delete, url, headers, input, options, 204)
end
@doc """
This operation deletes a vault. Amazon Glacier will delete a vault only if
there are no archives in the vault as of the last inventory and there have
been no writes to the vault since the last inventory. If either of these
conditions is not satisfied, the vault deletion fails (that is, the vault
is not removed) and Amazon Glacier returns an error. You can use
`DescribeVault` to return the number of archives in a vault, and you can
use [Initiate a Job (POST
jobs)](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html)
to initiate a new inventory retrieval for a vault. The inventory contains
the archive IDs you use to delete archives using [Delete Archive (DELETE
archive)](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html).
This operation is idempotent.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Deleting a Vault
in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html)
and [Delete Vault
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html)
in the *Amazon Glacier Developer Guide*.
"""
def delete_vault(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
headers = []
request(client, :delete, url, headers, input, options, 204)
end
@doc """
This operation deletes the access policy associated with the specified
vault. The operation is eventually consistent; that is, it might take some
time for Amazon Glacier to completely remove the access policy, and you
might still see the effect of the policy for a short time after you send
the delete request.
This operation is idempotent. You can invoke delete multiple times, even if
there is no policy associated with the vault. For more information about
vault access policies, see [Amazon Glacier Access Control with Vault Access
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def delete_vault_access_policy(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
headers = []
request(client, :delete, url, headers, input, options, 204)
end
@doc """
This operation deletes the notification configuration set for a vault. The
operation is eventually consistent; that is, it might take some time for
Amazon Glacier to completely disable the notifications and you might still
receive some notifications for a short time after you send the delete
request.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Configuring Vault
Notifications in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
and [Delete Vault Notification Configuration
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html)
in the Amazon Glacier Developer Guide.
"""
def delete_vault_notifications(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
headers = []
request(client, :delete, url, headers, input, options, 204)
end
@doc """
This operation returns information about a job you previously initiated,
including the job initiation date, the user who initiated the job, the job
status code/message and the Amazon SNS topic to notify after Amazon Glacier
completes the job. For more information about initiating a job, see
`InitiateJob`.
<note> This operation enables you to check the status of your job. However,
it is strongly recommended that you set up an Amazon SNS topic and specify
it in your initiate job request so that Amazon Glacier can notify the topic
after it completes the job.
</note> A job ID will not expire for at least 24 hours after Amazon Glacier
completes the job.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For more information about using this operation, see the documentation for
the underlying REST API [Describe
Job](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def describe_job(client, account_id, job_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs/#{URI.encode(job_id)}"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation returns information about a vault, including the vault's
Amazon Resource Name (ARN), the date the vault was created, the number of
archives it contains, and the total size of all the archives in the vault.
The number of archives and their total size are as of the last inventory
generation. This means that if you add or remove an archive from a vault,
and then immediately use Describe Vault, the change in contents will not be
immediately reflected. If you want to retrieve the latest inventory of the
vault, use `InitiateJob`. Amazon Glacier generates vault inventories
approximately daily. For more information, see [Downloading a Vault
Inventory in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Retrieving Vault
Metadata in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html)
and [Describe Vault
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def describe_vault(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation returns the current data retrieval policy for the account
and region specified in the GET request. For more information about data
retrieval policies, see [Amazon Glacier Data Retrieval
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
"""
def get_data_retrieval_policy(client, account_id, options \\ []) do
url = "/#{URI.encode(account_id)}/policies/data-retrieval"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation downloads the output of the job you initiated using
`InitiateJob`. Depending on the job type you specified when you initiated
the job, the output will be either the content of an archive or a vault
inventory.
You can download all the job output or download a portion of the output by
specifying a byte range. In the case of an archive retrieval job, depending
on the byte range you specify, Amazon Glacier returns the checksum for the
portion of the data. You can compute the checksum on the client and verify
that the values match to ensure the portion you downloaded is the correct
data.
A job ID will not expire for at least 24 hours after Amazon Glacier
completes the job. For both archive and inventory retrieval jobs, you
should verify the downloaded size against the size returned in the headers
from the **Get Job Output** response.
For archive retrieval jobs, you should also verify that the size is what
you expected. If you download a portion of the output, the expected size is
based on the range of bytes you specified. For example, if you specify a
range of `bytes=0-1048575`, you should verify your download size is
1,048,576 bytes. If you download an entire archive, the expected size is
the size of the archive when you uploaded it to Amazon Glacier. The expected
size is also returned in the headers from the **Get Job Output** response.
In the case of an archive retrieval job, depending on the byte range you
specify, Amazon Glacier returns the checksum for the portion of the data.
To ensure the portion you downloaded is the correct data, compute the
checksum on the client, verify that the values match, and verify that the
size is what you expected.
A job ID does not expire for at least 24 hours after Amazon Glacier
completes the job. That is, you can download the job output within the 24
hours period after Amazon Glacier completes the job.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and the underlying REST API, see [Downloading a
Vault
Inventory](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html),
[Downloading an
Archive](http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html),
and [Get Job Output
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html)
"""
def get_job_output(client, account_id, job_id, vault_name, range \\ nil, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs/#{URI.encode(job_id)}/output"
headers = []
headers = if is_nil(range), do: headers, else: [{"Range", range} | headers]
case request(client, :get, url, headers, nil, options, nil) do
  {:ok, body, response} ->
    resp_headers = Map.new(response.headers)

    body =
      Enum.reduce(
        [{"Accept-Ranges", "acceptRanges"},
         {"x-amz-archive-description", "archiveDescription"},
         {"x-amz-sha256-tree-hash", "checksum"},
         {"Content-Range", "contentRange"},
         {"Content-Type", "contentType"}],
        body || %{},
        fn {header, key}, acc ->
          case resp_headers[header] do
            nil -> acc
            value -> Map.put(acc, key, value)
          end
        end
      )

    {:ok, body, response}

  result ->
    result
end
end
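# Illustrative sketch (hypothetical helper, not part of the generated API):
# deriving the expected download size, in bytes, from a "bytes=first-last"
# range value such as the `bytes=0-1048575` example in the documentation
# above; `example_expected_size("bytes=0-1048575")` returns 1_048_576.
@doc false
def example_expected_size("bytes=" <> range) do
  [first, last] =
    range
    |> String.split("-")
    |> Enum.map(&String.to_integer/1)
  last - first + 1
end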
@doc """
This operation retrieves the `access-policy` subresource set on the vault;
for more information on setting this subresource, see [Set Vault Access
Policy (PUT
access-policy)](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html).
If there is no access policy set on the vault, the operation returns a `404
Not found` error. For more information about vault access policies, see
[Amazon Glacier Access Control with Vault Access
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def get_vault_access_policy(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation retrieves the following attributes from the `lock-policy`
subresource set on the specified vault:
<ul> <li> The vault lock policy set on the vault.
</li> <li> The state of the vault lock, which is either `InProgress` or
`Locked`.
</li> <li> When the lock ID expires. The lock ID is used to complete the
vault locking process.
</li> <li> When the vault lock was initiated and put into the `InProgress`
state.
</li> </ul> A vault lock is put into the `InProgress` state by calling
`InitiateVaultLock`. A vault lock is put into the `Locked` state by calling
`CompleteVaultLock`. You can abort the vault locking process by calling
`AbortVaultLock`. For more information about the vault locking process, see
[Amazon Glacier Vault
Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
If there is no vault lock policy set on the vault, the operation returns a
`404 Not found` error. For more information about vault lock policies, see
[Amazon Glacier Access Control with Vault Lock
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
"""
def get_vault_lock(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation retrieves the `notification-configuration` subresource of
the specified vault.
For information about setting a notification configuration on a vault, see
`SetVaultNotifications`. If a notification configuration for a vault is not
set, the operation returns a `404 Not Found` error. For more information
about vault notifications, see [Configuring Vault Notifications in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html).
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Configuring Vault
Notifications in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
and [Get Vault Notification Configuration
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def get_vault_notifications(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation initiates a job of the specified type, which can be a
select, an archival retrieval, or a vault retrieval. For more information
about using this operation, see the documentation for the underlying REST
API [Initiate a
Job](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html).
"""
def initiate_job(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs"
headers = []
case request(client, :post, url, headers, input, options, 202) do
  {:ok, body, response} ->
    resp_headers = Map.new(response.headers)

    body =
      Enum.reduce(
        [{"x-amz-job-id", "jobId"},
         {"x-amz-job-output-path", "jobOutputPath"},
         {"Location", "location"}],
        body || %{},
        fn {header, key}, acc ->
          case resp_headers[header] do
            nil -> acc
            value -> Map.put(acc, key, value)
          end
        end
      )

    {:ok, body, response}

  result ->
    result
end
end
@doc """
This operation initiates a multipart upload. Amazon Glacier creates a
multipart upload resource and returns its ID in the response. The multipart
upload ID is used in subsequent requests to upload parts of an archive (see
`UploadMultipartPart`).
When you initiate a multipart upload, you specify the part size in number
of bytes. The part size must be a megabyte (1024 KB) multiplied by a power
of 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608
(8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum
is 4 GB.
Every part you upload to this resource (see `UploadMultipartPart`), except
the last one, must have the same size. The last one can be the same size or
smaller. For example, suppose you want to upload a 16.2 MB file. If you
initiate the multipart upload with a part size of 4 MB, you will upload
four parts of 4 MB each and one part of 0.2 MB.
<note> You don't need to know the size of the archive when you start a
multipart upload because Amazon Glacier does not require you to specify the
overall archive size.
</note> After you complete the multipart upload, Amazon Glacier removes the
multipart upload resource referenced by the ID. Amazon Glacier also removes
the multipart upload resource if you cancel the multipart upload, and it
may remove the resource after a period of 24 hours with no activity.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large
Archives in Parts (Multipart
Upload)](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Initiate Multipart
Upload](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def initiate_multipart_upload(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads"
headers = []
if Dict.has_key?(input, "archiveDescription") do
headers = [{"x-amz-archive-description", input["archiveDescription"]}|headers]
input = Dict.delete(input, "archiveDescription")
end
if Dict.has_key?(input, "partSize") do
headers = [{"x-amz-part-size", input["partSize"]}|headers]
input = Dict.delete(input, "partSize")
end
case request(client, :post, url, headers, input, options, 201) do
  {:ok, body, response} ->
    resp_headers = Map.new(response.headers)

    body =
      Enum.reduce(
        [{"Location", "location"},
         {"x-amz-multipart-upload-id", "uploadId"}],
        body || %{},
        fn {header, key}, acc ->
          case resp_headers[header] do
            nil -> acc
            value -> Map.put(acc, key, value)
          end
        end
      )

    {:ok, body, response}

  result ->
    result
end
end
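# Illustrative sketch (hypothetical helpers, not part of the generated API):
# checking that a part size is 1 MiB multiplied by a power of two, within
# the 1 MiB to 4 GiB bounds described above.
@doc false
def example_valid_part_size?(size) when is_integer(size) do
  mib = 1_048_576
  size >= mib and size <= 4 * 1024 * mib and rem(size, mib) == 0 and
    example_power_of_two?(div(size, mib))
end
defp example_power_of_two?(1), do: true
defp example_power_of_two?(n) when n > 1 and rem(n, 2) == 0, do: example_power_of_two?(div(n, 2))
defp example_power_of_two?(_), do: false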
@doc """
This operation initiates the vault locking process by doing the following:
<ul> <li> Installing a vault lock policy on the specified vault.
</li> <li> Setting the lock state of vault lock to `InProgress`.
</li> <li> Returning a lock ID, which is used to complete the vault locking
process.
</li> </ul> You can set one vault lock policy for each vault and this
policy can be up to 20 KB in size. For more information about vault lock
policies, see [Amazon Glacier Access Control with Vault Lock
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
You must complete the vault locking process within 24 hours after the vault
lock enters the `InProgress` state. After the 24 hour window ends, the lock
ID expires, the vault automatically exits the `InProgress` state, and the
vault lock policy is removed from the vault. You call `CompleteVaultLock`
to complete the vault locking process by setting the state of the vault
lock to `Locked`.
After a vault lock is in the `Locked` state, you cannot initiate a new
vault lock for the vault.
You can abort the vault locking process by calling `AbortVaultLock`. You
can get the state of the vault lock by calling `GetVaultLock`. For more
information about the vault locking process, see [Amazon Glacier Vault
Lock](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
If this operation is called when the vault lock is in the `InProgress`
state, the operation returns an `AccessDeniedException` error. When the
vault lock is in the `InProgress` state you must call `AbortVaultLock`
before you can initiate a new vault lock policy.
"""
def initiate_vault_lock(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
headers = []
case request(client, :post, url, headers, input, options, 201) do
  {:ok, body, response} ->
    body =
      case Map.new(response.headers)["x-amz-lock-id"] do
        nil -> body
        lock_id -> Map.put(body || %{}, "lockId", lock_id)
      end

    {:ok, body, response}

  result ->
    result
end
end
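# Illustrative sketch (hypothetical helper, not part of the generated API):
# the vault-locking flow described above, i.e. initiate, then complete with
# the returned lock ID. Assumes the response body is a map carrying the
# "lockId"; the exact `input` policy shape is defined by the Glacier REST API.
@doc false
def example_lock_flow(client, account_id, vault_name, input) do
  with {:ok, body, _response} <- initiate_vault_lock(client, account_id, vault_name, input),
       %{"lockId" => lock_id} <- body,
       {:ok, _body, _response} <- complete_vault_lock(client, account_id, lock_id, vault_name, %{}) do
    {:ok, lock_id}
  end
end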
@doc """
This operation lists jobs for a vault, including jobs that are in-progress
and jobs that have recently finished. The List Jobs operation returns a list
of these jobs sorted by job initiation time.
<note> Amazon Glacier retains recently completed jobs for a period before
deleting them; however, it eventually removes completed jobs. The output of
completed jobs can be retrieved. Retaining completed jobs for a period of
time after they have completed enables you to get a job output in the event
you miss the job completion notification or your first attempt to download
it fails. For example, suppose you start an archive retrieval job to
download an archive. After the job completes, you start to download the
archive but encounter a network error. In this scenario, you can retry and
download the archive while the job exists.
</note> The List Jobs operation supports pagination. You should always
check the response `Marker` field. If there are no more jobs to list, the
`Marker` field is set to `null`. If there are more jobs to list, the
`Marker` field is set to a non-null value, which you can use to continue
the pagination of the list. To return a list of jobs that begins at a
specific job, set the marker request parameter to the `Marker` value for
that job that you obtained from a previous List Jobs request.
You can set a maximum limit for the number of jobs returned in the response
by specifying the `limit` parameter in the request. The default limit is
50. The number of jobs returned might be fewer than the limit, but the
number of returned jobs never exceeds the limit.
Additionally, you can filter the jobs list returned by specifying the
optional `statuscode` parameter or `completed` parameter, or both. Using
the `statuscode` parameter, you can specify to return only jobs that match
either the `InProgress`, `Succeeded`, or `Failed` status. Using the
`completed` parameter, you can specify to return only jobs that were
completed (`true`) or jobs that were not completed (`false`).
For more information about using this operation, see the documentation for
the underlying REST API [List
Jobs](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html).
"""
def list_jobs(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation lists in-progress multipart uploads for the specified vault.
An in-progress multipart upload is a multipart upload that has been
initiated by an `InitiateMultipartUpload` request, but has not yet been
completed or aborted. The list returned in the List Multipart Upload
response has no guaranteed order.
The List Multipart Uploads operation supports pagination. By default, this
operation returns up to 50 multipart uploads in the response. You should
always check the response for a `marker` at which to continue the list; if
there are no more items the `marker` is `null`. To return a list of
multipart uploads that begins at a specific upload, set the `marker`
request parameter to the value you obtained from a previous List Multipart
Upload request. You can also limit the number of uploads returned in the
response by specifying the `limit` parameter in the request.
Note the difference between this operation and listing parts (`ListParts`).
The List Multipart Uploads operation lists all multipart uploads for a
vault and does not require a multipart upload ID. The List Parts operation
requires a multipart upload ID since parts are associated with a single
upload.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and the underlying REST API, see [Working with
Archives in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [List Multipart Uploads
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_multipart_uploads(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation lists the parts of an archive that have been uploaded in a
specific multipart upload. You can make this request at any time during an
in-progress multipart upload before you complete the upload (see
`CompleteMultipartUpload`). List Parts returns an error for completed
uploads. The list returned in the List Parts response is sorted by part
range.
The List Parts operation supports pagination. By default, this operation
returns up to 50 uploaded parts in the response. You should always check
the response for a `marker` at which to continue the list; if there are no
more items the `marker` is `null`. To return a list of parts that begins at
a specific part, set the `marker` request parameter to the value you
obtained from a previous List Parts request. You can also limit the number
of parts returned in the response by specifying the `limit` parameter in
the request.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and the underlying REST API, see [Working with
Archives in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [List
Parts](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_parts(client, account_id, upload_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation lists the provisioned capacity units for the specified AWS
account.
"""
def list_provisioned_capacity(client, account_id, options \\ []) do
url = "/#{URI.encode(account_id)}/provisioned-capacity"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation lists all the tags attached to a vault. The operation
returns an empty map if there are no tags. For more information about tags,
see [Tagging Amazon Glacier
Resources](http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def list_tags_for_vault(client, account_id, vault_name, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation lists all vaults owned by the calling user's account. The
list returned in the response is ASCII-sorted by vault name.
By default, this operation returns up to 10 items. If there are more vaults
to list, the response `marker` field contains the vault Amazon Resource
Name (ARN) at which to continue the list with a new List Vaults request;
otherwise, the `marker` field is `null`. To return a list of vaults that
begins at a specific vault, set the `marker` request parameter to the vault
ARN you obtained from a previous List Vaults request. You can also limit
the number of vaults returned in the response by specifying the `limit`
parameter in the request.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Retrieving Vault
Metadata in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html)
and [List Vaults
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_vaults(client, account_id, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults"
headers = []
request(client, :get, url, headers, nil, options, nil)
end
@doc """
This operation purchases a provisioned capacity unit for an AWS account.
"""
def purchase_provisioned_capacity(client, account_id, input, options \\ []) do
url = "/#{URI.encode(account_id)}/provisioned-capacity"
headers = []
case request(client, :post, url, headers, input, options, 201) do
  {:ok, body, response} ->
    body =
      case Map.new(response.headers)["x-amz-capacity-id"] do
        nil -> body
        capacity_id -> Map.put(body || %{}, "capacityId", capacity_id)
      end

    {:ok, body, response}

  result ->
    result
end
end
@doc """
This operation removes one or more tags from the set of tags attached to a
vault. For more information about tags, see [Tagging Amazon Glacier
Resources](http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
This operation is idempotent. The operation will be successful, even if
there are no tags attached to the vault.
"""
def remove_tags_from_vault(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags?operation=remove"
headers = []
request(client, :post, url, headers, input, options, 204)
end
@doc """
This operation sets and then enacts a data retrieval policy in the region
specified in the PUT request. You can set one policy per region for an AWS
account. The policy is enacted within a few minutes of a successful PUT
operation.
The set policy operation does not affect retrieval jobs that were in
progress before the policy was enacted. For more information about data
retrieval policies, see [Amazon Glacier Data Retrieval
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
"""
def set_data_retrieval_policy(client, account_id, input, options \\ []) do
url = "/#{URI.encode(account_id)}/policies/data-retrieval"
headers = []
request(client, :put, url, headers, input, options, 204)
end
@doc """
This operation configures an access policy for a vault and will overwrite
an existing policy. To configure a vault access policy, send a PUT request
to the `access-policy` subresource of the vault. An access policy is
specific to a vault and is also called a vault subresource. You can set one
access policy per vault and the policy can be up to 20 KB in size. For more
information about vault access policies, see [Amazon Glacier Access Control
with Vault Access
Policies](http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def set_vault_access_policy(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
headers = []
request(client, :put, url, headers, input, options, 204)
end
@doc """
This operation configures notifications that will be sent when specific
events happen to a vault. By default, you don't get any notifications.
To configure vault notifications, send a PUT request to the
`notification-configuration` subresource of the vault. The request should
include a JSON document that provides an Amazon SNS topic and specific
events for which you want Amazon Glacier to send notifications to the
topic.
Amazon SNS topics must grant permission to the vault to be allowed to
publish notifications to the topic. You can configure a vault to publish a
notification for the following vault events:
<ul> <li> **ArchiveRetrievalCompleted** This event occurs when a job that
was initiated for an archive retrieval is completed (`InitiateJob`). The
status of the completed job can be "Succeeded" or "Failed". The
notification sent to the SNS topic is the same output as returned from
`DescribeJob`.
</li> <li> **InventoryRetrievalCompleted** This event occurs when a job
that was initiated for an inventory retrieval is completed (`InitiateJob`).
The status of the completed job can be "Succeeded" or "Failed". The
notification sent to the SNS topic is the same output as returned from
`DescribeJob`.
</li> </ul> An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM) users don't
have any permissions by default. You must grant them explicit permission to
perform specific actions. For more information, see [Access Control Using
AWS Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Configuring Vault
Notifications in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
and [Set Vault Notification Configuration
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html)
in the *Amazon Glacier Developer Guide*.
"""
def set_vault_notifications(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
headers = []
request(client, :put, url, headers, input, options, 204)
end
@doc """
This operation adds an archive to a vault. This is a synchronous operation,
and for a successful upload, your data is durably persisted. Amazon Glacier
returns the archive ID in the `x-amz-archive-id` header of the response.
You must use the archive ID to access your data in Amazon Glacier. After
you upload an archive, you should save the archive ID returned so that you
can retrieve or delete the archive later. Besides saving the archive ID,
you can also index it and give it a friendly name to allow for better
searching. You can also use the optional archive description field to
specify how the archive is referred to in an external index of archives,
such as you might create in Amazon DynamoDB. You can also get the vault
inventory to obtain a list of archive IDs in a vault. For more information,
see `InitiateJob`.
You must provide a SHA256 tree hash of the data you are uploading. For
information about computing a SHA256 tree hash, see [Computing
Checksums](http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
You can optionally specify an archive description of up to 1,024 printable
ASCII characters. You can get the archive description when you either
retrieve the archive or get the vault inventory. For more information, see
`InitiateJob`. Amazon Glacier does not interpret the description in any
way. An archive description does not need to be unique. You cannot use the
description to retrieve or sort the archive list.
Archives are immutable. After you upload an archive, you cannot edit the
archive or its description.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading an
Archive in Amazon
Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html)
and [Upload
Archive](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html)
in the *Amazon Glacier Developer Guide*.
"""
def upload_archive(client, account_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/archives"
headers = []
if Dict.has_key?(input, "archiveDescription") do
headers = [{"x-amz-archive-description", input["archiveDescription"]}|headers]
input = Dict.delete(input, "archiveDescription")
end
if Dict.has_key?(input, "checksum") do
headers = [{"x-amz-sha256-tree-hash", input["checksum"]}|headers]
input = Dict.delete(input, "checksum")
end
case request(client, :post, url, headers, input, options, 201) do
  {:ok, body, response} ->
    resp_headers = Map.new(response.headers)

    body =
      Enum.reduce(
        [{"x-amz-archive-id", "archiveId"},
         {"x-amz-sha256-tree-hash", "checksum"},
         {"Location", "location"}],
        body || %{},
        fn {header, key}, acc ->
          case resp_headers[header] do
            nil -> acc
            value -> Map.put(acc, key, value)
          end
        end
      )

    {:ok, body, response}

  result ->
    result
end
end
@doc """
This operation uploads a part of an archive. You can upload archive parts
in any order. You can also upload them in parallel. You can upload up to
10,000 parts for a multipart upload.
Amazon Glacier rejects your upload part request if any of the following
conditions is true:
<ul> <li> **SHA256 tree hash does not match**: To ensure that part data is
not corrupted in transmission, you compute a SHA256 tree hash of the part
and include it in your request. Upon receiving the part data, Amazon
Glacier also computes a SHA256 tree hash. If these hash values don't match,
the operation fails. For information about computing a SHA256 tree hash,
see [Computing
Checksums](http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
</li> <li> **Part size does not match**: The size of each part except the
last must match the size specified in the corresponding
`InitiateMultipartUpload` request. The size of the last part must be the
same size as, or smaller than, the specified size.
<note> If you upload a part whose size is smaller than the part size you
specified in your initiate multipart upload request and that part is not
the last part, then the upload part request will succeed. However, the
subsequent Complete Multipart Upload request will fail.
</note> </li> <li> **Range does not align**: The byte range value in the
request does not align with the part size specified in the corresponding
initiate request. For example, if you specify a part size of 4194304 bytes
(4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8
MB - 1) are valid part ranges. However, if you set a range value of 2 MB to
6 MB, the range does not align with the part size and the upload will fail.
</li> </ul> This operation is idempotent. If you upload the same part
multiple times, the data included in the most recent request overwrites the
previously uploaded data.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large
Archives in Parts (Multipart
Upload)](http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Upload Part
](http://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html)
in the *Amazon Glacier Developer Guide*.
"""
def upload_multipart_part(client, account_id, upload_id, vault_name, input, options \\ []) do
url = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
headers = []
if Dict.has_key?(input, "checksum") do
headers = [{"x-amz-sha256-tree-hash", input["checksum"]}|headers]
input = Dict.delete(input, "checksum")
end
if Dict.has_key?(input, "range") do
headers = [{"Content-Range", input["range"]}|headers]
input = Dict.delete(input, "range")
end
case request(client, :put, url, headers, input, options, 204) do
  {:ok, body, response} ->
    body =
      case Map.new(response.headers)["x-amz-sha256-tree-hash"] do
        nil -> body
        checksum -> Map.put(body || %{}, "checksum", checksum)
      end

    {:ok, body, response}

  result ->
    result
end
end
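# Illustrative sketch (hypothetical helper, not part of the generated API):
# building the Content-Range value for the zero-based part `n`, following
# the alignment rules above. With a 4 MiB part size, part 0 covers bytes
# 0-4194303 and part 1 covers bytes 4194304-8388607.
@doc false
def example_part_range(n, part_size, total_size) do
  first = n * part_size
  last = min(first + part_size, total_size) - 1
  "bytes #{first}-#{last}/*"
end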
defp request(client, method, url, headers, input, options, success_status_code) do
client = %{client | service: "glacier"}
host = get_host("glacier", client)
url = get_url(host, url, client)
headers = Enum.concat([{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"}],
headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
defp perform_request(method, url, payload, headers, options, nil) do
  case HTTPoison.request(method, url, payload, headers, options) do
    # An empty body cannot be parsed as JSON, so return the bare response.
    {:ok, response = %HTTPoison.Response{status_code: status, body: ""}}
    when status in [200, 202, 204] ->
      {:ok, response}

    {:ok, response = %HTTPoison.Response{status_code: status, body: body}}
    when status in [200, 202, 204] ->
      {:ok, Poison.Parser.parse!(body), response}

    {:ok, _response = %HTTPoison.Response{body: body}} ->
      reason = Poison.Parser.parse!(body)["message"]
      {:error, reason}

    {:error, %HTTPoison.Error{reason: reason}} ->
      {:error, %HTTPoison.Error{reason: reason}}
  end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: ^success_status_code, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: ^success_status_code, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
reason = Poison.Parser.parse!(body)["message"]
{:error, reason}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, url, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{url}/"
end
defp encode_payload(input) do
if input != nil do
Poison.Encoder.encode(input, [])
else
""
end
end
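# Illustrative sketch (hypothetical values, not part of the generated API):
# the shape of the client map the functions above expect, matching the
# fields read by `request/7`, `get_host/2`, and `get_url/3`. The credential
# fields are assumed to be what `AWS.Request.sign_v4/5` reads.
@doc false
def example_client do
  %{
    service: "glacier",
    region: "us-east-1",
    endpoint: "amazonaws.com",
    proto: "https",
    port: "443",
    access_key_id: "an-example-access-key-id",
    secret_access_key: "an-example-secret-access-key"
  }
end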
end
|
lib/aws/glacier.ex
| 0.853638 | 0.594757 |
glacier.ex
|
starcoder
|
defmodule Mix.Task.Compiler do
@moduledoc """
This module defines the behaviour for a Mix task that does compilation.
A Mix compiler task can be defined by simply using `Mix.Task.Compiler`
in a module whose name starts with `Mix.Tasks.Compile.` and defining
the [`run/1`](`c:run/1`) function:
defmodule Mix.Tasks.Compile.MyLanguage do
use Mix.Task.Compiler
def run(_args) do
:ok
end
end
The [`run/1`](`c:run/1`) function returns an atom indicating the status of the
compilation, and optionally can also return a list of "diagnostics"
such as warnings or compilation errors. Doing this enables code
editors to display issues inline without having to analyze the
command-line output.
If the compiler uses manifest files to track stale sources, it should
define `manifests/0`, and if it writes any output to disk it should
also define `clean/0`.
A compiler supports the same attributes for configuration and
documentation as a regular Mix task. See `Mix.Task` for more information.
"""
defmodule Diagnostic do
@moduledoc """
Diagnostic information such as a warning or compilation error.
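
For example, a warning covering the first six characters of line 3 of a
source file could be represented as (illustrative values only):

    %Mix.Task.Compiler.Diagnostic{
      file: "lib/my_code.ex",
      severity: :warning,
      message: "unused variable",
      position: {3, 0, 3, 6},
      compiler_name: "my_language"
    }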
"""
@type t :: %__MODULE__{
file: Path.t(),
severity: severity,
message: String.t(),
position: position,
compiler_name: String.t(),
details: any
}
@typedoc """
Severity of a diagnostic:
* `:error` - An issue that caused compilation to fail
* `:warning` - An issue that did not cause failure but suggests the
programmer may have made a mistake
* `:hint` - A suggestion for style or good practices that is not as
severe as a warning
* `:information` - Any other information relevant to compilation that
does not fit into the above categories
"""
@type severity :: :error | :warning | :information | :hint
@typedoc """
Where in a file the diagnostic applies. Can be either a line number,
a `{line, column}` tuple, or a range specified as `{start_line, start_col,
end_line, end_col}`. A line number of `0` represents an unknown position.
Line numbers are one-based, and column numbers in a range are zero-based and refer
to the cursor position at the start of the character at that index. For example,
to indicate that a diagnostic applies to the first `n` characters of the
first line, the range would be `{1, 0, 1, n}`.
"""
@type position ::
non_neg_integer
| {pos_integer, non_neg_integer}
| {pos_integer, non_neg_integer, pos_integer, non_neg_integer}
@enforce_keys [:file, :severity, :message, :position, :compiler_name]
defstruct [:file, :severity, :message, :position, :compiler_name, :details]
end
@type status :: :ok | :noop | :error
@doc """
Receives command-line arguments and performs compilation. If it
produces errors, warnings, or any other diagnostic information,
it should return a tuple with the status and a list of diagnostics.
"""
@callback run([binary]) :: status | {status, [Diagnostic.t()]}
@doc """
Lists manifest files for the compiler.
"""
@callback manifests() :: [Path.t()]
@doc """
Removes build artifacts and manifests.
"""
@callback clean() :: any
@optional_callbacks clean: 0, manifests: 0
@doc """
Adds a callback that runs after a given compiler.
The callback is invoked after the compiler runs and
it receives a tuple with current status and the list
of diagnostics. It must return the updated status and
diagnostics.
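
For example (an illustrative hook, not one shipped with Mix), to log a
summary after the Elixir compiler runs:

    Mix.Task.Compiler.after_compiler(:elixir, fn {status, diagnostics} ->
      warnings = Enum.count(diagnostics, &(&1.severity == :warning))
      Mix.shell().info("compiled with \#{warnings} warning(s)")
      {status, diagnostics}
    end)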
"""
@doc since: "1.10.0"
@spec after_compiler(atom, ({status, [Diagnostic.t()]} -> {status, [Diagnostic.t()]})) :: :ok
def after_compiler(name, fun) when is_atom(name) and is_function(fun, 1) do
Mix.ProjectStack.prepend_after_compiler(name, fun)
end
@doc false
defmacro __using__(_opts) do
quote do
Enum.each(
Mix.Task.supported_attributes(),
&Module.register_attribute(__MODULE__, &1, persist: true)
)
@behaviour Mix.Task.Compiler
end
end
# Normalize the compiler result to a diagnostic tuple.
@doc false
def normalize(result, name) do
case result do
{status, diagnostics} when status in [:ok, :noop, :error] and is_list(diagnostics) ->
{status, diagnostics}
# ok/noop can come from tasks that have already run
_ when result in [:ok, :noop] ->
{result, []}
_ ->
# TODO: Convert this to an error on v2.0
Mix.shell().error(
"warning: Mix compiler #{inspect(name)} was supposed to return " <>
"{:ok | :noop | :error, [diagnostic]} but it returned #{inspect(result)}"
)
{:noop, []}
end
end
end
|
lib/mix/lib/mix/task.compiler.ex
| 0.831092 | 0.638673 |
task.compiler.ex
|
starcoder
|
defmodule Mix.Tasks.SendSlackUpdateNotification do
use Mix.Task
require Logger
import Ecto.Query, warn: false
alias ChatApi.{Slack, SlackAuthorizations, SlackConversationThreads}
alias ChatApi.SlackAuthorizations.SlackAuthorization
alias ChatApi.SlackConversationThreads.SlackConversationThread
@shortdoc "Sends notifications to Slack for channels missing the Hakerspeak app"
@moduledoc """
This task handles sending notifications to Slack for channels missing the Hakerspeak app,
which is required for some additional functionality (such as resolving/reopening conversations
directly from the Slack channel).
Example:
```
$ mix send_slack_update_notification
$ mix send_slack_update_notification debug
```
On Heroku:
```
$ heroku run "POOL_SIZE=2 mix send_slack_update_notification"
$ heroku run "POOL_SIZE=2 mix send_slack_update_notification debug"
```
"""
@spec run([binary()]) :: list()
def run(args) do
Application.ensure_all_started(:chat_api)
is_debug_mode =
case args do
["debug"] -> true
["DEBUG"] -> true
_ -> false
end
SlackAuthorizations.list_slack_authorizations(%{type: "reply"})
|> Enum.filter(&should_notify_channel?/1)
|> Enum.map(fn auth -> notify_Hakerspeak_app_required(auth, debug: is_debug_mode) end)
end
@spec should_notify_channel?(SlackAuthorization.t()) :: boolean()
def should_notify_channel?(%SlackAuthorization{
account_id: account_id,
channel_id: channel_id,
access_token: access_token
}) do
with %SlackConversationThread{slack_channel: channel, slack_thread_ts: ts} <-
SlackConversationThreads.get_latest_slack_conversation_thread(%{
"account_id" => account_id,
"slack_channel" => channel_id
}),
{:ok, %{body: %{"error" => "not_in_channel", "ok" => false}}} <-
Slack.Client.retrieve_message(channel, ts, access_token) do
true
else
_ -> false
end
end
def should_notify_channel?(_), do: false
@spec notify_Hakerspeak_app_required(SlackAuthorization.t(), keyword()) :: any()
def notify_Hakerspeak_app_required(authorization, opts \\ [])
def notify_Hakerspeak_app_required(%SlackAuthorization{} = authorization, debug: true) do
message = """
Would have send message:
#{inspect(slack_notification_message())}
To Slack channel:
#{inspect(Map.take(authorization, [:channel, :team_name, :webhook_url]))}
"""
Logger.info(message)
end
def notify_Hakerspeak_app_required(
%SlackAuthorization{
webhook_url: webhook_url
},
_opts
) do
message = slack_notification_message()
Logger.info(message)
Slack.Notification.log(message, webhook_url)
end
def notify_Hakerspeak_app_required(_authorization, _opts), do: nil
@spec slack_notification_message() :: String.t()
def slack_notification_message() do
"""
Hi there! :wave: This is an automated message from the Hakerspeak team.
We recently added some enhancements to our Slack integration that allow you to resolve, reopen, and view your conversations' status directly from Slack. :rocket: In order to do this, you'll need to add the *Hakerspeak app* to this channel.
You can do this by typing `/app` in the message box in this channel, clicking on "*Add apps to this channel*", and selecting the *Hakerspeak* app.
(If that doesn't work, try following these instructions: https://slack.com/help/articles/202035138-Add-apps-to-your-Slack-workspace)
If you're still having trouble, feel free to email us at <EMAIL>. We're happy to help!
"""
end
end
|
lib/mix/tasks/send_slack_update_notification.ex
| 0.772702 | 0.501648 |
send_slack_update_notification.ex
|
starcoder
|
defmodule RRPproxy.Deserializer do
@moduledoc """
Documentation for `RRPproxy.Deserializer` which provides deserialization helpers.
**It is used for low-level communication and should not be used directly by users of this library.**
"""
@multi_line_fields [
" procedure",
" policy",
"allowed characters notes",
"restrictions",
"tag"
]
defp to_value(value) do
String.to_integer(value)
rescue
ArgumentError ->
try do
String.to_float(value)
rescue
ArgumentError -> value
end
end
defp to_bool("true"), do: true
defp to_bool("false"), do: false
defp to_bool(1), do: true
defp to_bool(0), do: false
defp to_bool(value), do: value
defp is_multi_line_field(field), do: Enum.any?(@multi_line_fields, &String.contains?(field, &1))
defp case_parts({data, info, extra}, ["column", "0"], value, _is_multi_line, _is_single_result) do
{data, Map.put(info, :column, value), extra}
end
defp case_parts({data, info, extra}, ["first", "0"], value, _is_multi_line, _is_single_result) do
{data, Map.put(info, :offset, String.to_integer(value)), extra}
end
defp case_parts({data, info, extra}, ["last", "0"], value, _is_multi_line, _is_single_result) do
{data, Map.put(info, :last, String.to_integer(value)), extra}
end
defp case_parts({data, info, extra}, ["limit", "0"], value, _is_multi_line, _is_single_result) do
{data, Map.put(info, :limit, String.to_integer(value)), extra}
end
defp case_parts({data, info, extra}, ["total", "0"], value, _is_multi_line, _is_single_result) do
{data, Map.put(info, :total, String.to_integer(value)), extra}
end
defp case_parts({data, info, extra}, ["count", "0"], value, _is_multi_line, _is_single_result) do
{data, Map.put(info, :count, String.to_integer(value)), extra}
end
defp case_parts({data, info, extra}, [field, index], value, is_multi_line, is_single_result) do
f =
field
|> String.replace(" ", "_")
|> String.downcase()
|> String.to_atom()
v =
value
|> to_value()
|> to_bool()
cond do
is_multi_line and is_multi_line_field(field) ->
new_value = Map.get(extra, f, "") <> "#{v}"
{data, info, Map.put(extra, f, new_value)}
is_single_result ->
index_data = Map.get(data, "0", %{})
index_data =
if Map.has_key?(index_data, f) do
new_value =
case Map.get(index_data, f) do
arr when is_list(arr) -> arr ++ [v]
old_value -> [old_value, v]
end
Map.put(index_data, f, new_value)
else
Map.put(index_data, f, v)
end
{Map.put(data, "0", index_data), info, extra}
true ->
index_data = Map.get(data, "#{index}", %{})
index_data =
if Map.has_key?(index_data, f) do
new_value = Map.get(index_data, f) <> "#{v}"
Map.put(index_data, f, new_value)
else
Map.put(index_data, f, v)
end
{Map.put(data, "#{index}", index_data), info, extra}
end
end
defp case_parts(main, _, _, _, _), do: main
@doc "Transforms a Tesla results into a map structure."
@spec to_map({:ok, Tesla.Env.t()} | {:error, any()}, boolean(), boolean()) ::
{:ok, map()} | {:error, any()}
def to_map(
{:ok, %Tesla.Env{status: status, body: body}},
is_multi_line_response,
is_single_result
) do
String.split(body, "\n")
|> Enum.filter(&String.contains?(&1, "="))
|> Enum.map(&Regex.split(~r/\s*=\s*/, &1, parts: 2))
|> List.pop_at(0)
|> work_parts(status, is_multi_line_response, is_single_result)
end
def to_map({:ok, %Tesla.Env{status: status}}, _, _),
do: {:error, %{code: status}}
def to_map({:error, _} = error, _, _), do: error
defp work_parts({nil, []}, _, _, _), do: {:error, :bad_response}
defp work_parts({[_, rcode], parts}, status, is_multi_line_response, is_single_result) do
{[_, rdesc], parts} = List.pop_at(parts, 0)
{data, info, extra} =
Enum.reduce(parts, {%{}, %{}, %{}}, fn [k, v], red ->
parts =
Regex.split(~r/(\]\[|\]|\[)/, k)
|> Enum.filter(fn x -> x != "" end)
|> List.delete_at(0)
case_parts(red, parts, v, is_multi_line_response, is_single_result)
end)
data = Enum.map(data, fn {_, v} -> v end)
data =
if Enum.count(data) > 1 do
data
else
case Enum.at(data, 0) do
nil -> []
only_data -> [Map.merge(only_data, extra)]
end
end
rcode = String.to_integer(rcode)
code =
if status >= 300 or rcode >= 300,
do: :error,
else: :ok
{code, %{code: rcode, description: rdesc, data: data, info: info}}
end
end
|
lib/rrpproxy/deserializer.ex
| 0.59408 | 0.424233 |
deserializer.ex
|
starcoder
|
defmodule UAInspector.ClientHints do
@moduledoc """
Parse and store client hint headers for usage in device detection.
"""
@type t :: %__MODULE__{
architecture: String.t() | :unknown,
bitness: String.t() | :unknown,
full_version: String.t() | :unknown,
full_version_list: [{String.t(), String.t()}],
mobile: boolean,
model: String.t() | :unknown,
platform: String.t() | :unknown,
platform_version: String.t() | :unknown
}
defstruct architecture: :unknown,
bitness: :unknown,
full_version: :unknown,
full_version_list: [],
mobile: false,
model: :unknown,
platform: :unknown,
platform_version: :unknown
@regex_version ~r/^"([^"]+)"; ?v="([^"]+)"(?:, )?/
@doc """
Parses headers into a new client hint struct.
All header names are expected in dash-case (lowercase with dashes) format.
If the same header occurs more than once, the last occurrence is used for the result.
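
For example (the header values are illustrative):

    UAInspector.ClientHints.new([
      {"sec-ch-ua-platform", ~s("macOS")},
      {"sec-ch-ua-mobile", "?0"}
    ])
    #=> %UAInspector.ClientHints{platform: "macOS", mobile: false, ...}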
"""
@spec new([{String.t(), String.t()}]) :: t()
def new([]), do: %__MODULE__{}
def new(headers) do
Enum.reduce(headers, %__MODULE__{}, fn
{"sec-ch-ua", _}, %{full_version_list: [_ | _]} = hints ->
# ignore header if "sec-ch-ua-full-version-list" is already parsed
hints
{"sec-ch-ua", version_list}, hints ->
%{hints | full_version_list: parse_version_list(version_list)}
{"sec-ch-ua-arch", architecture}, hints ->
%{hints | architecture: String.trim(architecture, ~s("))}
{"sec-ch-ua-bitness", bitness}, hints ->
%{hints | bitness: String.trim(bitness, ~s("))}
{"sec-ch-ua-full-version", full_version}, hints ->
%{hints | full_version: String.trim(full_version, ~s("))}
{"sec-ch-ua-full-version-list", version_list}, hints ->
%{hints | full_version_list: parse_version_list(version_list)}
{"sec-ch-ua-mobile", mobile}, hints ->
%{hints | mobile: mobile == "?1"}
{"sec-ch-ua-model", model}, hints ->
%{hints | model: String.trim(model, ~s("))}
{"sec-ch-ua-platform", platform}, hints ->
%{hints | platform: String.trim(platform, ~s("))}
{"sec-ch-ua-platform-version", platform_version}, hints ->
%{hints | platform_version: String.trim(platform_version, ~s("))}
_, hints ->
hints
end)
end
defp parse_version_list(version_list) do
version_list
|> String.split(",")
|> Enum.map(&String.trim/1)
|> Enum.map(&Regex.run(@regex_version, &1, capture: :all_but_first))
|> Enum.reject(&is_nil/1)
|> Enum.map(fn [brand, version] -> {brand, version} end)
end
end
|
lib/ua_inspector/client_hints.ex
| 0.812421 | 0.416856 |
client_hints.ex
|
starcoder
|
defmodule Still.Preprocessor.Renderer do
@moduledoc """
Defines the basic attributes of a markup renderer.
A renderer needs to implement a `compile/2` function and an optional `ast/0`
function. When a markup file is being compiled, a module is created on
demand. This module imports all template helpers defined by Still as well as any
template helper configured by the user:
config :still,
template_helpers: [Your.Module]
The created module implements a `render/0` function which returns the result
of the `compile/2` call.
The optional `ast/0` callback can be used to tap into the AST of the new
module and to import or require any necessary modules.
Markup renderers should `use` `Still.Preprocessor.Renderer` and provide two
options:
* `:extensions` - the list of extensions compiled by the renderer;
* `:preprocessor` - the preprocessor used to render any necessary snippets
(e.g via `Still.Compiler.TemplateHelpers.ContentTag`).
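
A minimal sketch of a renderer built on this module (the module name and
extension are illustrative):

    defmodule Still.Preprocessor.MyMarkup.Renderer do
      use Still.Preprocessor.Renderer,
        extensions: [".mym"],
        preprocessor: Still.Preprocessor.MyMarkup

      @impl true
      def compile(content, _metadata) do
        # Return quoted code that evaluates to the rendered content.
        quote do: unquote(content)
      end
    end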
"""
@type ast :: {atom(), keyword(), list()}
@callback compile(String.t(), [{atom(), any()}]) :: ast()
@callback ast() :: ast()
@optional_callbacks [ast: 0]
import Still.Utils, only: [config: 2]
alias Still.SourceFile
defmacro __using__(opts) do
quote do
@behaviour unquote(__MODULE__)
@preprocessor Keyword.fetch!(unquote(opts), :preprocessor)
@extensions Keyword.fetch!(unquote(opts), :extensions)
      def create(%SourceFile{content: content, metadata: metadata}) do
metadata[:input_file]
|> file_path_to_module_name()
|> create_template_renderer(content, metadata)
end
defp file_path_to_module_name(file) do
name =
Path.split(file)
|> Enum.reject(&Enum.member?(["/" | @extensions], &1))
|> Enum.map(&String.replace(&1, @extensions, ""))
|> Enum.map(&String.replace(&1, "_", ""))
|> Enum.map(&String.capitalize/1)
Module.concat(["R#{Enum.random(0..100_000)}" | [@preprocessor | name]])
end
defp create_template_renderer(name, content, metadata) do
compiled = compile(content, metadata)
module_metadata =
metadata
|> ensure_preprocessor()
|> Map.to_list()
renderer_ast =
if Kernel.function_exported?(__MODULE__, :ast, 0) do
ast()
else
[]
end
ast =
quote do
@compile :nowarn_unused_vars
unquote(user_template_helpers_asts())
unquote(renderer_ast)
use Still.Compiler.TemplateHelpers, unquote(Macro.escape(module_metadata))
Enum.map(unquote(Macro.escape(metadata)), fn {k, v} ->
Module.put_attribute(__MODULE__, k, v)
end)
def render() do
var!(unquote(Macro.var(:assigns, __MODULE__))) = unquote(Macro.escape(metadata))
_ = var!(unquote(Macro.var(:assigns, __MODULE__)))
unquote(compiled)
end
end
with {:module, mod, _, _} <- Module.create(name, ast, Macro.Env.location(__ENV__)) do
mod
end
end
defp user_template_helpers_asts do
config(:template_helpers, [])
|> Enum.map(fn module ->
quote do
import unquote(module)
end
end)
end
defp ensure_preprocessor(metadata) do
Map.put_new(metadata, :preprocessor, @preprocessor)
end
end
end
end
|
lib/still/preprocessor/renderer.ex
| 0.872822 | 0.542076 |
renderer.ex
|
starcoder
|
defmodule Flop do
@moduledoc """
Flop is a helper library for filtering, ordering and pagination with Ecto.
## Usage
Derive `Flop.Schema` in your Ecto schemas.
defmodule Pet do
use Ecto.Schema
@derive {Flop.Schema,
filterable: [:name, :species], sortable: [:name, :age]}
schema "pets" do
field :name, :string
field :age, :integer
field :species, :string
field :social_security_number, :string
end
end
Validate a parameter map to get a `t:Flop.t/0` struct with `Flop.validate/1`.
Add the `t:Flop.t/0` to a `t:Ecto.Queryable.t/0` with `Flop.query/2`.
iex> params = %{"order_by" => ["name", "age"], "limit" => 5}
iex> {:ok, flop} = Flop.validate(params, for: Flop.Pet)
{:ok,
%Flop{
filters: [],
limit: 5,
offset: nil,
order_by: [:name, :age],
order_directions: nil,
page: nil,
page_size: nil
}}
iex> Flop.Pet |> Flop.query(flop)
#Ecto.Query<from p0 in Flop.Pet, order_by: [asc: p0.name, asc: p0.age], \
limit: ^5>
Use `Flop.validate_and_run/3`, `Flop.validate_and_run!/3`, `Flop.run/3`,
`Flop.all/3` or `Flop.meta/3` to query the database. Also consult the
[readme](https://hexdocs.pm/flop/readme.html) for more details.
"""
use Ecto.Schema
import Ecto.Changeset
import Flop.Schema
alias Ecto.Changeset
alias Ecto.Query
alias Ecto.Queryable
alias Flop.CustomTypes.ExistingAtom
alias Flop.CustomTypes.OrderDirection
alias Flop.Filter
alias Flop.Meta
require Ecto.Query
require Logger
@typedoc """
Represents the supported order direction values.
"""
@type order_direction ::
:asc
| :asc_nulls_first
| :asc_nulls_last
| :desc
| :desc_nulls_first
| :desc_nulls_last
@typedoc """
Represents the query parameters for filtering, ordering and pagination.
### Fields
- `limit`, `offset`: Used for pagination. May not be used together with
`page` and `page_size`.
- `page`, `page_size`: Used for pagination. May not be used together with
`limit` and `offset`.
- `order_by`: List of fields to order by. Fields can be restricted by
deriving `Flop.Schema` in your Ecto schema.
- `order_directions`: List of order directions applied to the fields defined
in `order_by`. If empty or the list is shorter than the `order_by` list,
`:asc` will be used as a default for each missing order direction.
- `filters`: List of filters, see `t:Flop.Filter.t/0`.
"""
@type t :: %__MODULE__{
filters: [Filter.t()] | nil,
limit: pos_integer | nil,
offset: non_neg_integer | nil,
order_by: [atom | String.t()] | nil,
order_directions: [order_direction()] | nil,
page: pos_integer | nil,
page_size: pos_integer | nil
}
@primary_key false
embedded_schema do
field :limit, :integer
field :offset, :integer
field :order_by, {:array, ExistingAtom}
field :order_directions, {:array, OrderDirection}
field :page, :integer
field :page_size, :integer
embeds_many :filters, Filter
end
@doc """
Adds clauses for filtering, ordering and pagination to a
`t:Ecto.Queryable.t/0`.
The parameters are represented by the `t:Flop.t/0` type. Any `nil` values
will be ignored.
## Examples
iex> flop = %Flop{limit: 10, offset: 19}
iex> Flop.query(Flop.Pet, flop)
#Ecto.Query<from p0 in Flop.Pet, limit: ^10, offset: ^19>
Or enhance an already defined query:
iex> require Ecto.Query
iex> flop = %Flop{limit: 10}
iex> Flop.Pet |> Ecto.Query.where(species: "dog") |> Flop.query(flop)
#Ecto.Query<from p0 in Flop.Pet, where: p0.species == \"dog\", limit: ^10>
"""
@spec query(Queryable.t(), Flop.t()) :: Queryable.t()
def query(q, flop) do
q
|> filter(flop)
|> order_by(flop)
|> paginate(flop)
end
@doc """
Applies the given Flop to the given queryable and returns all matching
entries.
iex> Flop.all(Flop.Pet, %Flop{}, repo: Flop.Repo)
[]
You can also configure a default repo in your config files:
config :flop, repo: MyApp.Repo
This allows you to omit the third argument:
iex> Flop.all(Flop.Pet, %Flop{})
[]
"""
@doc since: "0.6.0"
@spec all(Queryable.t(), Flop.t(), keyword) :: [any]
def all(q, flop, opts \\ []) do
repo = opts[:repo] || default_repo() || raise no_repo_error("all")
apply(repo, :all, [query(q, flop)])
end
@doc """
Applies the given Flop to the given queryable, retrieves the data and the
meta data.
This function does not validate the given flop parameters. You can validate
the parameters with `Flop.validate/2` or `Flop.validate!/2`, or you can use
`Flop.validate_and_run/3` or `Flop.validate_and_run!/3` instead of this
function.
iex> {data, meta} = Flop.run(Flop.Pet, %Flop{})
iex> data == []
true
iex> match?(%Flop.Meta{}, meta)
true
"""
@doc since: "0.6.0"
@spec run(Queryable.t(), Flop.t(), keyword) :: {[any], Meta.t()}
def run(q, flop, opts \\ []) do
repo = opts[:repo] || default_repo() || raise no_repo_error("run")
{all(q, flop, repo: repo), meta(q, flop, repo: repo)}
end
@doc """
Validates the given flop parameters and retrieves the data and meta data on
success.
iex> {:ok, {[], %Flop.Meta{}}} =
...> Flop.validate_and_run(Flop.Pet, %Flop{}, for: Flop.Pet)
iex> {:error, %Ecto.Changeset{} = changeset} =
...> Flop.validate_and_run(Flop.Pet, %Flop{limit: -1})
iex> changeset.errors
[
limit: {"must be greater than %{number}",
[validation: :number, kind: :greater_than, number: 0]}
]
## Options
- `for`: Passed to `Flop.validate/2`.
- `repo`: The `Ecto.Repo` module. Required if no default repo is configured.
"""
@doc since: "0.6.0"
@spec validate_and_run(Queryable.t(), map | Flop.t(), keyword) ::
{:ok, {[any], Meta.t()}} | {:error, Changeset.t()}
def validate_and_run(q, flop, opts \\ []) do
repo = opts[:repo] || default_repo() || raise no_repo_error("run")
validate_opts = Keyword.take(opts, [:for])
with {:ok, flop} <- validate(flop, validate_opts) do
{:ok, {all(q, flop, repo: repo), meta(q, flop, repo: repo)}}
end
end
@doc """
Same as `Flop.validate_and_run/3`, but raises on error.
"""
@doc since: "0.6.0"
@spec validate_and_run!(Queryable.t(), map | Flop.t(), keyword) ::
{[any], Meta.t()}
def validate_and_run!(q, flop, opts \\ []) do
repo = opts[:repo] || default_repo() || raise no_repo_error("run")
validate_opts = Keyword.take(opts, [:for])
flop = validate!(flop, validate_opts)
{all(q, flop, repo: repo), meta(q, flop, repo: repo)}
end
@doc """
Returns the total count of entries matching the filter conditions of the
Flop.
The pagination and ordering options are disregarded.
iex> Flop.count(Flop.Pet, %Flop{}, repo: Flop.Repo)
0
You can also configure a default repo in your config files:
config :flop, repo: MyApp.Repo
This allows you to omit the third argument:
iex> Flop.count(Flop.Pet, %Flop{})
0
"""
@doc since: "0.6.0"
@spec count(Queryable.t(), Flop.t(), keyword) :: non_neg_integer
def count(q, flop, opts \\ []) do
repo = opts[:repo] || default_repo() || raise no_repo_error("count")
apply(repo, :aggregate, [filter(q, flop), :count])
end
@doc """
Returns meta information for the given query and flop that can be used for
building the pagination links.
iex> Flop.meta(Flop.Pet, %Flop{limit: 10}, repo: Flop.Repo)
%Flop.Meta{
current_offset: 0,
current_page: 1,
flop: %Flop{limit: 10},
has_next_page?: false,
has_previous_page?: false,
next_offset: nil,
next_page: nil,
page_size: 10,
previous_offset: nil,
previous_page: nil,
total_count: 0,
total_pages: 0
}
The function returns both the current offset and the current page, regardless
of the pagination type. If the offset lies in between pages, the current page
number is rounded up. This means that it is possible that the values for
`current_page` and `next_page` can be identical. This can only occur if you
use offset/limit based pagination with arbitrary offsets, but in that case,
you will use the `previous_offset`, `current_offset` and `next_offset` values
to render the pagination links anyway, so this shouldn't be a problem.
"""
@doc since: "0.6.0"
@spec meta(Queryable.t(), Flop.t(), keyword) :: Meta.t()
def meta(q, flop, opts \\ []) do
repo = opts[:repo] || default_repo() || raise no_repo_error("meta")
total_count = count(q, flop, repo: repo)
page_size = flop.page_size || flop.limit
total_pages = get_total_pages(total_count, page_size)
current_offset = get_current_offset(flop)
current_page = get_current_page(flop, total_pages)
{has_previous_page?, previous_offset, previous_page} =
get_previous(current_offset, current_page, page_size)
{has_next_page?, next_offset, next_page} =
get_next(
current_offset,
current_page,
page_size,
total_count,
total_pages
)
%Meta{
current_offset: current_offset,
current_page: current_page,
flop: flop,
has_next_page?: has_next_page?,
has_previous_page?: has_previous_page?,
next_offset: next_offset,
next_page: next_page,
page_size: page_size,
previous_offset: previous_offset,
previous_page: previous_page,
total_count: total_count,
total_pages: total_pages
}
end
defp get_previous(offset, current_page, limit) do
has_previous? = offset > 0
previous_offset = if has_previous?, do: max(0, offset - limit), else: nil
previous_page = if current_page > 1, do: current_page - 1, else: nil
{has_previous?, previous_offset, previous_page}
end
defp get_next(_, _, nil = _page_size, _, _) do
{false, nil, nil}
end
defp get_next(current_offset, _, _, total_count, _)
when current_offset >= total_count - 1 do
{false, nil, nil}
end
defp get_next(current_offset, current_page, page_size, _, total_pages) do
{true, current_offset + page_size, min(total_pages, current_page + 1)}
end
defp get_total_pages(0, _), do: 0
defp get_total_pages(_, nil), do: 1
defp get_total_pages(total_count, limit), do: ceil(total_count / limit)
defp get_current_offset(%Flop{offset: nil, page: nil}), do: 0
defp get_current_offset(%Flop{offset: nil, page: page, page_size: page_size}),
do: (page - 1) * page_size
defp get_current_offset(%Flop{offset: offset}), do: offset
defp get_current_page(%Flop{offset: nil, page: nil}, _), do: 1
defp get_current_page(%Flop{offset: nil, page: page}, _), do: page
defp get_current_page(%Flop{limit: limit, offset: offset, page: nil}, total),
do: min(ceil(offset / limit) + 1, total)
## Ordering
@doc """
Applies the `order_by` and `order_directions` parameters of a `t:Flop.t/0`
to an `t:Ecto.Queryable.t/0`.
Used by `Flop.query/2`.
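
For example, missing order directions default to `:asc`:

    iex> flop = %Flop{order_by: [:name, :age], order_directions: [:desc]}
    iex> Flop.order_by(Flop.Pet, flop)
    #Ecto.Query<from p0 in Flop.Pet, order_by: [desc: p0.name, asc: p0.age]>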
"""
@spec order_by(Queryable.t(), Flop.t()) :: Queryable.t()
def order_by(q, %Flop{order_by: nil}), do: q
def order_by(q, %Flop{order_by: fields, order_directions: directions}) do
Query.order_by(q, ^prepare_order(fields, directions))
end
@spec prepare_order([atom], [order_direction()]) :: [
{order_direction(), atom}
]
defp prepare_order(fields, directions) do
directions = directions || []
field_count = length(fields)
direction_count = length(directions)
directions =
if direction_count < field_count,
do: directions ++ List.duplicate(:asc, field_count - direction_count),
else: directions
Enum.zip(directions, fields)
end
## Pagination
@doc """
Applies the pagination parameters of a `t:Flop.t/0` to an
`t:Ecto.Queryable.t/0`.
The function supports both `offset`/`limit` based pagination and
`page`/`page_size` based pagination.
If you validated the `t:Flop.t/0` with `Flop.validate/1` before, you can be
sure that the given `t:Flop.t/0` only has pagination parameters set for one
pagination method. If you pass an unvalidated `t:Flop.t/0` that has
pagination parameters set for multiple pagination methods, this function
will arbitrarily only apply one of the pagination methods.
Used by `Flop.query/2`.
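
For example, with page-based pagination parameters:

    iex> Flop.paginate(Flop.Pet, %Flop{page: 2, page_size: 10})
    #Ecto.Query<from p0 in Flop.Pet, limit: ^10, offset: ^10>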
"""
@spec paginate(Queryable.t(), Flop.t()) :: Queryable.t()
def paginate(q, %Flop{limit: limit, offset: offset})
when (is_integer(limit) and limit >= 1) or
(is_integer(offset) and offset >= 0) do
q
|> limit(limit)
|> offset(offset)
end
def paginate(q, %Flop{page: page, page_size: page_size})
when is_integer(page) and is_integer(page_size) and
page >= 1 and page_size >= 1 do
q
|> limit(page_size)
|> offset((page - 1) * page_size)
end
def paginate(q, _), do: q
## Offset/limit pagination
@spec limit(Queryable.t(), pos_integer | nil) :: Queryable.t()
defp limit(q, nil), do: q
defp limit(q, limit), do: Query.limit(q, ^limit)
@spec offset(Queryable.t(), non_neg_integer | nil) :: Queryable.t()
defp offset(q, nil), do: q
defp offset(q, offset), do: Query.offset(q, ^offset)
## Filter
@doc """
Applies the `filter` parameter of a `t:Flop.t/0` to an `t:Ecto.Queryable.t/0`.
Used by `Flop.query/2`.
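
For example, with an equality filter (the value is illustrative):

    iex> flop = %Flop{filters: [%Flop.Filter{field: :name, op: :==, value: "Patty"}]}
    iex> Flop.filter(Flop.Pet, flop)
    #Ecto.Query<from p0 in Flop.Pet, where: p0.name == ^"Patty">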
"""
@spec filter(Queryable.t(), Flop.t()) :: Queryable.t()
def filter(q, %Flop{filters: nil}), do: q
def filter(q, %Flop{filters: []}), do: q
def filter(q, %Flop{filters: filters}) when is_list(filters) do
Enum.reduce(filters, q, &filter(&2, &1))
end
def filter(_, %Filter{field: field, op: _, value: value})
when is_nil(field) or is_nil(value) do
raise ArgumentError
end
def filter(q, %Filter{field: field, op: :==, value: value}),
do: Query.where(q, ^[{field, value}])
def filter(q, %Filter{field: field, op: :!=, value: value}),
do: Query.where(q, [r], field(r, ^field) != ^value)
def filter(q, %Filter{field: field, op: :=~, value: value}) do
query_value = "%#{value}%"
Query.where(q, [r], ilike(field(r, ^field), ^query_value))
end
def filter(q, %Filter{field: field, op: :>=, value: value}),
do: Query.where(q, [r], field(r, ^field) >= ^value)
def filter(q, %Filter{field: field, op: :<=, value: value}),
do: Query.where(q, [r], field(r, ^field) <= ^value)
def filter(q, %Filter{field: field, op: :>, value: value}),
do: Query.where(q, [r], field(r, ^field) > ^value)
def filter(q, %Filter{field: field, op: :<, value: value}),
do: Query.where(q, [r], field(r, ^field) < ^value)
def filter(q, %Filter{field: field, op: :in, value: value}),
do: Query.where(q, [r], field(r, ^field) in ^value)
## Validation
@doc """
Validates a `t:Flop.t/0`.
## Examples
iex> params = %{"limit" => 10, "offset" => 0, "texture" => "fluffy"}
iex> Flop.validate(params)
{:ok,
%Flop{
filters: [],
limit: 10,
offset: 0,
order_by: nil,
order_directions: nil,
page: nil,
page_size: nil
}}
iex> flop = %Flop{offset: -1}
iex> {:error, changeset} = Flop.validate(flop)
iex> changeset.valid?
false
iex> changeset.errors
[
offset: {"must be greater than or equal to %{number}",
[validation: :number, kind: :greater_than_or_equal_to, number: 0]}
]
It also makes sure that only one pagination method is used.
iex> params = %{limit: 10, offset: 0, page: 5, page_size: 10}
iex> {:error, changeset} = Flop.validate(params)
iex> changeset.valid?
false
iex> changeset.errors
[limit: {"cannot combine multiple pagination types", []}]
If you derived `Flop.Schema` in your Ecto schema to define the filterable
and sortable fields, you can pass the module name to the function to validate
that only allowed fields are used.
iex> params = %{"order_by" => ["species"]}
iex> {:error, changeset} = Flop.validate(params, for: Flop.Pet)
iex> changeset.valid?
false
iex> [order_by: {msg, [_, {_, enum}]}] = changeset.errors
iex> msg
"has an invalid entry"
iex> enum
[:name, :age]
Note that currently, trying to use an existing field that is not allowed as
seen above will result in the error message `has an invalid entry`, while
trying to use a field name that does not exist in the schema (or more
precisely: a field name that doesn't exist as an atom) will result in
the error message `is invalid`. This might change in the future.
"""
@spec validate(Flop.t() | map, keyword) ::
{:ok, Flop.t()} | {:error, Changeset.t()}
def validate(flop, opts \\ [])
def validate(%Flop{} = flop, opts) do
flop
|> Map.from_struct()
|> validate(opts)
end
def validate(%{} = params, opts) do
result =
params
|> changeset(opts)
|> apply_action(:replace)
case result do
{:ok, _} = r ->
r
{:error, %Changeset{} = changeset} = r ->
Logger.debug("Invalid Flop: #{inspect(changeset)}")
r
end
end
@doc """
Same as `Flop.validate/2`, but raises an `Ecto.InvalidChangesetError` if the
parameters are invalid.
"""
@doc since: "0.5.0"
@spec validate!(Flop.t() | map, keyword) :: Flop.t()
def validate!(flop, opts \\ []) do
case validate(flop, opts) do
{:ok, flop} ->
flop
{:error, changeset} ->
raise Ecto.InvalidChangesetError, action: :replace, changeset: changeset
end
end
@spec changeset(map, keyword) :: Changeset.t()
defp changeset(%{} = params, opts) do
%Flop{}
|> cast(params, [
:limit,
:offset,
:order_by,
:order_directions,
:page,
:page_size
])
|> cast_embed(:filters, with: {Filter, :changeset, [opts]})
|> validate_number(:limit, greater_than: 0)
|> validate_within_max_limit(:limit, opts[:for])
|> validate_number(:offset, greater_than_or_equal_to: 0)
|> validate_number(:page, greater_than: 0)
|> validate_number(:page_size, greater_than: 0)
|> validate_exclusive([[:limit, :offset], [:page, :page_size]],
message: "cannot combine multiple pagination types"
)
|> validate_sortable(opts[:for])
|> validate_page_and_page_size(opts[:for])
|> put_default_limit(opts[:for])
end
@spec validate_exclusive(Changeset.t(), [[atom]], keyword) :: Changeset.t()
defp validate_exclusive(changeset, field_groups, opts) do
changed_field_groups =
Enum.filter(field_groups, fn fields ->
Enum.any?(fields, fn field -> !is_nil(get_field(changeset, field)) end)
end)
if length(changed_field_groups) > 1 do
key =
changed_field_groups
|> List.first()
|> Enum.reject(&is_nil(get_field(changeset, &1)))
|> List.first()
add_error(
changeset,
key,
opts[:message] || "invalid combination of field groups"
)
else
changeset
end
end
@spec validate_sortable(Changeset.t(), module | nil) :: Changeset.t()
defp validate_sortable(changeset, nil), do: changeset
defp validate_sortable(changeset, module) do
sortable_fields =
module
|> struct()
|> sortable()
validate_subset(changeset, :order_by, sortable_fields)
end
@spec validate_page_and_page_size(Changeset.t(), module | nil) ::
Changeset.t()
defp validate_page_and_page_size(changeset, module) do
page = get_field(changeset, :page)
page_size = get_field(changeset, :page_size)
if !is_nil(page) || !is_nil(page_size) do
changeset
|> validate_required([:page, :page_size])
|> validate_within_max_limit(:page_size, module)
else
changeset
end
end
@spec validate_within_max_limit(Changeset.t(), atom, module | nil) ::
Changeset.t()
defp validate_within_max_limit(changeset, _field, nil), do: changeset
defp validate_within_max_limit(changeset, field, module) do
max_limit = module |> struct() |> max_limit()
if is_nil(max_limit),
do: changeset,
else: validate_number(changeset, field, less_than_or_equal_to: max_limit)
end
defp put_default_limit(changeset, nil), do: changeset
defp put_default_limit(%Changeset{valid?: false} = changeset, _),
do: changeset
defp put_default_limit(changeset, module) do
default_limit = module |> struct() |> default_limit()
if is_nil(default_limit) do
changeset
else
limit = get_field(changeset, :limit)
page_size = get_field(changeset, :page_size)
if is_nil(limit) && is_nil(page_size) do
put_change(changeset, :limit, default_limit)
else
changeset
end
end
end
defp default_repo, do: Application.get_env(:flop, :repo)
defp no_repo_error(function_name),
do: """
No repo specified. You can specify the repo either by passing it
explicitly:
Flop.#{function_name}(MyApp.Item, %Flop{}, repo: MyApp.Repo)
Or you can configure a default repo in your config:
config :flop, repo: MyApp.Repo
"""
end
|
lib/flop.ex
| 0.895794 | 0.639624 |
flop.ex
|
starcoder
|
defmodule Vapor.Configuration do
@moduledoc false
# Manages a layered set of configuration values.
# Not meant to be consumed by the end user
import Norm
defstruct [
layers: %{overrides: %{}},
translations: []
]
def s do
schema(%__MODULE__{
layers: map_of(one_of([spec(is_atom()), spec(is_integer())]), spec(is_map())),
translations: coll_of({spec(is_atom()), spec(is_function())}),
})
end
@doc """
Returns a new configuration with an initial set of layers and a list of
initial actions to run.
"""
def new(layers, translations) do
    # We're abusing Erlang term ordering here. The `:overrides` atom will
    # always have the highest precedence simply because it's an atom, and
    # atoms sort after integers.
configuration = conform!(%__MODULE__{
layers: Map.merge(%{overrides: %{}}, layers),
translations: translations,
}, s())
merged = materialize(configuration)
actions =
merged
|> Enum.map(fn {key, value} -> {:upsert, key, value} end)
{configuration, merged, actions}
end
@doc """
Overwrites a value at a given path. Overwrites always take precedence over
any other configuration values.
"""
def set(config, key, value) do
overrides = config.layers.overrides
update(config, :overrides, Map.put(overrides, key, value))
end
@doc """
Updates a specific layer in the configuration.
"""
def update(%{layers: ls}=config, layer, value) do
old_paths = materialize(config)
new_config = %{config | layers: Map.put(ls, layer, value)}
new_paths = materialize(new_config)
actions = diff(new_paths, old_paths)
{new_config, new_paths, actions}
end
defp materialize(config) do
config
|> flatten()
|> Enum.map(& do_translation(&1, config.translations))
|> Enum.into(%{})
end
# Takes an old configuration and new configuration and returns a list of
# commands needed to convert the old config into the new config.
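  # For example (illustrative keys):
  #
  #   diff(%{"a" => 1, "b" => 2}, %{"a" => 1, "c" => 3})
  #   #=> [{:upsert, "b", 2}, {:delete, "c"}]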
defp diff(new_paths, old_paths) when is_map(new_paths) and is_map(old_paths) do
new_list =
new_paths
|> Enum.to_list
old_list =
old_paths
|> Enum.to_list
# This is expensive but it allows us to only diff the meaningful bits
diff(new_list -- old_list, old_list -- new_list, [])
end
# If we're out of new paths then any remaining old paths are deletes.
defp diff([], old_paths, acc) do
acc ++ Enum.map(old_paths, fn {path, _} -> {:delete, path} end)
end
# If we're out of old paths then everything left is an upsert by default
defp diff(new_paths, [], acc) do
acc ++ Enum.map(new_paths, fn {path, value} -> {:upsert, path, value} end)
end
# If we get here then we know that we need to do an upsert and remove any
# old configs with a matching path to our new config. Then we can keep
# recursing
defp diff([{path, value} | nps], old_paths, acc) do
acc = [{:upsert, path, value} | acc]
old_paths = Enum.reject(old_paths, fn {old_path, _} -> path == old_path end)
diff(nps, old_paths, acc)
end
defp flatten(%{layers: layers}) do
layers
      # Sort by key: :overrides (an atom) sorts after the integer layers,
      # so it is merged last and takes precedence.
      |> Enum.sort(fn {a, _}, {b, _} -> a < b end)
|> Enum.map(fn {_, map} -> map end)
|> Enum.reduce(%{}, fn map, acc -> Map.merge(acc, map) end)
end
defp do_translation({key, value}, translations) do
case Enum.find(translations, fn {k, _f} -> key == k end) do
{_, f} -> {key, f.(value)}
_ -> {key, value}
end
end
end
|
lib/vapor/configuration.ex
| 0.861086 | 0.508483 |
configuration.ex
|
starcoder
|
defmodule AWS.Kinesis.Firehose do
@moduledoc """
Amazon Kinesis Data Firehose API Reference
Amazon Kinesis Data Firehose is a fully managed service that delivers
real-time streaming data to destinations such as Amazon Simple Storage
Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), Amazon
Redshift, and Splunk.
"""
@doc """
Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per AWS Region.
This is an asynchronous operation that immediately returns. The initial
status of the delivery stream is `CREATING`. After the delivery stream is
created, its status is `ACTIVE` and it now accepts data. Attempts to send
data to a delivery stream that is not in the `ACTIVE` state cause an
exception. To check the state of a delivery stream, use
`DescribeDeliveryStream`.
A Kinesis Data Firehose delivery stream can be configured to receive
records directly from providers using `PutRecord` or `PutRecordBatch`, or
it can be configured to use an existing Kinesis stream as its source. To
specify a Kinesis data stream as input, set the `DeliveryStreamType`
parameter to `KinesisStreamAsSource`, and provide the Kinesis stream Amazon
Resource Name (ARN) and role ARN in the `KinesisStreamSourceConfiguration`
parameter.
A delivery stream is configured with a single destination: Amazon S3,
Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the
following destination configuration parameters:
`ExtendedS3DestinationConfiguration`, `S3DestinationConfiguration`,
`ElasticsearchDestinationConfiguration`,
`RedshiftDestinationConfiguration`, or `SplunkDestinationConfiguration`.
When you specify `S3DestinationConfiguration`, you can also provide the
following optional values: BufferingHints, `EncryptionConfiguration`, and
`CompressionFormat`. By default, if no `BufferingHints` value is provided,
Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever
condition is satisfied first. `BufferingHints` is a hint, so there are some
cases where the service cannot adhere to these conditions strictly. For
example, record boundaries might be such that the size is a little over or
under the configured buffering size. By default, no encryption is
performed. We strongly recommend that you enable encryption to ensure
secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
<ul> <li> An Amazon Redshift destination requires an S3 bucket as
intermediate location. Kinesis Data Firehose first delivers data to Amazon
S3 and then uses `COPY` syntax to load data into an Amazon Redshift table.
This is specified in the `RedshiftDestinationConfiguration.S3Configuration`
parameter.
</li> <li> The compression formats `SNAPPY` or `ZIP` cannot be specified in
`RedshiftDestinationConfiguration.S3Configuration` because the Amazon
Redshift `COPY` operation that reads from the S3 bucket doesn't support
these compression formats.
</li> <li> We strongly recommend that you use the user name and password
you provide exclusively with Kinesis Data Firehose, and that the
permissions for the account are restricted for Amazon Redshift `INSERT`
permissions.
</li> </ul> Kinesis Data Firehose assumes the IAM role that is configured
as part of the destination. The role should allow the Kinesis Data Firehose
principal to assume the role, and the role should have permissions that
allow the service to deliver the data. For more information, see [Grant
Kinesis Data Firehose Access to an Amazon S3
Destination](http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
in the *Amazon Kinesis Data Firehose Developer Guide*.
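
A minimal sketch of the input for an S3 destination (the stream name and
ARNs are illustrative):

    input = %{
      "DeliveryStreamName" => "my-stream",
      "S3DestinationConfiguration" => %{
        "RoleARN" => "arn:aws:iam::123456789012:role/firehose-role",
        "BucketARN" => "arn:aws:s3:::my-bucket"
      }
    }
    AWS.Kinesis.Firehose.create_delivery_stream(client, input)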
"""
def create_delivery_stream(client, input, options \\ []) do
request(client, "CreateDeliveryStream", input, options)
end
@doc """
Deletes a delivery stream and its data.
You can delete a delivery stream only if it is in `ACTIVE` or `DELETING`
state, and not in the `CREATING` state. While the deletion request is in
process, the delivery stream is in the `DELETING` state.
To check the state of a delivery stream, use `DescribeDeliveryStream`.
While the delivery stream is `DELETING` state, the service might continue
to accept the records, but it doesn't make any guarantees with respect to
delivering the data. Therefore, as a best practice, you should first stop
any applications that are sending records before deleting a delivery
stream.
"""
def delete_delivery_stream(client, input, options \\ []) do
request(client, "DeleteDeliveryStream", input, options)
end
@doc """
Describes the specified delivery stream and gets the status. For example,
after your delivery stream is created, call `DescribeDeliveryStream` to see
whether the delivery stream is `ACTIVE` and therefore ready for data to be
sent to it.
"""
def describe_delivery_stream(client, input, options \\ []) do
request(client, "DescribeDeliveryStream", input, options)
end
@doc """
Lists your delivery streams in alphabetical order of their names.
The number of delivery streams might be too large to return using a single
call to `ListDeliveryStreams`. You can limit the number of delivery streams
returned, using the `Limit` parameter. To determine whether there are more
delivery streams to list, check the value of `HasMoreDeliveryStreams` in
the output. If there are more delivery streams to list, you can request
them by calling this operation again and setting the
`ExclusiveStartDeliveryStreamName` parameter to the name of the last
delivery stream returned in the last call.
"""
def list_delivery_streams(client, input, options \\ []) do
request(client, "ListDeliveryStreams", input, options)
end
@doc """
Lists the tags for the specified delivery stream. This operation has a
limit of five transactions per second per account.
"""
def list_tags_for_delivery_stream(client, input, options \\ []) do
request(client, "ListTagsForDeliveryStream", input, options)
end
@doc """
Writes a single data record into an Amazon Kinesis Data Firehose delivery
stream. To write multiple data records into a delivery stream, use
`PutRecordBatch`. Applications using these operations are referred to as
producers.
By default, each delivery stream can take in up to 2,000 transactions per
second, 5,000 records per second, or 5 MB per second. If you use
`PutRecord` and `PutRecordBatch`, the limits are an aggregate across these
two operations for each delivery stream. For more information about limits
and how to request an increase, see [Amazon Kinesis Data Firehose
Limits](http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
You must specify the name of the delivery stream and the data record when
using `PutRecord`. The data record consists of a data blob that can be up
to 1,000 KB in size, and any kind of data. For example, it can be a segment
from a log file, geographic location data, website clickstream data, and so
on.
Kinesis Data Firehose buffers records before delivering them to the
destination. To disambiguate the data blobs at the destination, a common
solution is to use delimiters in the data, such as a newline (`\n`) or some
other character unique within the data. This allows the consumer
application to parse individual data items when reading the data from the
destination.
The `PutRecord` operation returns a `RecordId`, which is a unique string
assigned to each record. Producer applications can use this ID for purposes
such as auditability and investigation.
If the `PutRecord` operation throws a `ServiceUnavailableException`, back
off and retry. If the exception persists, it is possible that the
throughput limits have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the
time they are added to a delivery stream as it tries to send the records to
the destination. If the destination is unreachable for more than 24 hours,
the data is no longer available.
<important> Don't concatenate two or more base64 strings to form the data
fields of your records. Instead, concatenate the raw data, then perform
base64 encoding.
</important>
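
A minimal sketch of a call (the client fields, stream name, and payload are
illustrative; real credentials are required):

    client = %{access_key_id: "<KEY_ID>", secret_access_key: "<KEY>",
               region: "us-east-1", endpoint: "amazonaws.com",
               proto: "https", port: 443, service: "firehose"}
    input = %{"DeliveryStreamName" => "my-stream",
              "Record" => %{"Data" => Base.encode64("hello world\n")}}
    AWS.Kinesis.Firehose.put_record(client, input)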
"""
def put_record(client, input, options \\ []) do
request(client, "PutRecord", input, options)
end
@doc """
Writes multiple data records into a delivery stream in a single call, which
can achieve higher throughput per producer than when writing single
records. To write single data records into a delivery stream, use
`PutRecord`. Applications using these operations are referred to as
producers.
By default, each delivery stream can take in up to 2,000 transactions per
second, 5,000 records per second, or 5 MB per second. If you use
`PutRecord` and `PutRecordBatch`, the limits are an aggregate across these
two operations for each delivery stream. For more information about limits,
see [Amazon Kinesis Data Firehose
Limits](http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
Each `PutRecordBatch` request supports up to 500 records. Each record in
the request can be as large as 1,000 KB (before base64 encoding), up to a
limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when
using `PutRecord`. The data record consists of a data blob that can be up
to 1,000 KB in size, and any kind of data. For example, it could be a
segment from a log file, geographic location data, website clickstream
data, and so on.
Kinesis Data Firehose buffers records before delivering them to the
destination. To disambiguate the data blobs at the destination, a common
solution is to use delimiters in the data, such as a newline (`\n`) or some
other character unique within the data. This allows the consumer
application to parse individual data items when reading the data from the
destination.
The `PutRecordBatch` response includes a count of failed records,
`FailedPutCount`, and an array of responses, `RequestResponses`. Even if
the `PutRecordBatch` call succeeds, the value of `FailedPutCount` may be
greater than 0, indicating that there are records for which the operation
didn't succeed. Each entry in the `RequestResponses` array provides
additional information about the processed record. It directly correlates
with a record in the request array using the same ordering, from the top to
the bottom. The response array always includes the same number of records
as the request array. `RequestResponses` includes both successfully and
unsuccessfully processed records. Kinesis Data Firehose tries to process
all records in each `PutRecordBatch` request. A single record failure does
not stop the processing of subsequent records.
A successfully processed record includes a `RecordId` value, which is
unique for the record. An unsuccessfully processed record includes
`ErrorCode` and `ErrorMessage` values. `ErrorCode` reflects the type of
error, and is one of the following values: `ServiceUnavailableException` or
`InternalFailure`. `ErrorMessage` provides more detailed information about
the error.
If there is an internal server error or a timeout, the write might have
completed or it might have failed. If `FailedPutCount` is greater than 0,
retry the request, resending only those records that might have failed
processing. This minimizes the possible duplicate records and also reduces
the total bytes sent (and corresponding charges). We recommend that you
handle any duplicates at the destination.
If `PutRecordBatch` throws `ServiceUnavailableException`, back off and
retry. If the exception persists, it is possible that the throughput limits
have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the
time they are added to a delivery stream as it attempts to send the records
to the destination. If the destination is unreachable for more than 24
hours, the data is no longer available.
<important> Don't concatenate two or more base64 strings to form the data
fields of your records. Instead, concatenate the raw data, then perform
base64 encoding.
</important>
"""
def put_record_batch(client, input, options \\ []) do
request(client, "PutRecordBatch", input, options)
end
@doc """
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it,
Kinesis Data Firehose first sets the status of the stream to `ENABLING`,
and then to `ENABLED`. You can continue to read and write data to your
stream while its status is `ENABLING`, but the data is not encrypted. It
can take up to 5 seconds after the encryption status changes to `ENABLED`
before all records written to the delivery stream are encrypted. To find
out whether a record or a batch of records was encrypted, check the
response elements `PutRecordOutput$Encrypted` and
`PutRecordBatchOutput$Encrypted`, respectively.
To check the encryption state of a delivery stream, use
`DescribeDeliveryStream`.
You can only enable SSE for a delivery stream that uses `DirectPut` as its
source.
The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
operations have a combined limit of 25 calls per delivery stream per 24
hours. For example, you reach the limit if you call
`StartDeliveryStreamEncryption` 13 times and `StopDeliveryStreamEncryption`
12 times for the same delivery stream in a 24-hour period.
"""
def start_delivery_stream_encryption(client, input, options \\ []) do
request(client, "StartDeliveryStreamEncryption", input, options)
end
@doc """
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it,
Kinesis Data Firehose first sets the status of the stream to `DISABLING`,
and then to `DISABLED`. You can continue to read and write data to your
stream while its status is `DISABLING`. It can take up to 5 seconds after
the encryption status changes to `DISABLED` before all records written to
the delivery stream are no longer subject to encryption. To find out
whether a record or a batch of records was encrypted, check the response
elements `PutRecordOutput$Encrypted` and `PutRecordBatchOutput$Encrypted`,
respectively.
To check the encryption state of a delivery stream, use
`DescribeDeliveryStream`.
The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
operations have a combined limit of 25 calls per delivery stream per 24
hours. For example, you reach the limit if you call
`StartDeliveryStreamEncryption` 13 times and `StopDeliveryStreamEncryption`
12 times for the same delivery stream in a 24-hour period.
"""
def stop_delivery_stream_encryption(client, input, options \\ []) do
request(client, "StopDeliveryStreamEncryption", input, options)
end
@doc """
Adds or updates tags for the specified delivery stream. A tag is a
key-value pair that you can define and assign to AWS resources. If you
specify a tag that already exists, the tag value is replaced with the value
that you specify in the request. Tags are metadata. For example, you can
add friendly names and descriptions or other types of information that can
help you distinguish the delivery stream. For more information about tags,
see [Using Cost Allocation
Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
in the *AWS Billing and Cost Management User Guide*.
Each delivery stream can have up to 50 tags.
This operation has a limit of five transactions per second per account.
"""
def tag_delivery_stream(client, input, options \\ []) do
request(client, "TagDeliveryStream", input, options)
end
@doc """
Removes tags from the specified delivery stream. Removed tags are deleted,
and you can't recover them after this operation successfully completes.
If you specify a tag that doesn't exist, the operation ignores it.
This operation has a limit of five transactions per second per account.
"""
def untag_delivery_stream(client, input, options \\ []) do
request(client, "UntagDeliveryStream", input, options)
end
@doc """
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace
the Amazon S3 destination with Amazon Redshift) or change the parameters
associated with a destination (for example, to change the bucket name of
the Amazon S3 destination). The update might not occur immediately. The
target delivery stream remains active while the configurations are updated,
so data writes to the delivery stream can continue during this process. The
updated configurations are usually effective within a few minutes.
Switching between Amazon ES and other services is not supported. For an
Amazon ES destination, you can only update to another Amazon ES
destination.
If the destination type is the same, Kinesis Data Firehose merges the
configuration parameters specified with the destination configuration that
already exists on the delivery stream. If any of the parameters are not
specified in the call, the existing values are retained. For example, in
the Amazon S3 destination, if `EncryptionConfiguration` is not specified,
then the existing `EncryptionConfiguration` is maintained on the
destination.
If the destination type is not the same, for example, changing the
destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does
not merge any parameters. In this case, all parameters must be specified.
Kinesis Data Firehose uses `CurrentDeliveryStreamVersionId` to avoid race
conditions and conflicting merges. This is a required field, and the
service updates the configuration only if the existing configuration has a
version ID that matches. After the update is applied successfully, the
version ID is updated, and can be retrieved using `DescribeDeliveryStream`.
Use the new version ID to set `CurrentDeliveryStreamVersionId` in the next
call.
"""
def update_destination(client, input, options \\ []) do
request(client, "UpdateDestination", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "firehose"}
host = get_host("firehose", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "Firehose_20150804.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/kinesis_firehose.ex
| 0.936088 | 0.802052 |
kinesis_firehose.ex
|
starcoder
|
defmodule EctoEnum.Use do
@moduledoc false
alias EctoEnum.Typespec
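
  # A minimal usage sketch (the enum name and values are illustrative):
  #
  #   defmodule StatusEnum do
  #     use EctoEnum.Use, registered: 0, active: 1
  #   end
  #
  #   StatusEnum.cast("active") #=> {:ok, :active}
  #   StatusEnum.dump(:active)  #=> {:ok, 1}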
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
[h | _t] = opts
opts =
cond do
Keyword.keyword?(opts) ->
opts
is_binary(h) ->
Enum.map(opts, fn value -> {String.to_atom(value), value} end)
true ->
raise "Enum must be a keyword list or a list of strings"
end
typespec = Typespec.make(Keyword.keys(opts))
@behaviour Ecto.Type
@type t :: unquote(typespec)
keys = Keyword.keys(opts)
string_keys = Enum.map(keys, &Atom.to_string/1)
@valid_values Enum.uniq(keys ++ string_keys ++ Keyword.values(opts))
@valid_string_values Enum.filter(@valid_values, &is_binary/1)
@valid_atom_values Enum.filter(@valid_values, &is_atom/1)
@valid_integer_values Enum.filter(@valid_values, &is_integer/1)
{_key, value} = opts |> hd()
type =
if is_integer(value) do
:integer
else
:string
end
def type, do: unquote(type)
for {key, value} <- opts, k <- Enum.uniq([key, value, Atom.to_string(key)]) do
def cast(unquote(k)), do: {:ok, unquote(key)}
end
def cast(_other), do: :error
for {key, value} <- opts, k <- Enum.uniq([key, value, Atom.to_string(key)]) do
def cast!(unquote(k)), do: unquote(key)
end
def cast!(other), do: raise Ecto.CastError, type: __MODULE__, value: other
for {key, value} <- opts, k <- Enum.uniq([key, value, Atom.to_string(key)]) do
def dump(unquote(k)), do: {:ok, unquote(value)}
end
def dump(term) do
msg =
"Value `#{inspect(term)}` is not a valid enum for `#{inspect(__MODULE__)}`. " <>
"Valid enums are `#{inspect(__valid_values__())}`"
raise Ecto.ChangeError, message: msg
end
for {key, value} <- opts, k <- Enum.uniq([key, value, Atom.to_string(key)]) do
def dump!(unquote(k)), do: unquote(value)
end
def dump!(term) do
msg =
"Value `#{inspect(term)}` is not a valid enum for `#{inspect(__MODULE__)}`. " <>
"Valid enums are `#{inspect(__valid_values__())}`"
raise Ecto.ChangeError, message: msg
end
def embed_as(_), do: :self
def equal?(term1, term2), do: term1 == term2
for {key, value} <- opts do
def load(unquote(value)), do: {:ok, unquote(key)}
end
def valid_value?(value) do
Enum.member?(@valid_values, value)
end
for atom <- keys do
defmacro unquote(atom)(), do: unquote(atom)
end
# # Reflection
def __enum_map__(), do: unquote(opts)
def __valid_values__(), do: @valid_values
def __valid_values__(:atom), do: @valid_atom_values
def __valid_values__(:string), do: @valid_string_values
def __valid_values__(:integer), do: @valid_integer_values
end
end
end
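# A minimal usage sketch (the `OrderStatus` module and its values are
# hypothetical, not part of this library):
#
#     defmodule OrderStatus do
#       use EctoEnum.Use, pending: 0, shipped: 1, delivered: 2
#     end
#
#     OrderStatus.cast("shipped")  #=> {:ok, :shipped}
#     OrderStatus.dump(:delivered) #=> {:ok, 2}
#     OrderStatus.load(0)          #=> {:ok, :pending}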
|
lib/ecto_enum/use.ex
| 0.781914 | 0.412885 |
use.ex
|
starcoder
|
defmodule Vega.Issue do
@moduledoc """
The issue contains the details of the history of all modifications. For each modification an issue document
is created to record all details and references that belong to the modification.
At the front end a human friendly message is rendered: The field `:msg` contains the text of the modification while
the field `:keys` contains the relevant information as a simple map structure, which is used to build the text of
the modification with the help of `Gettext` to localize the message. This is done in function `Issues.to_struct/1`.
"""
alias Vega.Issue
alias Vega.Issues
alias Vega.User
alias Vega.Board
alias Vega.BoardList
alias Yildun.Collection
use Collection
@collection "issues"
collection "issues" do
attribute :ts, DateTime.t(), default: &DateTime.utc_now/0 ## timestamp
attribute :author_id, BSON.ObjectId.t() ## id of the user
attribute :t, non_neg_integer() ## the type of modification see Vega.IssueConsts
attribute :board, BSON.ObjectId.t() ## the id of the board
attribute :list, BSON.ObjectId.t() ## the id of the list
attribute :keys, map() ## keys for gettext
attribute :msg, String.t() ## the localized message of the modification
after_load &Issue.after_load/1
end
def new(type, %User{_id: author_id}) do
%Issue{new() | author_id: author_id, t: type}
end
def new(type, %User{_id: author_id}, %Board{_id: board}) do
%Issue{new() | author_id: author_id, t: type, board: board}
end
def new(type, %User{_id: author_id}, %Board{_id: board}, %BoardList{_id: list}) do
%Issue{new() | author_id: author_id, t: type, board: board, list: list}
end
@doc """
Add keys to the issue which are used to format a localized string in the history view of the app.
"""
def add_message_keys(issue, keys \\ []) do
%Issue{issue | keys: keys}
end
def author(%Issue{author_id: author_id}) do
User.get(author_id)
end
def fetch_all(nil) do
[]
end
def fetch_all(%Board{_id: id}) do
Mongo.find(:mongo, @collection, %{"board" => id}, sort: %{ts: -1}, limit: 5) |> Enum.map(fn issue -> issue |> load() end)
end
def fetch_all_raw(%Board{_id: id}) do
Mongo.find(:mongo, @collection, %{"board" => id})
end
def after_load(%Issue{keys: keys} = issue) when keys == nil do
Issues.add_message(%Issue{issue | keys: []})
end
def after_load(issue) do
Issues.add_message(issue)
end
def check() do
case Mongo.show_collections(:mongo) |> Enum.any?(fn coll -> coll == @collection end) do
false -> Mongo.create(:mongo, @collection)
true -> :ok
end
end
def clone_issues(issues, board, mapping) do
Enum.map(issues, fn issue -> clone(issue, board, Map.get(mapping, issue["list"])) end)
end
def clone(%{"list" => list} = issue, board, list) do
%{issue | "_id" => Mongo.object_id(), "board" => board, "list" => list}
end
def clone(issue, board, _list) do
%{issue | "_id" => Mongo.object_id(), "board" => board}
end
end
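# A minimal usage sketch, assuming `user`, `board` and `list` are loaded structs
# and that `2` is a valid Vega.IssueConsts modification type (both assumed here):
#
#     Vega.Issue.new(2, user, board, list)
#     |> Vega.Issue.add_message_keys(title: "My board")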
|
lib/vega/issue/issue.ex
| 0.783947 | 0.433382 |
issue.ex
|
starcoder
|
defmodule Erl2ex.Results.Collector do
@moduledoc """
Erl2ex.Results.Collector is a process that accumulates results of a
conversion run.
"""
alias Erl2ex.Results
@typedoc """
The ProcessID of a results collector process.
"""
@type t :: pid()
@typedoc """
A file identifier, which may be a filesystem path or a symbolic id.
"""
@type file_id :: Path.t | atom
@doc """
Starts a result collector and returns its PID.
"""
@spec start_link(list) :: t
def start_link(opts \\ []) do
{:ok, pid} = GenServer.start_link(__MODULE__, opts)
pid
end
@doc """
Record that a conversion was successful for the given input and output paths.
"""
@spec put_success(t, file_id, file_id) :: :ok | {:error, term}
def put_success(results, input_path, output_path) do
GenServer.call(results, {:success, input_path, output_path})
end
@doc """
Record that a conversion was unsuccessful for the given input path.
"""
@spec put_error(t, file_id, %CompileError{}) :: :ok | {:error, term}
def put_error(results, input_path, error) do
GenServer.call(results, {:error, input_path, error})
end
@doc """
Returns the results for the given input path.
"""
@spec get_file(t, file_id) :: {:ok, Results.File.t} | {:error, term}
def get_file(results, path) do
GenServer.call(results, {:get_file, path})
end
@doc """
Returns the results for the entire conversion so far.
"""
@spec get(t) :: Results.t
def get(results) do
GenServer.call(results, {:get})
end
@doc """
Stops the collector process.
"""
@spec stop(t) :: :ok
def stop(results) do
GenServer.cast(results, {:stop})
end
use GenServer
defmodule State do
@moduledoc false
defstruct(
data: %{},
allow_overwrite: false
)
end
def init(opts) do
state = %State{
allow_overwrite: Keyword.get(opts, :allow_overwrite, false)
}
{:ok, state}
end
def handle_call({:success, input_path, output_path}, _from, state) do
if not state.allow_overwrite and Map.has_key?(state.data, input_path) do
{:reply, {:error, :file_exists}, state}
else
file = %Results.File{
input_path: input_path,
output_path: output_path
}
state = %State{state | data: Map.put(state.data, input_path, file)}
{:reply, :ok, state}
end
end
def handle_call({:error, input_path, error}, _from, state) do
if not state.allow_overwrite and Map.has_key?(state.data, input_path) do
{:reply, {:error, :file_exists}, state}
else
file = %Results.File{
input_path: input_path,
error: error
}
state = %State{state | data: Map.put(state.data, input_path, file)}
{:reply, :ok, state}
end
end
def handle_call({:get_file, input_path}, _from, %State{data: data} = state) do
reply = case Map.fetch(data, input_path) do
{:ok, file} -> {:ok, file}
:error -> {:error, :not_found}
end
{:reply, reply, state}
end
def handle_call({:get}, _from, %State{data: data} = state) do
{:reply, %Results{files: Map.values(data)}, state}
end
def handle_cast({:stop}, state) do
{:stop, :normal, state}
end
end
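# A minimal end-to-end sketch of the API above (paths hypothetical):
#
#     collector = Erl2ex.Results.Collector.start_link(allow_overwrite: false)
#     :ok = Erl2ex.Results.Collector.put_success(collector, "src/foo.erl", "lib/foo.ex")
#     {:ok, _file} = Erl2ex.Results.Collector.get_file(collector, "src/foo.erl")
#     %Erl2ex.Results{} = Erl2ex.Results.Collector.get(collector)
#     Erl2ex.Results.Collector.stop(collector)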
|
lib/erl2ex/results_collector.ex
| 0.555194 | 0.515193 |
results_collector.ex
|
starcoder
|
defmodule Astarte.Flow.Blocks.HttpSource do
@moduledoc """
This is a producer block that generates messages by polling HTTP URLs with a GET request.
It works by specifying a `base_url` and a list of `target_paths` to perform requests on.
`HttpSource` will perform GET requests in a round robin fashion on all `target_paths`,
waiting `polling_interval_ms` between two consecutive requests.
If the request can't be performed or an error status (`>= 400`) is returned, no message
is produced.
If the request succeeds, `HttpSource` produces an `%Astarte.Flow.Message{}` containing
these fields:
* `key` contains the `target_path` of the request.
* `data` contains the body of the response.
* `type` is always `:binary`.
* `subtype` is populated with the contents of the `content-type` HTTP header, defaulting
to `"application/octet-stream"` if it's not found.
* `metadata` contains the `"Astarte.Flow.Blocks.HttpSource.base_url"` key with `base_url`
as value. Moreover, it contains all the HTTP headers contained in the response with
their keys prefixed with `"Astarte.Flow.HttpSource."`.
* `timestamp` contains the timestamp (in microseconds) the response was received.
"""
use GenStage
require Logger
alias Astarte.Flow.Message
@meta_namespace "Astarte.Flow.Blocks.HttpSource."
defmodule State do
@moduledoc false
defstruct [
:client,
:base_url,
:initial_target_paths,
:target_paths,
:polling_interval_ms,
:pending_demand,
:queue
]
end
@doc """
Starts the `HttpSource`.
## Options
* `:base_url` (required) - The base URL for the GET requests. This gets prepended to the `target_path` when
performing a request.
* `:target_paths` (required) - A non-empty list of target paths for GET requests.
* `:polling_interval_ms` - The interval between two consecutive GET requests, in milliseconds. Defaults to 1000 ms.
* `:headers` - A list of `{key, value}` tuples where `key` and `value` are `String` and represent
headers to be set in the GET request.
* `:ignore_ssl_errors` - If `true`, ignore SSL errors that happen while performing the request.
Defaults to `false`.
"""
@spec start_link(options) :: GenServer.on_start()
when options: [option],
option:
{:base_url, url :: String.t()}
| {:target_paths, target_paths :: nonempty_list(String.t())}
| {:polling_interval_ms, polling_interval_ms :: number()}
| {:headers, headers :: [{String.t(), String.t()}]}
def start_link(opts) do
GenStage.start_link(__MODULE__, opts)
end
# Callbacks
@impl true
def init(opts) do
with {:url, {:ok, base_url}} <- {:url, Keyword.fetch(opts, :base_url)},
{:paths, {:ok, target_paths}} <- {:paths, Keyword.fetch(opts, :target_paths)},
polling_interval_ms = Keyword.get(opts, :polling_interval_ms, 1000),
headers = Keyword.get(opts, :headers, []),
:ok <- validate_target_paths(target_paths),
:ok <- validate_headers(headers) do
client = build_client(base_url, opts)
state = %State{
client: client,
base_url: base_url,
initial_target_paths: target_paths,
target_paths: target_paths,
polling_interval_ms: trunc(polling_interval_ms),
pending_demand: 0,
queue: :queue.new()
}
send(self(), :poll)
{:producer, state, dispatcher: GenStage.BroadcastDispatcher}
else
{:url, _} ->
{:stop, :missing_base_url}
{:paths, _} ->
{:stop, :missing_target_paths}
{:error, reason} ->
{:stop, reason}
end
end
@impl true
def handle_demand(incoming_demand, %State{pending_demand: demand} = state) do
dispatch_messages(%{state | pending_demand: demand + incoming_demand}, [])
end
@impl true
def handle_info(:poll, %State{target_paths: []} = state) do
# Finished all target_paths, start from the beginning
handle_info(:poll, %{state | target_paths: state.initial_target_paths})
end
def handle_info(:poll, state) do
%State{
client: client,
base_url: base_url,
target_paths: [target_path | target_paths_tail],
polling_interval_ms: polling_interval_ms,
queue: queue
} = state
# Schedule next polling
_ = Process.send_after(self(), :poll, polling_interval_ms)
case get(client, target_path) do
{:ok, response} ->
new_queue =
build_message(base_url, target_path, response)
|> :queue.in(queue)
new_state = %{state | target_paths: target_paths_tail, queue: new_queue}
dispatch_messages(new_state, [])
{:error, _reason} ->
new_state = %{state | target_paths: target_paths_tail}
{:noreply, [], new_state}
end
end
defp get(client, target_path) do
case Tesla.get(client, target_path) do
{:ok, %{status: status} = response} when status < 400 ->
{:ok, response}
{:ok, %{status: status, body: body}} ->
_ =
Logger.warn("HttpSource received error status",
status: status,
body: body
)
{:error, :http_error_response}
{:error, reason} ->
_ = Logger.warn("HttpSource cannot make GET request", reason: reason)
{:error, :request_failed}
end
end
defp build_message(base_url, target_path, %Tesla.Env{body: body, headers: headers}) do
{subtype, headers_metadata} = extract_headers(headers)
metadata = Map.put(headers_metadata, @meta_namespace <> "base_url", base_url)
timestamp =
DateTime.utc_now()
|> DateTime.to_unix(:microsecond)
%Message{
key: target_path,
data: body,
type: :binary,
subtype: subtype,
metadata: metadata,
timestamp: timestamp
}
end
defp extract_headers(headers) do
Enum.reduce(headers, {"application/octet-stream", %{}}, fn
{"content-type", subtype}, {_default_subtype, meta} ->
{subtype, meta}
{header, value}, {subtype, meta} ->
prefixed_key = @meta_namespace <> header
{subtype, Map.put(meta, prefixed_key, value)}
end)
end
defp dispatch_messages(%State{pending_demand: 0} = state, messages) do
{:noreply, Enum.reverse(messages), state}
end
defp dispatch_messages(%State{pending_demand: demand, queue: queue} = state, messages) do
case :queue.out(queue) do
{{:value, message}, updated_queue} ->
updated_state = %{state | pending_demand: demand - 1, queue: updated_queue}
updated_messages = [message | messages]
dispatch_messages(updated_state, updated_messages)
{:empty, _queue} ->
{:noreply, Enum.reverse(messages), state}
end
end
defp validate_target_paths([]) do
{:error, :empty_target_paths}
end
defp validate_target_paths(target_paths) when is_list(target_paths) do
valid? = Enum.all?(target_paths, &String.starts_with?(&1, "/"))
if valid? do
:ok
else
{:error, :invalid_target_paths}
end
end
defp validate_headers([]) do
:ok
end
defp validate_headers([{key, value} | tail]) when is_binary(key) and is_binary(value) do
validate_headers(tail)
end
defp validate_headers(_) do
{:error, :invalid_headers}
end
defp build_client(base_url, opts) do
headers = Keyword.get(opts, :headers, [])
middleware = [
Tesla.Middleware.FollowRedirects,
Tesla.Middleware.DecompressResponse,
{Tesla.Middleware.BaseUrl, base_url},
{Tesla.Middleware.Headers, headers}
]
if Keyword.get(opts, :ignore_ssl_errors) do
# Build adapter with insecure SSL to ignore SSL errors
adapter_opts = [insecure: true]
adapter = {Tesla.Adapter.Hackney, adapter_opts}
Tesla.client(middleware, adapter)
else
# Use default adapter
Tesla.client(middleware)
end
end
end
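# A minimal start sketch using the documented options (URL and header values
# are hypothetical):
#
#     {:ok, source} =
#       Astarte.Flow.Blocks.HttpSource.start_link(
#         base_url: "http://device.example.com",
#         target_paths: ["/metrics", "/status"],
#         polling_interval_ms: 5_000,
#         headers: [{"Authorization", "Bearer some-token"}]
#       )
#
# A GenStage consumer subscribed to `source` then receives one %Message{} per
# successful poll.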
|
lib/astarte_flow/blocks/http_source.ex
| 0.908193 | 0.471284 |
http_source.ex
|
starcoder
|
defmodule Modbux.Rtu.Framer do
@moduledoc """
A framer for Modbus RTU frames. This framer doesn't do anything for the transmit
direction, but for receives, it will collect bytes that follows the Modbus RTU protocol,
it also execute the CRC validation and returns the frame or the error.
"""
@behaviour Circuits.UART.Framing
require Logger
alias Modbux.Helper
defmodule State do
@moduledoc false
defstruct behavior: nil,
max_len: nil,
expected_length: nil,
index: 0,
processed: <<>>,
in_process: <<>>,
fc: nil,
error: nil,
error_message: nil,
lines: []
end
def init(args) do
# modbus standard max len
max_len = Keyword.get(args, :max_len, 255)
behavior = Keyword.get(args, :behavior, :slave)
state = %State{max_len: max_len, behavior: behavior}
{:ok, state}
end
# do nothing, we assume this is already in the right form
def add_framing(data, state) do
{:ok, data, state}
end
def remove_framing(data, state) do
# Logger.debug("new data #{inspect(state)}")
new_state = process_data(state.in_process <> data, state.expected_length, state.processed, state)
rc = if buffer_empty?(new_state), do: :ok, else: :in_frame
# Logger.debug("new data processed #{inspect(new_state)}, #{inspect(rc)}")
if has_error?(new_state),
do: dispatch({:ok, new_state.error_message, new_state}),
else: dispatch({rc, new_state.lines, new_state})
end
def frame_timeout(state) do
partial_line = {:partial, state.processed <> state.in_process}
new_state = %State{max_len: state.max_len, behavior: state.behavior}
{:ok, [partial_line], new_state}
end
def flush(direction, state) when direction == :receive or direction == :both do
%State{max_len: state.max_len, behavior: state.behavior}
end
def flush(_direction, state) do
state
end
# helper functions
# handle empty byte
defp process_data(<<>>, _len, _in_process, state) do
# Logger.debug("End #{inspect(state)}")
state
end
# get first byte (index 0)
defp process_data(<<slave_id::size(8), b_tail::binary>>, nil, processed, %{index: 0} = state) do
# Logger.debug("line 0")
new_state = %{state | index: 1, processed: processed <> <<slave_id>>, in_process: <<>>}
process_data(b_tail, nil, new_state.processed, new_state)
end
# get second byte (function code) (index 1)
defp process_data(<<fc::size(8), b_tail::binary>>, nil, processed, %{index: 1} = state) do
# Logger.debug("line 1")
new_state = %{state | fc: fc, index: 2, processed: processed <> <<fc>>}
process_data(b_tail, nil, new_state.processed, new_state)
end
# Clause for functions code => 5, 6
defp process_data(<<len::size(8), b_tail::binary>>, nil, processed, %{index: 2, fc: fc} = state)
when fc in [5, 6] do
# Logger.debug("fc 5 or 6")
new_state = %{state | expected_length: 8, index: 3, processed: processed <> <<len>>}
process_data(b_tail, new_state.expected_length, new_state.processed, new_state)
end
# Clause for functions code => 15 and 16
defp process_data(<<len::size(8), b_tail::binary>>, nil, processed, %{index: 2, fc: fc} = state)
when fc in [15, 16] do
# Logger.debug("fc 15 or 16")
new_state =
if state.behavior == :slave do
%{state | expected_length: 7, index: 3, processed: processed <> <<len>>}
else
%{state | expected_length: 8, index: 3, processed: processed <> <<len>>}
end
process_data(b_tail, new_state.expected_length, new_state.processed, new_state)
end
defp process_data(<<len::size(8), b_tail::binary>>, _len, processed, %{index: 6, fc: fc} = state)
when fc in [15, 16] do
# Logger.debug("fc 15 or 16, len")
new_state =
if state.behavior == :slave do
%{state | expected_length: len + 9, index: 7, processed: processed <> <<len>>}
else
%{state | expected_length: 8, index: 7, processed: processed <> <<len>>}
end
process_data(b_tail, new_state.expected_length, new_state.processed, new_state)
end
# Clause for functions code => 1, 2, 3 and 4
defp process_data(<<len::size(8), b_tail::binary>>, nil, processed, %{index: 2, fc: fc} = state)
when fc in 1..4 do
# Logger.debug("fc 1, 2, 3 or 4")
new_state =
if state.behavior == :slave do
%{state | expected_length: 8, index: 3, processed: processed <> <<len>>}
else
%{state | expected_length: len + 5, index: 3, processed: processed <> <<len>>}
end
process_data(b_tail, new_state.expected_length, new_state.processed, new_state)
end
# Clause for exceptions.
defp process_data(<<len::size(8), b_tail::binary>>, nil, processed, %{index: 2, fc: fc} = state)
when fc in 129..144 do
# Logger.debug("exceptions")
new_state = %{state | expected_length: 5, index: 3, processed: processed <> <<len>>}
process_data(b_tail, new_state.expected_length, new_state.processed, new_state)
end
# Catch all fc (error)
defp process_data(_data, nil, processed, %{index: 2, fc: _fc} = state) do
%{state | error: true, error_message: [{:error, :einval, processed}]}
end
defp process_data(<<data::size(8), b_tail::binary>>, len, processed, state) when is_binary(processed) do
current_data = processed <> <<data>>
# Logger.info(
# "(#{__MODULE__}) data_len: #{byte_size(current_data)}, len: #{inspect(len)}, state: #{inspect(state)}"
# )
if len == byte_size(current_data) do
new_state = %{
state
| expected_length: nil,
lines: [current_data],
in_process: b_tail,
index: 0,
processed: <<>>
}
# we got the whole thing in 1 pass, so we're done
check_crc(new_state)
else
new_state = %{state | index: state.index + 1, processed: current_data}
# need to keep reading
process_data(b_tail, len, current_data, new_state)
end
end
def buffer_empty?(state) do
state.processed == <<>> and state.in_process == <<>>
end
defp has_error?(state) do
state.error != nil
end
defp dispatch({:in_frame, _lines, _state} = msg), do: msg
defp dispatch({rc, msg, state}) do
{rc, msg, %State{max_len: state.max_len, behavior: state.behavior}}
end
# once we have the full packet, verify it's CRC16
defp check_crc(state) do
[packet] = state.lines
packet_without_crc = Kernel.binary_part(packet, 0, byte_size(packet) - 2)
expected_crc = Kernel.binary_part(packet, byte_size(packet), -2)
<<hi_crc, lo_crc>> = Helper.crc(packet_without_crc)
real_crc = <<lo_crc, hi_crc>>
# Logger.info("(#{__MODULE__}) #{inspect(expected_crc)} == #{inspect(real_crc)}")
if real_crc == expected_crc,
do: state,
else: %{state | error: true, error_message: [{:error, :ecrc, "CRC Error"}]}
end
end
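# A minimal wiring sketch (serial device name hypothetical): Circuits.UART
# accepts a `framing: {module, args}` option, so this framer can group incoming
# bytes into CRC-checked RTU frames.
#
#     {:ok, uart} = Circuits.UART.start_link()
#     :ok =
#       Circuits.UART.open(uart, "ttyUSB0",
#         speed: 19200,
#         framing: {Modbux.Rtu.Framer, behavior: :master}
#       )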
|
lib/rtu/framer.ex
| 0.562898 | 0.482673 |
framer.ex
|
starcoder
|
defmodule Faker.Vehicle.En do
import Faker, only: [sampler: 2]
alias Faker.Util
@moduledoc """
Functions for generating Vehicle related data in English
"""
@makes [
"BMW",
"Audi",
"Toyota",
"Chevy",
"Ford",
"Dodge",
"Lincoln",
"Buick",
"Honda",
"Nissan"
]
@models %{
"BMW" => ["328i", "M3", "M5", "X1", "X3", "X5"],
"Audi" => ["A4", "A5", "S5", "A7", "A8"],
"Toyota" => ["Prius", "Camry", "Corolla"],
"Chevy" => ["Camero", "Silverado", "Malibu"],
"Ford" => ["Mustang", "F150", "Focus", "Fiesta"],
"Dodge" => ["Ram", "Challenger", "Charger", "Durango"],
"Lincoln" => ["Navigator", "MKZ", "MKX", "MKS"],
"Buick" => ["Enclave", "Regal", "LaCrosse", "Verano", "Encore", "Riveria"],
"Honda" => ["Accord", "Civic", "CR-V", "Odyssey"],
"Nissan" => ["Rogue", "Juke", "Cube", "Pathfiner", "Versa", "Altima"]
}
@options [
"A/C: Front",
"Airbag: Driver",
"AM/FM Stereo",
"A/C: Rear",
"Airbag: Passenger",
"Cassette Player",
"8-Track Player",
"Cruise Control",
"Airbag: Side",
"CD (Single Disc)",
"Navigation",
"Alarm",
"CD (Multi Disc)",
"Power Locks",
"Antilock Brakes",
"MP3 (Single Disc)",
"Power Steering",
"Fog Lights",
"MP3 (Multi Disc)",
"Keyless Entry",
"Premium Sound",
"Integrated Phone",
"DVD System",
"Bucket Seats",
"Power Windows",
"Alloy Wheels",
"Leather Interior",
"Rear Window Defroster",
"Moonroof/Sunroof",
"Memory Seats",
"Rear Window Wiper",
"Third Row Seats",
"Power Seats",
"Tinted Glass",
"Tow Package"
]
@standard_specs [
"1.8L DOHC 16-valve I4 engine -inc: engine cover",
"Engine mounts -inc: (2) solid, (1) liquid-filled",
"Front wheel drive",
"Battery saver",
"Independent strut front suspension w/stabilizer bar",
"Torsion beam rear suspension w/stabilizer bar",
"Electric speed-sensitive variable-assist pwr steering",
"Pwr front vented disc/rear drum brakes",
"Compact spare tire",
"Body color front/rear bumpers",
"Multi-reflector halogen headlamps",
"Body color folding remote-controlled pwr mirrors",
"Variable intermittent windshield wipers w/mist function",
"Intermittent rear wiper w/washer",
"Body color door handles",
"Roof mounted antenna",
"Reclining front bucket seats -inc: active head restraints, double-thickness foam in front seats",
"60/40 split fold-down rear seat w/outboard adjustable headrests",
"Dual front & rear cup holders",
"Tilt steering column",
"Silver accent IP trim finisher -inc: silver shifter finisher",
"Tachometer",
"Fasten seat belt warning light/chime",
"Pwr windows",
"Remote fuel lid release",
"Immobilizer system",
"Pwr rear liftgate release",
"Air conditioning w/in-cabin microfilter",
"Rear window defroster w/timer",
"12V pwr outlet",
"Silver finish interior door handles",
"Driver & front passenger map pockets",
"Rear passenger map pockets",
"Front & rear passenger folding assist grips",
"Carpeted floor & cargo area",
"Cargo area lamp",
"Anti-lock brake system (ABS) -inc: electronic brake force distribution (EBD), brake assist",
"Energy absorbing front/rear bumpers",
"Steel side-door impact beams",
"Zone body construction -inc: front/rear crumple zones, hood deformation point",
"Dual-stage front airbags w/occupant classification system",
"Front side-impact airbags",
"Front & rear side curtain airbags",
"3-point ELR driver seat belt w/pretensioner & load limiter",
"3-point ELR/ALR front passenger seat belt w/pretensioner & load limiter",
"3-point ELR/ALR rear seat belts at all positions",
"Child safety rear door locks",
"Rear child seat tether anchors (LATCH)",
"Tire pressure monitoring system (TPMS)",
"Energy absorbing steering column",
"4.6L DOHC 32-valve V8 engine -inc: DI & SFI dual fuel injection, dual variable valve timing w/intelligence & electronically controlled intake (VVT-iE), aluminum block & heads",
"Vibration-dampening liquid-filled engine mounts",
"Electronic throttle control system w/intelligence (ETCS-i)",
"Acoustic control induction system (ACIS)",
"8-speed automatic transmission -inc: intelligence (ECT-i), gated shifter, sequential sport-shift mode",
"Full-time all-wheel drive",
"Front/rear aluminum multi-link double joint suspension w/coil springs",
"Front/rear stabilizer bars",
"Electric pwr rack & pinion steering (EPS)",
"4-wheel ventilated pwr disc brakes -inc: brake override system",
"Dual chrome exhaust tips",
"Tool kit",
"P235/50R18 all-season tires",
"Full-size spare tire w/aluminum alloy wheel",
"Scratch-resistant paint clearcoating",
"Pwr tilt/slide moonroof -inc: 1-touch open/close",
"1-piece chrome window surround",
"Xenon high-intensity discharge (HID) headlamps -inc: adaptive front lighting system, delayed auto-off",
"Integrated fog lamps",
"LED lights -inc: brake lamps, tail lamps, license plate",
"Electrochromic pwr folding heated mirrors w/memory -inc: puddle lamps, integrated turn signals, auto reverse tilt-down",
"Acoustic glass windshield",
"Water-repellent windshield & front door glass",
"Laminated side window glass",
"Rain-sensing wipers",
"XM satellite radio receiver -inc: 90 day trial subscription",
"Rear bench seat -inc: (3) adjustable headrests",
"Center console",
"Optitron electroluminescent instrumentation",
"Multi-info display -inc: driving range, average MPG, current MPG, average speed, outside temp, elapsed time, maintenance & diagnostic messages",
"Eco drive indicator",
"Pwr windows -inc: 1-touch open/close",
"HomeLink universal transceiver",
"Dual-zone automatic climate control system -inc: smog sensor, auto recirculation, clear air filter, pollen filter",
"Rear-window defogger w/auto-off timer",
"(2) aux 12V pwr outlets -inc: (1) in center console, (1) w/cigarette lighter",
"Grain-matched wood trim -inc: center console, dash, door panels",
"Electrochromic rearview mirror",
"Foldable front door storage pockets",
"Dual front illuminated visor vanity mirrors",
"Front/rear spot-lamp illumination",
"4-wheel/4-channel anti-lock brake system (ABS)",
"Electronic control braking (ECB)",
"Electronic brakeforce distribution (EBD) w/brake assist (BA) -inc: Smart stop technology",
"Electronic parking brake",
"Vehicle dynamics integrated management (VDIM) system -inc: vehicle stability control (VSC), traction control (TRAC)",
"Front/rear crumple zones",
"Daytime running lights (DRL)",
"Side-impact door beams",
"Dual front 2-stage airbags -inc: passenger occupant classification system w/twin-chamber airbag",
"Front/rear side curtain airbags",
"Dual front knee airbags",
"Back-up camera",
"All-position 3-point seat belts -inc: outboard pretensioners & force limiters, dual front pwr shoulder height adjusters, rear outboard emergency auto locking retractors, driver emergency locking retractor",
"Child restraint seat anchors for outboard positions",
"Rear door child safety locks",
"Direct-type tire pressure monitor system",
"Impact-dissipating upper interior trim",
"Collapsible steering column",
"Emergency interior trunk release",
"First aid kit",
"6.1L SRT V8 \"Hemi\" engine",
"3.73 axle ratio",
"Quadra-Trac active on demand 4WD system",
"200mm front axle",
"Dana 44/226mm rear axle",
"625-amp maintenance-free battery",
"160-amp alternator",
"Tip start system",
"Pwr accessory delay",
"Trailer tow wiring harness",
"High performance suspension",
"Pwr steering cooler",
"Pwr rack & pinion performance tuned steering",
"Anti-lock 4-wheel performance disc brakes",
"Brake assist",
"Dual bright exhaust tips",
"Run flat tires",
"20\" x 9.0\" front & 20\" x 10.0\" rear aluminum wheels",
"Monotone paint",
"Black roof molding",
"Rear body-color spoiler",
"Body color grille",
"Chrome bodyside molding",
"Black windshield molding",
"Body color fascias w/bright insert",
"Body color sill extension",
"Fog lamps",
"Front door tinted glass",
"\"Flipper\" liftgate glass",
"Rear window wiper/washer",
"Body color front license plate brow",
"Body color door handles",
"6.5\" touch screen display",
"Fixed long mast antenna",
"Pwr 8-way driver seat w/4-way front passenger seat",
"60/40 folding rear seat",
"Full-length floor console",
"Luxury front & rear floor mats w/logo",
"Floor carpeting",
"Tilt/telescoping steering column",
"Leather-wrapped steering wheel w/audio controls",
"Instrument cluster w/tachometer",
"Vehicle info center",
"Traveler/mini trip computer",
"Pwr front windows w/(1) touch up/down feature",
"Speed control",
"Sentry Key theft deterrent system",
"Security alarm",
"Bright pedals",
"Rear window defroster",
"Locking glove box",
"Highline door trim panel",
"Cloth covered headliner",
"Overhead console",
"Dual illuminated visor vanity mirrors",
"Universal garage door opener",
"Passenger assist handles",
"Deluxe insulation group",
"Cargo compartment lamp",
"Glove box lamp",
"Rear reading & courtesy lamps",
"Illuminated entry",
"Leather-wrapped shift knob",
"Leather-wrapped parking brake handle",
"Carpeted cargo area",
"Trim-panel-mounted storage net",
"Cargo-area tie down loops",
"Cargo compartment cover",
"Reversible/waterproof cargo storage",
"Driver & front passenger advanced multistage airbags w/occupant sensors",
"Supplemental side curtain air bags",
"Enhanced accident response system unlocks the doors, shuts off the fuel pump and turns on interior lights after airbag deploys",
"3-point rear center seat belts",
"Child seat upper tether anchorages",
"LATCH-ready child seat anchor system",
"Child safety rear door locks",
"Dual note horn",
"Tire pressure monitoring display"
]
defp all_models do
Enum.reduce(Map.values(@models), [], fn models, acc -> acc ++ models end)
end
@doc """
Returns a vehicle body style string
## Examples
iex> Faker.Vehicle.En.body_style()
"Minivan"
iex> Faker.Vehicle.En.body_style()
"Hatchback"
iex> Faker.Vehicle.En.body_style()
"Crew Cab Pickup"
iex> Faker.Vehicle.En.body_style()
"Regular Cab Pickup"
"""
@spec body_style() :: String.t()
sampler(:body_style, [
"Cargo Van",
"Convertible",
"Coupe",
"Crew Cab Pickup",
"Extended Cab Pickup",
"Hatchback",
"Minivan",
"Passenger Van",
"Regular Cab Pickup",
"Sedan",
"SUV",
"Wagon"
])
@doc """
Returns a vehicle drivetrain string
## Examples
iex> Faker.Vehicle.En.drivetrain()
"4x2/2-wheel drive"
iex> Faker.Vehicle.En.drivetrain()
"4x4/4-wheel drive"
iex> Faker.Vehicle.En.drivetrain()
"4x2/2-wheel drive"
iex> Faker.Vehicle.En.drivetrain()
"RWD"
"""
@spec drivetrain() :: String.t()
sampler(:drivetrain, ["4x2/2-wheel drive", "4x4/4-wheel drive", "AWD", "FWD", "RWD"])
@doc """
Returns a vehicle fuel type string
## Examples
iex> Faker.Vehicle.En.fuel_type()
"Ethanol"
iex> Faker.Vehicle.En.fuel_type()
"E-85/Gasoline"
iex> Faker.Vehicle.En.fuel_type()
"Compressed Natural Gas"
iex> Faker.Vehicle.En.fuel_type()
"Gasoline Hybrid"
"""
@spec fuel_type() :: String.t()
sampler(:fuel_type, [
"Compressed Natural Gas",
"Diesel",
"E-85/Gasoline",
"Electric",
"Gasoline",
"Gasoline Hybrid",
"Ethanol"
])
@doc """
Returns a vehicle make string
## Examples
iex> Faker.Vehicle.En.make()
"BMW"
iex> Faker.Vehicle.En.make()
"Audi"
iex> Faker.Vehicle.En.make()
"Dodge"
iex> Faker.Vehicle.En.make()
"Ford"
"""
@spec make() :: String.t()
def make do
Util.pick(@makes)
end
@doc """
Returns a vehicle make and model string
## Examples
iex> Faker.Vehicle.En.make_and_model()
"BMW X5"
iex> Faker.Vehicle.En.make_and_model()
"Dodge Ram"
iex> Faker.Vehicle.En.make_and_model()
"Toyota Prius"
iex> Faker.Vehicle.En.make_and_model()
"Ford Focus"
"""
@spec make_and_model() :: String.t()
def make_and_model do
m = make()
"#{m} #{model(m)}"
end
@doc """
Returns a vehicle model string
## Examples
iex> Faker.Vehicle.En.model()
"CR-V"
iex> Faker.Vehicle.En.model()
"Enclave"
iex> Faker.Vehicle.En.model()
"Encore"
iex> Faker.Vehicle.En.model()
"Verano"
"""
@spec model() :: String.t()
def model do
Util.pick(all_models())
end
@doc """
Returns a realistic vehicle model string
for the given model.
## Examples
iex> Faker.Vehicle.En.model("Ford")
"Focus"
iex> Faker.Vehicle.En.model("BMW")
"X5"
iex> Faker.Vehicle.En.model("Audi")
"A4"
iex> Faker.Vehicle.En.model("Toyota")
"Corolla"
"""
@spec model(String.t()) :: String.t()
def model(make) do
if Enum.member?(@makes, make) do
Util.pick(@models[make])
else
model()
end
end
@doc """
Returns a vehicle option string
## Examples
iex> Faker.Vehicle.En.option()
"Premium Sound"
iex> Faker.Vehicle.En.option()
"Power Steering"
iex> Faker.Vehicle.En.option()
"A/C: Front"
iex> Faker.Vehicle.En.option()
"Keyless Entry"
"""
@spec option() :: String.t()
def option do
Util.pick(@options)
end
@doc """
Returns a list of vehicle options()
## Examples
iex> Faker.Vehicle.En.options
["Power Steering", "A/C: Front", "Keyless Entry", "AM/FM Stereo", "Power Steering", "Antilock Brakes", "8-Track Player", "Leather Interior"]
iex> Faker.Vehicle.En.options
["MP3 (Multi Disc)", "A/C: Rear", "Fog Lights", "Power Windows", "Cruise Control", "Premium Sound", "A/C: Front"]
iex> Faker.Vehicle.En.options
["Tinted Glass", "MP3 (Single Disc)", "CD (Multi Disc)"]
iex> Faker.Vehicle.En.options
["Fog Lights", "Rear Window Wiper", "MP3 (Multi Disc)", "Navigation", "Airbag: Side", "Rear Window Defroster", "Premium Sound"]
"""
@spec options() :: list(String.t())
def options do
Util.list(Faker.random_between(2, 8), fn _ -> Util.pick(@options) end)
end
@doc """
Returns a list of vehicle options of the given length
## Examples
iex> Faker.Vehicle.En.options(3)
["Premium Sound", "Power Steering", "A/C: Front"]
iex> Faker.Vehicle.En.options(3)
["Keyless Entry", "AM/FM Stereo", "Power Steering"]
iex> Faker.Vehicle.En.options(3)
["Antilock Brakes", "8-Track Player", "Leather Interior"]
iex> Faker.Vehicle.En.options(3)
["Cassette Player", "MP3 (Multi Disc)", "A/C: Rear"]
"""
@spec options(non_neg_integer()) :: list(String.t())
def options(number) do
Util.list(number, fn _ -> Util.pick(@options) end)
end
@doc """
Returns a vehicle standard spec string
## Examples
iex> Faker.Vehicle.En.standard_spec()
"Tire pressure monitoring system (TPMS)"
iex> Faker.Vehicle.En.standard_spec()
"20\\" x 9.0\\" front & 20\\" x 10.0\\" rear aluminum wheels"
iex> Faker.Vehicle.En.standard_spec()
"Deluxe insulation group"
iex> Faker.Vehicle.En.standard_spec()
"Torsion beam rear suspension w/stabilizer bar"
"""
@spec standard_spec() :: String.t()
def standard_spec do
Util.pick(@standard_specs)
end
@doc """
Returns a list of vehicle standard specs
## Examples
iex> Faker.Vehicle.En.standard_specs()
["20\\" x 9.0\\" front & 20\\" x 10.0\\" rear aluminum wheels", "Deluxe insulation group", "Torsion beam rear suspension w/stabilizer bar", "High performance suspension", "200mm front axle", "Traveler/mini trip computer", "P235/50R18 all-season tires", "Front door tinted glass"]
iex> Faker.Vehicle.En.standard_specs()
["625-amp maintenance-free battery", "Body color sill extension", "Cargo compartment cover", "Dana 44/226mm rear axle", "Tachometer", "Leather-wrapped parking brake handle", "Side-impact door beams"]
iex> Faker.Vehicle.En.standard_specs()
["Tilt steering column", "Luxury front & rear floor mats w/logo", "HomeLink universal transceiver"]
iex> Faker.Vehicle.En.standard_specs()
["Multi-reflector halogen headlamps", "Multi-info display -inc: driving range, average MPG, current MPG, average speed, outside temp, elapsed time, maintenance & diagnostic messages", "Zone body construction -inc: front/rear crumple zones, hood deformation point", "60/40 split fold-down rear seat w/outboard adjustable headrests", "Trim-panel-mounted storage net", "Front side-impact airbags", "Front/rear spot-lamp illumination"]
"""
@spec standard_specs() :: list(String.t())
def standard_specs do
Util.list(Faker.random_between(2, 8), fn _ -> Util.pick(@standard_specs) end)
end
@doc """
Returns a list of vehicle standard specs of the given length
## Examples
iex> Faker.Vehicle.En.standard_specs(3)
["Tire pressure monitoring system (TPMS)", "20\\" x 9.0\\" front & 20\\" x 10.0\\" rear aluminum wheels", "Deluxe insulation group"]
iex> Faker.Vehicle.En.standard_specs(3)
["Torsion beam rear suspension w/stabilizer bar", "High performance suspension", "200mm front axle"]
iex> Faker.Vehicle.En.standard_specs(3)
["Traveler/mini trip computer", "P235/50R18 all-season tires", "Front door tinted glass"]
iex> Faker.Vehicle.En.standard_specs(3)
["XM satellite radio receiver -inc: 90 day trial subscription", "625-amp maintenance-free battery", "Body color sill extension"]
"""
@spec standard_specs(non_neg_integer()) :: list(String.t())
def standard_specs(number) do
Util.list(number, fn _ -> Util.pick(@standard_specs) end)
end
@doc """
Returns a vehicle transmission string
## Examples
iex> Faker.Vehicle.En.transmission()
"CVT"
iex> Faker.Vehicle.En.transmission()
"Automatic"
iex> Faker.Vehicle.En.transmission()
"Manual"
iex> Faker.Vehicle.En.transmission()
"Automanual"
"""
@spec transmission() :: String.t()
sampler(:transmission, ["Automanual", "Automatic", "CVT", "Manual"])
end
|
lib/faker/vehicle/en.ex
| 0.637708 | 0.428951 |
en.ex
|
starcoder
|
defmodule Scenic.Cache.Hash do
@moduledoc """
Helper functions to work with hash signatures.
Both the [`Cache.File`](Scenic.Cache.File.html) and [`Cache.Term`](Scenic.Cache.Term.html)
modules use cryptographic hash signatures to verify that files are valid before using
the data they contain.
This modules provides a collection of helper functions that make it easy to use, generate
and validate these hashes.
Any time one of these functions asks for a type of hash, the supported types are:
`:sha`, `:sha224`, `:sha256`, `:sha384`, `:sha512`, and `:ripemd160`
"""
@hash_types [:sha, :sha224, :sha256, :sha384, :sha512, :ripemd160]
@type hash_type ::
:sha
| :sha224
| :sha256
| :sha384
| :sha512
| :ripemd160
# @type type_error :: {:error, :invalid_hash_type}
# @type hash_error :: {:error, :hash_failure}
# ===========================================================================
defmodule Error do
@moduledoc false
defexception message: "Hash check failed"
end
# --------------------------------------------------------
@doc false
@deprecated "Cache.valid_hash_types/0 will be removed in 0.10.0"
@spec valid_hash_types() :: [:ripemd160 | :sha | :sha224 | :sha256 | :sha384 | :sha512, ...]
def valid_hash_types(), do: @hash_types
# --------------------------------------------------------
@spec valid_hash_type?(type :: hash_type) :: boolean()
defp valid_hash_type?(type), do: Enum.member?(@hash_types, type)
# --------------------------------------------------------
@spec valid_hash_type!(type :: hash_type) :: hash_type | no_return
defp valid_hash_type!(type) do
case Enum.member?(@hash_types, type) do
true ->
type
false ->
msg = "Invalid hash type: #{type}\r\n" <> "Must be one of: #{inspect(@hash_types)}"
raise Error, message: msg
end
end
# --------------------------------------------------------
@doc """
Calculate the hash of binary data
Returns the hash wrapped in a `{:ok, hash}` tuple.
"""
@spec binary(data :: binary, type :: hash_type) ::
{:ok, bitstring()} | {:error, :invalid_hash_type}
def binary(data, type) do
case valid_hash_type?(type) do
true -> {:ok, type |> :crypto.hash(data) |> Base.url_encode64(padding: false)}
false -> {:error, :invalid_hash_type}
end
end
@doc """
Calculate the hash of binary data
Returns the hash directly.
"""
@spec binary!(data :: binary, type :: hash_type) :: bitstring()
def binary!(data, type) do
valid_hash_type!(type)
|> :crypto.hash(data)
|> Base.url_encode64(padding: false)
end
# --------------------------------------------------------
@spec file(path :: bitstring, type :: hash_type) ::
{:ok, bitstring()} | {:error, :invalid_hash_type}
def file(path, hash_type) do
do_compute_file(
path,
hash_type,
valid_hash_type?(hash_type)
)
end
@spec file!(path :: bitstring, type :: hash_type) :: bitstring()
def file!(path, hash_type) do
# start the hash context
hash_context =
valid_hash_type!(hash_type)
|> :crypto.hash_init()
# stream the file into the hash
File.stream!(path, [], 2048)
|> Enum.reduce(hash_context, &:crypto.hash_update(&2, &1))
|> :crypto.hash_final()
|> Base.url_encode64(padding: false)
end
defp do_compute_file(_, _, false), do: {:error, :invalid_hash_type}
defp do_compute_file(path, hash_type, true) do
# start the hash context
hash_context = :crypto.hash_init(hash_type)
# since there is no File.stream option, only File.stream!, catch the error
try do
# stream the file into the hash
hash =
File.stream!(path, [], 2048)
|> Enum.reduce(hash_context, &:crypto.hash_update(&2, &1))
|> :crypto.hash_final()
|> Base.url_encode64(padding: false)
{:ok, hash}
rescue
err ->
:crypto.hash_final(hash_context)
case err do
%{reason: reason} -> {:error, reason}
_ -> {:error, :hash}
end
end
end
# --------------------------------------------------------
@doc """
Verify that the given data conforms to the given hash.
If the verification passes, returns `{:ok, data}`
If it fails, returns `{:error, :hash_failure}`
"""
@spec verify(data :: binary, hash :: bitstring, type :: hash_type) ::
{:ok, binary} | {:error, :hash_failure}
def verify(data, hash, hash_type) do
case binary(data, hash_type) do
{:ok, ^hash} -> {:ok, data}
_ -> {:error, :hash_failure}
end
end
# --------------------------------------------------------
@doc """
Verify that the given data conforms to the given hash.
If the verification passes, returns the data unchanged.
If it fails, raises an error
"""
@spec verify!(data :: binary, hash :: bitstring, type :: hash_type) :: binary | no_return
def verify!(data, hash, hash_type) do
case binary!(data, hash_type) == hash do
true -> data
false -> raise Error
end
end
# --------------------------------------------------------
@doc """
Verify that the data in a file conforms to the given hash.
If the verification passes, returns `{:ok, hash}`
If it fails, returns `{:error, :hash_failure}`
"""
@spec verify_file(path :: bitstring, hash :: bitstring, type :: hash_type) ::
{:ok, binary} | {:error, :hash_failure}
def verify_file(path, hash, hash_type) do
case file(path, hash_type) do
{:ok, computed_hash} ->
case computed_hash == hash do
true -> {:ok, hash}
false -> {:error, :hash_failure}
end
err ->
err
end
end
# --------------------------------------------------------
@doc """
Verify that the data in a file conforms to the given hash.
If the verification passes, returns the hash unchanged.
If it fails, raises an error
"""
@spec verify_file!(path :: bitstring, hash :: bitstring, type :: hash_type) ::
binary | no_return
def verify_file!(path, hash, hash_type) do
case file!(path, hash_type) == hash do
true -> hash
false -> raise Error
end
end
# # --------------------------------------------------------
# defp from_path(path) do
# path
# |> String.split(".")
# |> List.last()
# end
# # --------------------------------------------------------
# defp path_params(path)
# defp path_params(path) when is_bitstring(path) do
# hash = from_path(path)
# path_params({path, hash, @default_hash})
# end
# defp path_params({path, hash_type}) when is_atom(hash_type) do
# hash = from_path(path)
# path_params({path, hash, hash_type})
# end
# defp path_params({path_or_data, hash}), do: path_params({path_or_data, hash, @default_hash})
# defp path_params({path_or_data, hash, hash_type})
# when is_binary(path_or_data) and is_bitstring(hash) and is_atom(hash_type) do
# {path_or_data, hash, valid_hash_type!(hash_type)}
# end
# defp path_params(path_or_data, hash_or_type), do: path_params({path_or_data, hash_or_type})
# defp path_params(path_or_data, hash, hash_type), do: path_params({path_or_data, hash, hash_type})
end
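# A minimal round-trip sketch (file path hypothetical):
#
#     {:ok, hash} = Scenic.Cache.Hash.file("static/logo.png", :sha256)
#     {:ok, ^hash} = Scenic.Cache.Hash.verify_file("static/logo.png", hash, :sha256)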
|
lib/scenic/cache/hash.ex
| 0.919625 | 0.574514 |
hash.ex
|
starcoder
|
defmodule ParseTorrent do
alias ParseTorrent.Error
@moduledoc """
A module for convenient .torrent parsing
"""
defstruct(
info_hash: nil,
name: nil,
private: false,
created_at: nil,
created_by: nil,
comment: nil,
announce: [],
url_list: [],
files: [],
length: nil,
piece_length: nil,
last_piece_length: nil,
pieces: []
)
alias ParseTorrent, as: Torrent
@doc """
Parses a torrent binary and returns a map.
## Example:
data = File.read!("test/torrents/leaves.torrent")
{:ok, torrent_map} = ParseTorrent.parse(data)
"""
@spec parse(binary) :: {:ok, map} | :error
def parse(data) do
try do
{:ok, parse!(data)}
rescue
_e -> :error
end
end
@doc """
Parses a torrent binary and returns a map.
Will raise if binary is invalid.
## Example:
data = File.read!("test/torrents/leaves.torrent")
torrent_map = ParseTorrent.parse!(data)
"""
@spec parse!(binary) :: map | no_return
def parse!(<<"d", _::binary>> = data) do
{torrent, info_hash_sha} = data |> Bencode.decode_with_info_hash!()
torrent
|> torrent_valid?
|> do_parse
|> parse_info_hash(info_hash_sha)
end
defp torrent_valid?(torrent) do
torrent
|> has_key_or_raise!("info")
torrent
|> Map.get("info")
|> has_key_or_raise!("name")
|> has_key_or_raise!("piece length")
|> has_key_or_raise!("pieces")
torrent
end
defp has_key_or_raise!(torrent, key) do
case Map.has_key?(torrent, key) do
true -> torrent
_ -> raise Error, missing_key: key
end
end
defp do_parse(torrent) do
{_torrent, %Torrent{} = parsed} =
{torrent, %Torrent{}}
|> parse(:name, &parse_name/1)
|> parse(:private, &parse_private/1)
|> parse(:created_at, &parse_created_at/1)
|> parse(:created_by, &parse_created_by/1)
|> parse(:comment, &parse_comment/1)
|> parse(:announce, &parse_announce/1)
|> parse(:url_list, &parse_url_list/1)
|> parse(:files, &parse_files/1)
|> parse(:length, &parse_length/1)
|> parse(:piece_length, &parse_piece_length/1)
|> parse(:last_piece_length, &parse_last_piece_length/1)
|> parse(:pieces, &parse_pieces/1)
parsed
end
defp parse({torrent, %Torrent{} = parsed}, key, func) do
parsed_value = func.({torrent, parsed})
{torrent, Map.put(parsed, key, parsed_value)}
end
defp parse_info_hash(%Torrent{} = parsed, info_hash_sha) do
info_hash =
info_hash_sha
|> Base.encode16()
|> String.downcase()
%Torrent{parsed | info_hash: info_hash}
end
defp parse_name({torrent, _}) do
torrent["info"]["name.utf-8"] || torrent["info"]["name"]
end
defp parse_private({torrent, _}) do
!!torrent["info"]["private"]
end
defp parse_created_at({torrent, _}) do
case torrent["creation date"] do
nil ->
nil
_time ->
epoch = {{1970, 1, 1}, {0, 0, 0}} |> :calendar.datetime_to_gregorian_seconds()
(torrent["creation date"] + epoch) |> :calendar.gregorian_seconds_to_datetime()
end
end
defp parse_created_by({torrent, _parsed}) do
torrent["created by"]
end
defp parse_comment({torrent, _parsed}) do
torrent["comment"]
end
defp parse_announce({torrent, _parsed}) do
announce =
cond do
is_list(torrent["announce-list"]) ->
List.flatten(torrent["announce-list"])
torrent["announce"] ->
[torrent["announce"]]
true ->
[]
end
announce |> Enum.uniq()
end
defp parse_url_list({torrent, _parsed}) do
do_parse_url_list(torrent["url-list"])
end
defp do_parse_url_list(nil), do: []
defp do_parse_url_list(""), do: []
defp do_parse_url_list(list), do: Enum.uniq(list)
defp parse_files({torrent, %Torrent{} = parsed}) do
# parentheses are required: `|>` binds tighter than `||`, so without them only
# the fallback list would be piped and an existing "files" list would be
# returned unparsed
(torrent["info"]["files"] || [torrent["info"]])
|> do_parse_files(parsed.name)
end
defp do_parse_files(files, name) do
files
|> Enum.with_index()
|> Enum.map(fn {file, i} ->
parts = [name | file["path.utf-8"] || file["path"] || []]
%{
path: Enum.reduce(parts, &(&2 <> "/" <> &1)),
name: List.last(parts),
length: file["length"],
offset: offset(files, i)
}
end)
end
defp offset(_files, 0), do: 0
defp offset(files, i) do
files
|> Enum.slice(0..(i - 1))
|> Enum.map(& &1["length"])
|> Enum.sum()
end
defp parse_length({_torrent, %Torrent{} = parsed}) do
parsed.files
|> Enum.map(& &1[:length])
|> Enum.sum()
end
defp parse_piece_length({torrent, _parsed}) do
torrent["info"]["piece length"]
end
defp parse_last_piece_length({_torrent, %Torrent{} = parsed}) do
last_file =
parsed.files
|> List.last()
piece_length = parsed.piece_length
rem_length = rem(last_file.offset + last_file.length, piece_length)
case rem_length do
0 -> piece_length
_ -> rem_length
end
end
defp parse_pieces({torrent, _parsed}) do
torrent["info"]["pieces"]
|> Base.encode16()
|> String.split("", trim: true)
|> Enum.chunk_every(40, 40, [])
|> Enum.map(&Enum.join/1)
|> Enum.map(&String.downcase/1)
end
end
|
lib/parse_torrent.ex
| 0.700997 | 0.47591 |
parse_torrent.ex
|
starcoder
|
defmodule PairingHeap do
if Application.get_env(:priority_queue, :native) do
@compile :native
@compile {:hipe, [:o3]}
end
@moduledoc """
Pairing Heap implementation
see:
http://en.wikipedia.org/wiki/Pairing_heap
A Pairing Heap is a type of heap structure with relatively simple implementation and
excellent practical amortised performance.
Determining the precise asymptotic running time of pairing heaps has proved difficult,
see the Wikipedia page referenced above for a more complete discussion.
In particular practical performance of decrease-key is excellent (and initially
conjectured to be O(1)), but at present it's known to be "no worse" then O(log n).
However, no tight bound is known.
Operation
find-min: Θ(1)
delete-min: Θ(log n)
insert: Θ(1)
decrease-key: Θ(log n) - however, tight bound not known
merge: Θ(1)
Guts: pairing heaps
A pairing heap is either nil or a term {key, value, [sub_heaps]}
where sub_heaps is a list of heaps.
TODO: Allow the comparison function to be specified
Implement decrease_key
"""
@type key :: any
@type value :: any
@type t :: {key, value, list} | nil
@type element :: {key, value}
@doc """
return the heap with the min item removed
iex> PairingHeap.new(1, ["first"]) |> PairingHeap.delete_min |> PairingHeap.empty?
true
iex> PairingHeap.new(1, ["first"]) |> PairingHeap.delete_min |> PairingHeap.delete_min |> PairingHeap.empty?
true
"""
@spec delete_min(t) :: t
def delete_min(nil), do: nil
def delete_min({_key, _v, sub_heaps}) do
pair(sub_heaps)
end
@doc """
True iff argument is an empty priority queue
iex> PairingHeap.new |> PairingHeap.empty?
true
iex> PairingHeap.new(1, ["first"]) |> PairingHeap.empty?
false
iex> PairingHeap.new(1, ["first"]) |> PairingHeap.delete_min |> PairingHeap.empty?
true
"""
@spec empty?(t) :: boolean
def empty?(nil), do: true
def empty?(_), do: false
@doc """
Merge (meld) two heaps
"""
@spec meld(t, t) :: t
def meld(nil, heap), do: heap
def meld(heap, nil), do: heap
# defp meld(_l = {key_l, value_l, sub_l}, r = {key_r, _value_r, _sub_r}) when key_l < key_r do
# {key_l, value_l, [r | sub_l]}
# end
# defp meld(l, _r = {key_r, value_r, sub_r}) do
# {key_r, value_r, [l | sub_r]}
# end
def meld(l = {key_l, value_l, sub_l}, r = {key_r, value_r, sub_r}) do
cond do
key_l < key_r -> {key_l, value_l, [r | sub_l]}
# merge the sub-heaps of both sides so no children are dropped on equal keys
key_l == key_r -> {key_l, value_l ++ value_r, sub_l ++ sub_r}
true -> {key_r, value_r, [l | sub_r]}
end
end
@doc """
Merge (meld) two heaps
"""
@spec merge(t, t) :: t
def merge(h1, h2), do: meld(h1, h2)
@doc """
min item in the heap
"""
@spec min(t, element) :: element
def min(heap, default \\ {nil, nil})
def min(nil, default), do: default
def min({key, value, _}, _default), do: {key, value}
@doc """
Create new empty heap.
Optionally pass in initial key, value
"""
@spec new :: t
@spec new(key, value) :: t
def new(), do: nil
def new(key, value), do: {key, value, []}
# Pairing Heaps get their name from the special "pair" operation, which is used
# to 'Pair up' (recursively meld) a list of pairing heaps. (A comment rather
# than @doc, since doc attributes on private functions are discarded.)
@spec pair([t]) :: t
defp pair([]), do: nil
defp pair([h]), do: h
defp pair([h0, h1 | hs]), do: meld(meld(h0, h1), pair(hs))
@doc """
Returns the min item, as well as the heap without the min item
Equivalent to:
{min(heap), delete_min(heap)}
iex> PairingHeap.new(1, ["first"]) |> PairingHeap.pop |> elem(0)
{1, ["first"]}
"""
@spec pop(t, element) :: {element, t}
def pop(heap, default \\ {nil, nil}) do
{__MODULE__.min(heap, default), delete_min(heap)}
end
@doc """
Add element X to priority queue
iex> PairingHeap.new |> PairingHeap.put(1, ["first"]) |> PairingHeap.pop |> elem(0)
{1, ["first"]}
"""
@spec put(t, key, value) :: t
def put(heap, key, value) do
meld(heap, new(key, value))
end
end
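# A small fold sketch building a heap from {key, value} pairs; values are lists
# here because meld/2 concatenates the values of equal keys:
#
#     heap =
#       [{3, ["c"]}, {1, ["a"]}, {2, ["b"]}]
#       |> Enum.reduce(PairingHeap.new(), fn {k, v}, acc -> PairingHeap.put(acc, k, v) end)
#
#     PairingHeap.min(heap) #=> {1, ["a"]}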
|
lib/pairing_heap.ex
| 0.602296 | 0.533033 |
pairing_heap.ex
|
starcoder
|
defmodule ExAws.S3.DirectUpload do
@moduledoc """
Pre-signed S3 upload helper for client-side multipart POSTs, with support for using AWS Instance Roles,
which produce temporary credentials. This approach reduces the number of ENV variables to pass, among other benefits.
See:
[Browser-Based Upload using HTTP POST (Using AWS Signature Version 4)](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html)
[Task 3: Calculate the Signature for AWS Signature Version 4](http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html)
This module does not require any further configuration other than the default already in place, when using `ex_aws` or `ex_aws_s3`.
The default configuration is as follows:
```elixir
config :ex_aws,
access_key_id: [{:system, "AWS_ACCESS_KEY_ID"}, :instance_role],
secret_access_key: [{:system, "AWS_SECRET_ACCESS_KEY"}, :instance_role]
```
The Authentication Resolver will look for credentials in ENV variables, and fall back to Instance Role.
"""
@doc """
The `S3DirectUpload` struct represents the data necessary to
generate an S3 pre-signed upload object.
The required fields are:
- `file_name` the name of the file being uploaded
- `mimetype` the mimetype of the file being uploaded
- `path` the path where the file will be uploaded in the bucket
- `bucket` the name of the bucket to which to upload the file
Fields that can be over-ridden are:
- `acl` defaults to `public-read`
"""
defstruct file_name: nil, mimetype: nil, path: nil, acl: "public-read", bucket: nil
@date_util Application.get_env(:ex_aws, :s3_direct_upload_date_util, ExAws.S3.DirectUpload.DateUtil)
@doc """
Returns a map with `url` and `credentials` keys.
- `url` - the form action URL
- `credentials` - name/value pairs for hidden input fields
## Examples
iex> %ExAws.S3.DirectUpload{file_name: "image.jpg", mimetype: "image/jpeg", path: "path/to/file", bucket: "s3-bucket"}
...> |> ExAws.S3.DirectUpload.presigned
...> |> Map.get(:url)
"https://s3-bucket.s3.us-east-1.amazonaws.com"
iex> %ExAws.S3.DirectUpload{file_name: "image.jpg", mimetype: "image/jpeg", path: "path/to/file", bucket: "s3-bucket"}
...> |> ExAws.S3.DirectUpload.presigned
...> |> Map.get(:credentials) |> Map.get(:"x-amz-credential")
"123abc/20170101/us-east-1/s3/aws4_request"
iex> %ExAws.S3.DirectUpload{file_name: "image.jpg", mimetype: "image/jpeg", path: "path/to/file", bucket: "s3-bucket"}
...> |> ExAws.S3.DirectUpload.presigned
...> |> Map.get(:credentials) |> Map.get(:key)
"path/to/file/image.jpg"
"""
def presigned(%ExAws.S3.DirectUpload{} = upload) do
%{
url: url(upload),
credentials: credentials(upload)
}
end
@doc """
Returns a json object with `url` and `credentials` properties.
- `url` - the form action URL
- `credentials` - name/value pairs for hidden input fields
"""
def presigned_json(%ExAws.S3.DirectUpload{} = upload) do
presigned(upload)
|> Poison.encode!
end
defp credentials(%ExAws.S3.DirectUpload{} = upload) do
credentials = %{
policy: policy(upload),
"x-amz-algorithm": "AWS4-HMAC-SHA256",
"x-amz-credential": credential(),
"x-amz-date": @date_util.today_datetime(),
"x-amz-signature": signature(upload),
acl: upload.acl,
key: file_path(upload)
}
credentials = case security_token() do
nil -> credentials
_ -> credentials |> Map.put(:"x-amz-security-token", security_token())
end
credentials
end
defp signature(%ExAws.S3.DirectUpload{} = upload) do
signing_key()
|> hmac(policy(upload))
|> Base.encode16(case: :lower)
end
defp signing_key do
"AWS4#{secret_key()}"
|> hmac(@date_util.today_date())
|> hmac(region())
|> hmac("s3")
|> hmac("aws4_request")
end
defp policy(%ExAws.S3.DirectUpload{} = upload) do
%{
expiration: @date_util.expiration_datetime,
conditions: conditions(upload)
}
|> Poison.encode!
|> Base.encode64
end
defp conditions(%ExAws.S3.DirectUpload{} = upload) do
conditions = [
%{"bucket" => upload.bucket},
%{"acl" => upload.acl},
%{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
%{"x-amz-credential": credential()},
%{"x-amz-date": @date_util.today_datetime()},
["starts-with", "$Content-Type", upload.mimetype],
["starts-with", "$key", upload.path]
]
conditions = case security_token() do
nil -> conditions
_ -> [%{"x-amz-security-token" => security_token()} | conditions]
end
conditions
end
defp url(%ExAws.S3.DirectUpload{bucket: bucket}) do
"https://#{bucket}.s3.#{region()}.amazonaws.com"
end
defp credential() do
"#{access_key()}/#{@date_util.today_date()}/#{region()}/s3/aws4_request"
end
defp file_path(upload) do
"#{upload.path}/#{upload.file_name}"
end
defp hmac(key, data) do
# :crypto.hmac/3 was removed in OTP 24; :crypto.mac/4 is the replacement
:crypto.mac(:hmac, :sha256, key, data)
end
defp security_token do
ExAws.Config.new(:s3)
|> Map.get(:security_token)
end
defp access_key do
ExAws.Config.new(:s3)
|> Map.get(:access_key_id)
end
defp secret_key do
ExAws.Config.new(:s3)
|> Map.get(:secret_access_key)
end
defp region do
ExAws.Config.new(:s3)
|> Map.get(:region)
end
end
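# A minimal usage sketch (bucket and paths hypothetical); the resulting JSON is
# what a browser needs to drive the multipart POST form:
#
#     %ExAws.S3.DirectUpload{
#       file_name: "avatar.png",
#       mimetype: "image/png",
#       path: "uploads/avatars",
#       bucket: "my-bucket"
#     }
#     |> ExAws.S3.DirectUpload.presigned_json()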
|
lib/ex_aws/s3/direct_upload.ex
| 0.856498 | 0.752331 |
direct_upload.ex
|
starcoder
|
defmodule Extatistics.Base do
@moduledoc """
Module implementing basic statistics functions
"""
@type twoNumTL() :: [{number, number}]
@type numEnum() :: Enumerable.t()
@doc """
Trims N positions from the array, removing:
- N/2 at the beginning and N/2 at the end, when N is even
- N/2 + 1 at the beginning and N/2 at the end, when N is odd
## Parameters
- array: an enumerable
- n: the number of elements to trim
## Examples
iex> array = [10, 54, 12.71, "Sean", 0, [4.2,10]]
iex> Extatistics.Base.trim(array, 4)
[12.71, "Sean"]
iex> Extatistics.Base.trim(array, 3)
[12.71, "Sean", 0]
"""
defp trim(array, n) when rem(n, 2) == 0 do
x = div(n, 2)
array
|> Enum.drop(x)
|> Enum.drop(-x)
end
defp trim(array, n) when rem(n, 2) == 1 do
lower = div(n, 2) + 1
upper = div(n, 2)
array
|> Enum.drop(lower)
|> Enum.drop(-upper)
end
@doc """
Calculates the contribution of a weight to its value (value times weight)
## Parameters
- value: a value
- weight: the weight associated with the value
## Examples
iex> Extatistics.Base.calculate_weight(5,4)
20
"""
defp calculate_weight(value, weight), do: value * weight
@doc """
Calcula a distância(desvio) de um valor e uma referência
## Parâmetros
- value: um valor
- reference: o respectivo peso do valor
## Exemplos
iex> Extatistics.Base.deviation(5,4)
1
"""
defp deviation(value, reference), do: value - reference
@doc """
Calcula a distância(desvio) absoluta de um valor e uma referência
## Parâmetros
- value: um valor
- reference: o respectivo peso do valor
## Exemplos
iex> Extatistics.Base.absolute_deviation(4,8)
4
"""
defp absolute_deviation(value, reference) do
value
|> deviation(reference)
|> abs()
end
@doc """
Calcula a distância(desvio) quadrática de um valor e uma referência
## Parâmetros
- value: um valor
- reference: o respectivo peso do valor
## Exemplos
iex> Extatistics.Base.square_deviation(4,8)
16
"""
defp square_deviation(value, reference) do
value
|> deviation(reference)
|> :math.pow(2)
end
@doc """
Calcula a média aritmética dos dados de um enumerável.
Realiza a divisão flutuante do somatório com a contagem total de todos os dados.
## Parâmetros
- array: um enumerável de números
## Exemplos
iex> Extatistics.Base.mean([2,7,25,1,12.3,0.7])
8.0
"""
@spec mean(numEnum()) :: number()
def mean(array),
do: Enum.sum(array)/Enum.count(array)
@doc """
Calcula a média ponderada dos dados de um enumerável.
Ordena os dados, apara-os e calcula a média com os dados restantes.
## Parâmetros
- array: um enumerável de números
- n: a quantidade de dados a serem aparados
## Exemplos
iex> Extatistics.Base.mean([2,7,25,1,12.3,0.7], 2)
5.575
"""
@spec mean(numEnum(), number()) :: number()
def mean(array, n) do
array
|> Enum.sort()
|> trim(n)
|> mean()
end
@doc """
Calcula a média ponderada dos dados de um lista de tuplas.
Os dados são apresentados em uma lista de tuplas de 2 elementos, representando um par
{valor, peso}. Cada valor é influenciado por seu respectivo peso. O resultado é a divisão
flutuante do somatório dos valores atualizados com os pesos, pelo somatório dos pesos.
## Parâmetros
- array: uma lista de tuplas de números com o par {valor, peso}
## Exemplos
iex> Extatistics.Base.weighted_mean([{1.5, 2}, {2, 0}, {5.84, 1}])
2.9466666666666668
"""
@spec weighted_mean(twoNumTL()) :: number()
def weighted_mean(array) do
sum = array
|> Enum.map(fn {v, w} -> calculate_weight(v, w) end)
|> Enum.sum()
sum_weights = array
|> Enum.map(fn {_v, w} -> w end)
|> Enum.sum()
sum / sum_weights
end
@doc """
Calcula a média ponderada dos dados de um array valor e um array peso.
Os valores e seus respectivos pesos são analisados de acordo com o índice dos arrays
(o valor do índice zero do vetor de valores está relacionado ao peso do indíce zero do
vetor de pesos). Os cálculos realizados são o mesmo de `weighted_mean/1`.
## Parâmetros
- values: um enumerável de valores
- weight: um enumerável de pesos
## Exemplos
iex> Extatistics.Base.weighted_mean([1.5, 2, 5.84],[2,0,1])
2.9466666666666668
"""
@spec weighted_mean(numEnum(), numEnum()) :: number()
def weighted_mean(values, weights) do
values
|> Enum.zip(weights)
|> weighted_mean()
end
@doc """
Calcula a mediana de um enumerável.
A mediana é calculada através da ordenação dos dados e da seleção do
elemento central do enumerável resultante. Quando a contagem de elementos
do enumerável for par, a mediana é o cálculo da média aritmética entre os
2 elementos centrais (não há um único elemento central, por causa da divisão
exata dos elementos em 2 grupos).
## Parâmetros
- array: um enumerável de números
## Exemplos
iex> Extatistics.Base.median([4,2,10,-6,1,1.7]) # Contagem de elementos par
1.85
iex> Extatistics.Base.median([2,10,-6,1,1.7]) # Contagem de elementos ímpar
1.7
"""
@spec median(numEnum()) :: number()
def median(array) do
n = Enum.count(array)
sorted = Enum.sort(array)
case rem(n, 2) do
0 -> (Enum.at(sorted, div(n, 2) - 1) + Enum.at(sorted, div(n, 2))) / 2
_ -> Enum.at(sorted, div(n, 2))
end
end
@doc """
Calcula a mediana ponderada dos dados de um lista de tuplas.
## Parâmetros
- array: uma lista de tuplas de números com o par {valor, peso}
## Exemplos
iex> Extatistics.Base.weighted_median([{4, 1}, {1.5, 5}, {5, 2}, {1.5, 2}, {4.7, 0}])
4
"""
@spec weighted_median(twoNumTL()) :: number()
def weighted_median(array) do
array
|> Enum.map(fn {v, w} -> calculate_weight(v, w) end)
|> Enum.sort()
|> median()
end
@doc """
Calcula a mediana ponderada dos dados de um array valor e um array peso.
## Parâmetros
- values: um enumerável de valores
- weight: um enumerável de pesos
## Exemplos
iex> Extatistics.Base.weighted_median([4,1.5,5,1.5,4.7],[1,5,2,2,0])
4
"""
@spec weighted_median(numEnum(), numEnum()) :: number()
def weighted_median(values, weights) do
values
|> Enum.zip(weights)
|> weighted_median()
end
@doc """
Calcula o desvio absoluto médio de um enumerável.
## Parâmetros
- array: um enumerável de números
## Exemplos
iex> Extatistics.Base.mean_dev([4,2,10,-6,1,1.7])
3.255555555555556
"""
@spec mean_dev(numEnum()) :: number()
def mean_dev(array) do
m = mean(array)
array
|> Enum.map(&(absolute_deviation(&1, m)))
|> mean()
end
@doc """
Calcula a variância de um enumerável
## Parâmetros
- array: um enumerável de números
## Exemplos
iex> Extatistics.Base.variance([4,2,10,-6,1,1.7])
26.601666666666667
"""
@spec variance(numEnum()) :: number()
def variance(array) do
m = mean(array)
sum = array
|> Enum.map(&(square_deviation(&1, m)))
|> Enum.sum()
sum / (Enum.count(array) - 1)
end
@doc """
Calcula o desvio padrão de um enumerável
## Parâmetros
- array: um enumerável de números
## Exemplos
iex> Extatistics.Base.stdev([4,2,10,-6,1,1.7])
5.157680357163157
"""
@spec stdev(numEnum()) :: number()
def stdev(array) do
array
|> variance()
|> :math.sqrt()
end
@doc """
Calcula o Coeficiente de Correlação de Pearson.
## Parâmetros
- array_a: enumerável de números A
- array_b: enumerável de números B
## Exemplos
iex> Extatistics.Base.corr([4,1.5,5,1.5,4.7],[1,5,2,2,0])
-0.6920927019297618
"""
@spec pearson(numEnum(), numEnum()) :: number()
def pearson(array_a, array_b) do
# Pearson correlation coefficient
mean_a = mean(array_a)
mean_b = mean(array_b)
diff_a = Enum.map(array_a, &(deviation(&1, mean_a)))
diff_b = Enum.map(array_b, &(deviation(&1, mean_b)))
sum = diff_a
|> Enum.zip(diff_b)
|> Enum.map(fn {a, b} -> calculate_weight(a, b) end)
|> Enum.sum()
sqrt_a = array_a
|> Enum.map(&(square_deviation(&1, mean_a)))
|> Enum.sum()
|> :math.sqrt()
sqrt_b = array_b
|> Enum.map(&(square_deviation(&1, mean_b)))
|> Enum.sum()
|> :math.sqrt()
sum / (sqrt_a * sqrt_b)
end
@doc """
Calcula o erro padrão de um enumerável
## Parâmetros
- array: um enumerável de números
## Exemplos
iex> Extatistics.Base.std_error([4,2,10,-6,1,1.7])
2.105614188570905
"""
@spec std_error(numEnum()) :: number()
def std_error(array) do
s = stdev(array)
n = array
|> Enum.count()
|> :math.sqrt()
s / n
end
end
|
lib/extatistics/base.ex
| 0.83104 | 0.711431 |
base.ex
|
starcoder
|
defmodule Plymio.Fontais.Codi.State do
@moduledoc false
# note: all the keys are prefixed with 'state_'
@vekil %{
state_def_new_doc:
quote do
@doc ~S"""
`new/1` creates a new instance of the module's `struct` and, if the optional
*opts* were given, calls `update/2` with the instance and the *opts*,
returning `{:ok, instance}`, else `{:error, error}`.
"""
end,
state_def_new_since: nil,
state_def_new_spec:
quote do
@spec new(any) :: {:ok, t} | {:error, error}
end,
state_def_new_header:
quote do
def new(opts \\ [])
end,
state_def_new_clause_arg0_t:
quote do
def new(%__MODULE__{} = value) do
{:ok, value}
end
end,
state_def_new_clause_arg0_l0:
quote do
def new([]) do
{:ok, %__MODULE__{}}
end
end,
state_def_new_clause_arg0_any:
quote do
def new(opts) do
with {:ok, %__MODULE__{} = state} <- new() do
state |> update(opts)
else
{:error, %{__exception__: true}} = result -> result
end
end
end,
state_def_new: [
:state_def_new_doc,
:state_def_new_since,
:state_def_new_spec,
:state_def_new_header,
:state_def_new_clause_arg0_t,
:state_def_new_clause_arg0_l0,
:state_def_new_clause_arg0_any
],
state_def_new_doc!:
quote do
@doc ~S"""
`new!/1` calls `new/1` and, if the result is `{:ok, instance}`, returns the `instance`.
"""
end,
state_def_new_since!: nil,
state_def_new_spec!:
quote do
@spec new!(any) :: t | no_return
end,
state_def_new_header!:
quote do
def new!(opts \\ [])
end,
state_def_new_clause_arg0_any!:
quote do
def new!(opts) do
opts
|> new()
|> case do
{:ok, %__MODULE__{} = state} -> state
{:error, error} -> raise error
end
end
end,
state_def_new!: [
:state_def_new_doc!,
:state_def_new_since!,
:state_def_new_spec!,
:state_def_new_header!,
:state_def_new_clause_arg0_any!
],
state_def_update_doc:
quote do
@doc ~S"""
`update/2` takes an `instance` of the module's `struct` and an optional *opts*.
The *opts* are normalised by calling the module's `update_canonical_opts/1`
and then reduced with `update_field/2`:
opts |> Enum.reduce(instance, fn {k,v}, s -> s |> update_field({k,v}) end)
`{:ok, instance}` is returned.
"""
end,
state_def_update_since: nil,
state_def_update_spec:
quote do
@spec update(t, opts) :: {:ok, t} | {:error, error}
end,
state_def_update_header:
quote do
def update(t, opts \\ [])
end,
state_def_update_clause_arg0_t_arg1_l0:
quote do
def update(%__MODULE__{} = state, []) do
{:ok, state}
end
end,
state_def_update_clause_arg0_t_arg1_any:
quote do
def update(%__MODULE__{} = state, opts) do
with {:ok, opts} <- opts |> update_canonical_opts do
opts
|> Enum.reduce_while(state, fn {k, v}, s ->
s
|> update_field({k, v})
|> case do
{:ok, %__MODULE__{} = s} -> {:cont, s}
{:error, %{__struct__: _}} = result -> {:halt, result}
end
end)
|> case do
{:error, %{__exception__: true}} = result -> result
%__MODULE__{} = value -> {:ok, value}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end,
state_def_update: [
:state_def_update_doc,
:state_def_update_since,
:state_def_update_spec,
:state_def_update_header,
:state_def_update_clause_arg0_t_arg1_l0,
:state_def_update_clause_arg0_t_arg1_any
],
state_def_update_doc!:
quote do
@doc ~S"""
`update!/2` calls `update/2` and, if the result is `{:ok, instance}`,
returns the `instance`.
"""
end,
state_def_update_since!: nil,
state_def_update_spec!:
quote do
@spec update!(t, any) :: t | no_return
end,
state_def_update_header!:
quote do
def update!(t, opts \\ [])
end,
state_def_update_clause_arg0_t_arg1_any!:
quote do
def update!(%__MODULE__{} = state, opts) do
state
|> update(opts)
|> case do
{:ok, %__MODULE__{} = state} -> state
{:error, error} -> raise error
end
end
end,
state_def_update!: [
:state_def_update_doc!,
:state_def_update_since!,
:state_def_update_spec!,
:state_def_update_header!,
:state_def_update_clause_arg0_t_arg1_any!
],
state_defp_update_field_header:
quote do
@spec update_field(t, kv) :: {:ok, t} | {:error, error}
defp update_field(state, kv)
end,
state_defp_update_field_passthru:
quote do
defp update_field(%__MODULE__{} = state, {k, v}) do
{:ok, state |> struct([{k, v}])}
end
end,
state_defp_update_field_unknown:
quote do
defp update_field(%__MODULE__{} = state, {k, v}) do
new_error_result(m: "update field #{inspect(k)} unknown", v: v)
end
end,
state_defp_update_proxy_field_passthru:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
{:ok, state |> struct!([{k, v}])}
end
end,
state_defp_update_proxy_field_unset:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field and Plymio.Fontais.Guard.is_value_unset(v) do
{:ok, state |> struct!([{k, v}])}
end
end,
state_defp_update_proxy_field_normalise:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
v
|> Plymio.Fontais.Guard.is_value_unset()
|> case do
true ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
_ ->
with {:ok, v} <- v |> proxy_field_normalise() do
{:ok, state |> struct!([{k, v}])}
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_opts_validate:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
with {:ok, opts} <- v |> Plymio.Fontais.Option.validate_opts() do
opts
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, opts}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_opts_normalise:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
with {:ok, opts} <- v |> Plymio.Fontais.Option.normalise_opts() do
opts
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, opts}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_opzioni_validate:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
with {:ok, opts} <- v |> Plymio.Fontais.Option.opzioni_validate() do
opts
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, opts}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_opzioni_normalise:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
with {:ok, opts} <- v |> Plymio.Fontais.Option.opzioni_normalise() do
opts
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, opts}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_forms_validate:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
with {:ok, forms} <- v |> Plymio.Fontais.Option.forms_validate() do
forms
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, forms}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_forms_normalise:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
with {:ok, forms} <- v |> Plymio.Fontais.Form.forms_normalise() do
forms
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, forms}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end,
state_defp_update_proxy_field_keyword:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
Keyword.keyword?(v) ->
v
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, v}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
true ->
new_error_result(m: "update keyword field #{inspect(:proxy_field)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_list:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_list(v) ->
v
|> Plymio.Fontais.Guard.is_filled_list()
|> case do
true ->
{:ok, state |> struct!([{k, v}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
true ->
new_error_result(m: "update list field #{inspect(:proxy_field)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_normalise_list:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
true ->
v
|> List.wrap()
|> case do
x when Plymio.Fontais.Guard.is_filled_list(x) ->
{:ok, state |> struct!([{k, x}])}
_ ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
end
end
end
end,
state_defp_update_proxy_field_map:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_map(v) ->
{:ok, state |> struct!([{k, v}])}
true ->
new_error_result(m: "update map field #{inspect(k)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_normalise_map:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_map(v) ->
{:ok, state |> struct!([{k, v}])}
Keyword.keyword?(v) ->
{:ok, state |> struct!([{k, v |> Map.new()}])}
true ->
new_error_result(m: "update map field #{inspect(k)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_atom:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_atom(v) ->
{:ok, state |> struct!([{k, v}])}
true ->
new_error_result(m: "update atom field #{inspect(k)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_binary:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_binary(v) ->
{:ok, state |> struct!([{k, v}])}
true ->
new_error_result(m: "update atom field #{inspect(k)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_fun:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_function(v, 1) ->
{:ok, state |> struct!([{k, v}])}
true ->
new_error_result(m: "update fun field #{inspect(k)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_fun1:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_function(v, 1) ->
{:ok, state |> struct!([{k, v}])}
true ->
new_error_result(m: "update fun/1 field #{inspect(k)} invalid", v: v)
end
end
end,
state_defp_update_proxy_field_fun2:
quote do
defp update_field(%__MODULE__{} = state, {k, v})
when k == :proxy_field do
cond do
Plymio.Fontais.Guard.is_value_unset(v) ->
{:ok, state |> struct!([{k, Plymio.Fontais.Guard.the_unset_value()}])}
is_function(v, 2) ->
{:ok, state |> struct!([{k, v}])}
true ->
new_error_result(m: "update fun/1 field #{inspect(k)} invalid", v: v)
end
end
end,
state_base_package: [
:state_def_new,
:state_def_new!,
:state_def_update,
:state_def_update!
]
}
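# Each `state_def_*` list key composes the doc/spec/header/clause fragments
# above into one complete definition; `:state_base_package` bundles new/1,
# new!/1, update/2 and update!/2.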
def __vekil__() do
@vekil
end
end
|
lib/fontais/codi/state/state.ex
| 0.816918 | 0.420481 |
state.ex
|
starcoder
|
defmodule Kaffy.Routes do
@moduledoc """
Kaffy.Routes must be "used" in your phoenix routes:
```elixir
use Kaffy.Routes, scope: "/admin", pipe_through: [:browser, :authenticate]
```
`:scope` defaults to `"/admin"`
`:pipe_through` defaults to kaffy's `[:kaffy_browser]`
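With the defaults above, Kaffy's pages are served under `/admin`, e.g.
`/admin/dashboard` and `/admin/:context/:resource` (see the routes below).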
"""
# use Phoenix.Router
defmacro __using__(options \\ []) do
scoped = Keyword.get(options, :scope, "/admin")
custom_pipes = Keyword.get(options, :pipe_through, [])
pipes = [:kaffy_browser] ++ custom_pipes
quote do
pipeline :kaffy_browser do
plug(:accepts, ["html", "json"])
plug(:fetch_session)
plug(:fetch_flash)
plug(:protect_from_forgery)
plug(:put_secure_browser_headers)
end
scope unquote(scoped), KaffyWeb do
pipe_through(unquote(pipes))
get("/", HomeController, :index, as: :kaffy_home)
get("/dashboard", HomeController, :dashboard, as: :kaffy_dashboard)
get("/tasks", TaskController, :index, as: :kaffy_task)
get("/p/:slug", PageController, :index, as: :kaffy_page)
get("/:context/:resource", ResourceController, :index, as: :kaffy_resource)
post("/:context/:resource", ResourceController, :create, as: :kaffy_resource)
post("/:context/:resource/:id/action/:action_key", ResourceController, :single_action,
as: :kaffy_resource
)
post("/:context/:resource/action/:action_key", ResourceController, :list_action,
as: :kaffy_resource
)
get("/:context/:resource/new", ResourceController, :new, as: :kaffy_resource)
get("/:context/:resource/:id", ResourceController, :show, as: :kaffy_resource)
put("/:context/:resource/:id", ResourceController, :update, as: :kaffy_resource)
delete("/:context/:resource/:id", ResourceController, :delete, as: :kaffy_resource)
get("/kaffy/api/:context/:resource", ResourceController, :api, as: :kaffy_api_resource)
end
end
end
end
|
lib/kaffy/routes.ex
| 0.707708 | 0.451387 |
routes.ex
|
starcoder
|
defmodule Apoc.Adapter.Hash do
@moduledoc """
Behaviour and functions for hashing messages.
This behaviour can be used with virtually any hashing scheme but Apoc
comes with a set of standard hashes described in
[FIPS PUB 180-4](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf).
See `Apoc.Hash.SHA224`, `Apoc.Hash.SHA256`, `Apoc.Hash.SHA384`, `Apoc.Hash.SHA512` and `Apoc.Hash.SHA`.
Note that `Apoc.Hash.SHA` is included for backwards compatibility with older applications
but should generally *not* be used in new applications.
## Differences to Erlang's Crypto
The Erlang `:crypto` module will raise `ArgumentError` if it is called with
invalid messages, so this module provides a wrapper that simply returns `:error` instead.
You can use the `hash!` function if you still want to raise exceptions.
This module also makes piping easier (see examples).
## Default Scheme
Apoc has a default hashing scheme: `Apoc.Hash.SHA256`
which will be configurable in a future version.
`Apoc.Hash` delegates to the default scheme so you can do:
As a binary:
```
iex> Apoc.Hash.hash("hi there")
{:ok,
<<155, 150, 161, 254, 29, 84, 140, 187, 201, 96, 204, 106, 2, 134, 102, 143,
215, 74, 118, 54, 103, 176, 99, 102, 251, 35, 36, 38, 159, 202, 186, 164>>}
```
Hex encoded:
```
iex> Apoc.Hash.hash_hex("hi there")
{:ok, "9b96a1fe1d548cbbc960cc6a0286668fd74a763667b06366fb2324269fcabaa4"}
```
"Apoc encoded" (URL safe Base64):
```
iex> Apoc.Hash.hash_encode("hi there")
{:ok, "m5ah_h1UjLvJYMxqAoZmj9dKdjZnsGNm-yMkJp_KuqQ"}
```
## Using a Specific Scheme
You can use any scheme directly.
```
iex> Apoc.Hash.SHA256.hash("Frankly, dear I don't give a damn")
{:ok,
<<151, 36, 41, 93, 136, 226, 106, 59, 241, 71, 212, 151, 51, 62, 217, 229, 178,
91, 149, 80, 185, 157, 172, 90, 178, 233, 238, 252, 153, 216, 63, 242>>}
```
Or like this (note that this could raise an exception if message isn't the right type).
```
iex> "I know Kung Fu"
...> |> Apoc.Hash.SHA256.hash!
...> |> Apoc.encode
"FUyFM1fucP6g_glXy3MmxWuquaWsgm5l78gIv_0Il0o"
```
## Creating your own hash scheme
You could create a (very silly and totally useless)
hashing scheme as follows:
```
defmodule MyNaiveHash do
use Apoc.Hash
def hash!(message) do
message
|> String.pad_trailing(32, <<0>>)
|> binary_part(0, 32)
end
def hash(message) do
{:ok, hash!(message)}
end
end
```
"""
@default Apoc.Hazmat.Hash.SHA256
@doc """
Generate a hash for the given message
"""
@callback hash(message :: binary) :: {:ok, binary()} | :error
@doc """
Generates a hash for the message and raises if there are any errors
"""
@callback hash!(message :: binary) :: binary()
@doc """
Generates a hash for the message in hex format (base16)
"""
@callback hash_hex(message :: binary) :: {:ok, Apoc.hexstring()} | :error
@doc """
Generates a hash for the message and then encodes with `Apoc.encode`
"""
@callback hash_encode(message :: binary) :: {:ok, Apoc.encoded_string()} | :error
defdelegate hash(message), to: @default
defdelegate hash!(message), to: @default
defdelegate hash_hex(message), to: @default
defdelegate hash_encode(message), to: @default
defmacro __using__(_) do
quote do
@behaviour unquote(__MODULE__)
def hash_hex(message) do
with {:ok, hash} <- hash(message) do
{:ok, Apoc.hex(hash)}
end
end
def hash_encode(message) do
with {:ok, hash} <- hash(message) do
{:ok, Apoc.encode(hash)}
end
end
def hash(message) do
try do
{:ok, hash!(message)}
rescue
_ -> :error
end
end
end
end
end
|
lib/apoc/adapter/hash.ex
| 0.88958 | 0.927953 |
hash.ex
|
starcoder
|
defmodule ScrapyCloudEx.Endpoints.Storage.Items do
@moduledoc """
Wraps the [Items](https://doc.scrapinghub.com/api/items.html) endpoint.
The Items API lets you interact with the items stored in the hubstorage backend for your projects.
"""
require Logger
import ScrapyCloudEx.Endpoints.Guards
alias ScrapyCloudEx.Endpoints.Helpers
alias ScrapyCloudEx.Endpoints.Storage.QueryParams
alias ScrapyCloudEx.HttpAdapter.RequestConfig
@typedoc """
A scraped item.
Map with the following (optional) keys:
* `"_type"` - the item definition (`t:String.t/0`).
* `"_template"` - the template matched against. Portia only.
* `"_cached_page_id"` - cached page ID. Used to identify the scraped page in storage.
Scraped fields will be top level alongside the internal fields listed above.
"""
@type item_object :: %{required(String.t()) => any()}
@base_url "https://storage.scrapinghub.com/items"
@doc """
Retrieve items for a project, spider, or job.
The following parameters are supported in the `params` argument:
* `:format` - the [format](ScrapyCloudEx.Endpoints.Storage.html#module-format) to be used
for returning results. Can be `:json`, `:xml`, `:csv`, or `:jl`. Defaults to `:json`.
* `:pagination` - [pagination parameters](ScrapyCloudEx.Endpoints.Storage.html#module-pagination).
* `:meta` - [meta parameters](ScrapyCloudEx.Endpoints.Storage.html#module-meta-parameters) to show.
* `:nodata` - if set, no data will be returned other than specified `:meta` keys.
Please always use pagination parameters (`:start`, `:startafter`, and `:count`) to limit the number of
items in the response and prevent timeouts and other performance issues. A warning will be logged if
the `composite_id` refers to more than a single item and no pagination parameters were provided.
The `opts` value is documented [here](ScrapyCloudEx.Endpoints.html#module-options).
See docs [here](https://doc.scrapinghub.com/api/items.html#items-project-id-spider-id-job-id-item-no-field-name) (GET method).
## Examples
Retrieve all items from a given job
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7")
```
Retrieve the first item from a given job
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/0")
```
Retrieve values from a single field
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/fieldname")
```
Retrieve all items from a given spider
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34")
```
Retrieve all items from a given project
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53")
```
## Pagination examples
Retrieve first 10 items from a given job
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7", pagination: [count: 10])
```
Retrieve 10 items from a given job starting from the 20th item
```
pagination = [count: 10, start: "53/34/7/20"]
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7", pagination: pagination)
```
Retrieve 10 items from a given job starting from the item following to the given one
```
pagination = [count: 10, startafter: "53/34/7/19"]
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7", pagination: pagination)
```
Retrieve a few items from a given job by their IDs
```
pagination = [index: 5, index: 6]
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7", pagination: pagination)
```
## Get items in a specific format
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/0", format: :json)
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/0", format: :jl)
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/0", format: :xml)
params = [format: :csv, csv: [fields: ~w(some_field some_other_field)]]
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/0", params)
```
## Get meta field from items
To get only metadata from items, pass the `nodata: true` parameter along with the meta field
that you want to get.
```
ScrapyCloudEx.Endpoints.Storage.Items.get("API_KEY", "53/34/7/0", meta: [:_key], nodata: true)
```
"""
@spec get(String.t(), String.t(), Keyword.t(), Keyword.t()) ::
ScrapyCloudEx.result([item_object()])
def get(api_key, composite_id, params \\ [], opts \\ [])
when is_api_key(api_key)
when is_binary(composite_id) and composite_id != ""
when is_list(params)
when is_list(opts) do
with %QueryParams{error: nil} = query_params <- params |> QueryParams.from_keywords() do
query_string =
query_params
|> warn_if_no_pagination(composite_id)
|> QueryParams.to_query()
base_url = [@base_url, composite_id] |> merge_sections()
RequestConfig.new()
|> RequestConfig.put(:api_key, api_key)
|> RequestConfig.put(:url, "#{base_url}?#{query_string}")
|> RequestConfig.put(:headers, Keyword.get(opts, :headers, []))
|> RequestConfig.put(:opts, opts)
|> Helpers.make_request()
else
%QueryParams{error: error} -> {:error, error}
error -> {:error, error}
end
end
@doc """
Retrieves the item stats for a given job.
The `composite_id` must have 3 sections (i.e. refer to a job).
The `opts` value is documented [here](ScrapyCloudEx.Endpoints.html#module-options).
The response will contain the following information:
| Field | Description |
| --------------------- | ---------------------------------------- |
| `counts[field]` | The number of times the field occurs. |
| `totals.input_bytes` | The total size of all requests in bytes. |
| `totals.input_values` | The total number of requests. |
See docs [here](https://doc.scrapinghub.com/api/items.html#items-project-id-spider-id-job-id-stats).
## Example
```
ScrapyCloudEx.Endpoints.Storage.Items.stats("API_KEY", "14/13/12")
```
"""
@spec stats(String.t(), String.t(), Keyword.t()) :: ScrapyCloudEx.result(map())
def stats(api_key, composite_id, opts \\ [])
when is_api_key(api_key)
when is_binary(composite_id)
when is_list(opts) do
RequestConfig.new()
|> RequestConfig.put(:api_key, api_key)
|> RequestConfig.put(:opts, opts)
|> RequestConfig.put(:url, [@base_url, composite_id, "stats"] |> merge_sections())
|> Helpers.make_request()
end
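# A composite ID maps to a single item when its last section is an all-digit
# index (e.g. "53/34/7/0"); a field name such as "53/34/7/fieldname" does not.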
@spec maps_to_single_item?(String.t()) :: boolean
defp maps_to_single_item?(id) do
id
|> String.split("/")
|> List.last()
|> String.match?(~r"^\d+$")
end
@spec warn_if_no_pagination(QueryParams.t(), String.t()) :: QueryParams.t()
defp warn_if_no_pagination(%QueryParams{} = query_params, id) when is_binary(id) do
case section_count(id) do
4 -> if !maps_to_single_item?(id), do: warn_if_no_pagination(query_params)
count when count < 4 -> warn_if_no_pagination(query_params)
_count -> :ok
end
query_params
end
@spec warn_if_no_pagination(QueryParams.t()) :: QueryParams.t()
defp warn_if_no_pagination(%QueryParams{} = query_params) do
query_params |> QueryParams.warn_if_no_pagination("#{__MODULE__}.get/4")
end
@spec section_count(String.t()) :: integer
defp section_count(id), do: id |> String.split("/") |> Enum.reject(&(&1 == "")) |> length()
@spec merge_sections([String.t()]) :: String.t()
defp merge_sections(sections), do: sections |> Enum.join("/")
end
|
lib/endpoints/storage/items.ex
| 0.891838 | 0.765681 |
items.ex
|
starcoder
|
defmodule Phoenix.PubSub.Redis do
use Supervisor
@moduledoc """
Phoenix PubSub adapter based on Redis.
To use Redis as your PubSub adapter, simply add it to your Endpoint's config:
config :my_app, MyApp.Endpoint,
pubsub: [adapter: Phoenix.PubSub.Redis,
host: "192.168.1.100", node_name: System.get_env("NODE")]
You will also need to add `:phoenix_pubsub_redis` to your deps:
defp deps do
[{:phoenix_pubsub_redis, "~> 2.1.0"}]
end
And also add `:phoenix_pubsub_redis` to your list of applications:
def application do
[mod: {MyApp, []},
applications: [..., :phoenix, :phoenix_pubsub_redis]]
end
## Options
* `:url` - The url to the redis server ie: `redis://username:password@host:port`
* `:name` - The required name to register the PubSub processes, ie: `MyApp.PubSub`
* `:node_name` - The required name of the node, defaults to Erlang --sname flag.
* `:host` - The redis-server host IP, defaults `"127.0.0.1"`
* `:port` - The redis-server port, defaults `6379`
* `:password` - The redis-server password, defaults `""`
* `:redis_pool_size` - The size of the redis connection pool. Defaults `5`
* `:pool_size` - Both the size of the local pubsub server pool and subscriber
shard size. Defaults `1`. A single pool is often enough for most use-cases,
but for high subscriber counts on a single topic or greater than 1M
clients, a pool size equal to the number of schedulers (cores) is a
well-rounded choice.
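## URL-based configuration
Instead of discrete host/port settings you can pass a `:url` (hypothetical
credentials shown):
config :my_app, MyApp.Endpoint,
pubsub: [adapter: Phoenix.PubSub.Redis,
url: "redis://user:secret@192.168.1.100:6379",
node_name: System.get_env("NODE")]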
"""
@redis_pool_size 5
@defaults [host: "127.0.0.1", port: 6379]
def start_link(name, opts) do
supervisor_name = Module.concat(name, Supervisor)
Supervisor.start_link(__MODULE__, [name, opts], name: supervisor_name)
end
@doc false
def init([server_name, opts]) do
pool_size = Keyword.fetch!(opts, :pool_size)
opts = handle_url_opts(opts)
opts = Keyword.merge(@defaults, opts)
redis_opts = Keyword.take(opts, [:host, :port, :password, :database])
pool_name = Module.concat(server_name, Pool)
namespace = redis_namespace(server_name)
node_ref = :crypto.strong_rand_bytes(24)
node_name = validate_node_name!(opts)
fastlane = opts[:fastlane]
server_opts = Keyword.merge(opts, name: server_name,
server_name: server_name,
pool_name: pool_name,
namespace: namespace,
node_ref: node_ref)
pool_opts = [
name: {:local, pool_name},
worker_module: Redix,
size: opts[:redis_pool_size] || @redis_pool_size,
max_overflow: 0
]
dispatch_rules = [{:broadcast, Phoenix.PubSub.RedisServer, [fastlane, pool_name, pool_size, namespace, node_ref]},
{:direct_broadcast, Phoenix.PubSub.RedisServer, [fastlane, pool_name, pool_size, namespace, node_ref]},
{:node_name, __MODULE__, [node_name]}]
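# :broadcast and :direct_broadcast dispatch through RedisServer; :node_name
# lookups are answered locally by node_name/1 below.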
children = [
supervisor(Phoenix.PubSub.LocalSupervisor, [server_name, pool_size, dispatch_rules]),
worker(Phoenix.PubSub.RedisServer, [server_opts]),
:poolboy.child_spec(pool_name, pool_opts, redis_opts),
]
supervise children, strategy: :rest_for_one
end
defp redis_namespace(server_name), do: "phx:#{server_name}"
defp handle_url_opts(opts) do
if opts[:url] do
do_handle_url_opts(opts)
else
opts
end
end
defp do_handle_url_opts(opts) do
info = URI.parse(opts[:url])
user_opts =
case String.split(info.userinfo || "", ":") do
[""] -> []
[username] -> [username: username]
[username, password] -> [username: username, password: password]
end
opts
|> Keyword.merge(user_opts)
|> Keyword.merge(host: info.host, port: info.port || @defaults[:port])
end
@doc false
def node_name(nil), do: node()
def node_name(configured_name), do: configured_name
defp validate_node_name!(opts) do
case opts[:node_name] || node() do
name when name in [nil, :nonode@nohost] ->
raise ArgumentError, ":node_name is a required option for unnamed nodes"
name -> name
end
end
end
|
lib/phoenix_pubsub_redis/redis.ex
| 0.743541 | 0.402216 |
redis.ex
|
starcoder
|
defmodule VintageNetMobile.ExChat do
@moduledoc """
Send commands to your modem and get notifications
This module is used by the "monitor" modules for reporting modem and
connection status.
It can be handy to debug modems too. If you'd like to send commands and
receive notifications from the IEx prompt, here's what to do:
```elixir
require Logger
RingLogger.attach
tty_name = "ttyUSB2" # set to your AT command interface
VintageNetMobile.ExChat.register(tty_name, "+", fn m -> Logger.debug("Got: " <> inspect(m)) end)
VintageNetMobile.ExChat.send(tty_name, "AT+CSQ")
```
To reset the registrations, `VintageNet.deconfigure/2` and
`VintageNet.configure/3` your modem.
"""
alias VintageNetMobile.ExChat.Core
# This limits the restart rate for this GenServer on tty errors.
# Errors usually mean the interface is going away and vintage_net
# will clean things up soon. If nothing else, the UART won't be
# pegged by restarts and the logs won't be filled with errors.
@error_delay 1000
@typedoc """
The options for the ExChat server are:
* `:speed` - the speed of the serial connection
* `:tty` - the tty name for sending AT commands
* `:uart` - use an alternative UART-provider (for testing)
* `:uart_opts` - additional options to pass to UART.open
"""
@type opt() ::
{:speed, non_neg_integer()}
| {:tty, String.t()}
| {:uart, module()}
| {:uart_opts, keyword()}
use GenServer
require Logger
@spec start_link([opt]) :: GenServer.on_start()
def start_link(opts) do
tty_name = Keyword.fetch!(opts, :tty)
GenServer.start_link(__MODULE__, opts, name: server_name(tty_name))
end
@doc """
Send a command to the modem
On success, this returns a list of the lines received back from the modem.
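## Example
A sketch (exact response lines depend on the modem):
```elixir
VintageNetMobile.ExChat.send("ttyUSB2", "AT+CSQ")
#=> {:ok, ["+CSQ: 22,99"]}
```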
"""
@spec send(binary(), iodata(), Core.send_options()) :: {:ok, [binary()]} | {:error, any()}
def send(tty_name, command, options \\ []) do
# Make sure we wait long enough for the command to be processed by the modem
command_timeout = Keyword.get(options, :timeout, 10000) + 500
GenServer.call(server_name(tty_name), {:send, command, options}, command_timeout)
end
@doc """
Helper for sending commands to the modem as best effort
This function always succeeds. Failed commands log errors, but that's it. This
is useful for monitoring operations where intermittent failures should be logged,
but really aren't worth dealing with.
"""
@spec send_best_effort(binary(), iodata(), Core.send_options()) :: :ok
def send_best_effort(tty_name, command, options \\ []) do
case send(tty_name, command, options) do
{:ok, _response} ->
:ok
error ->
Logger.warn("Send #{inspect(command)} failed: #{inspect(error)}. Ignoring...")
:ok
end
end
@doc """
Register a callback function for reports
"""
@spec register(binary(), binary(), function()) :: :ok
def register(tty_name, type, callback) do
GenServer.call(server_name(tty_name), {:register, type, callback})
end
@impl GenServer
def init(opts) do
speed = Keyword.get(opts, :speed, 115_200)
tty_name = Keyword.fetch!(opts, :tty)
uart = Keyword.get(opts, :uart, Circuits.UART)
uart_opts = Keyword.get(opts, :uart_opts, [])
{:ok, uart_ref} = uart.start_link()
all_uart_opts =
[
speed: speed,
framing: {Circuits.UART.Framing.Line, separator: "\r\n"},
rx_framing_timeout: 500
] ++ uart_opts
{:ok,
%{uart: uart, uart_ref: uart_ref, tty_name: tty_name, core: Core.init(), timer_ref: nil},
{:continue, all_uart_opts}}
end
@impl GenServer
def handle_continue(uart_opts, state) do
case state.uart.open(state.uart_ref, state.tty_name, uart_opts) do
:ok ->
{:noreply, state}
{:error, error} ->
Logger.warn("vintage_net_mobile: can't open #{state.tty_name}: #{inspect(error)}")
Process.sleep(@error_delay)
{:stop, :tty_error, state}
end
end
@impl GenServer
def handle_call({:send, command, options}, from, state) do
{new_core_state, actions} = Core.send(state.core, command, from, options)
new_state =
%{state | core: new_core_state}
|> run_actions(actions)
{:noreply, new_state}
end
@impl GenServer
def handle_call({:register, type, callback}, _from, state) do
{new_core_state, actions} = Core.register(state.core, type, callback)
new_state =
%{state | core: new_core_state}
|> run_actions(actions)
{:reply, :ok, new_state}
end
@impl GenServer
def handle_info({:circuits_uart, tty_name, {:partial, fragment}}, state) do
Logger.warn("vintage_net_mobile: dropping junk from #{tty_name}: #{inspect(fragment)}")
{:noreply, state}
end
def handle_info({:circuits_uart, tty_name, {:error, error}}, state) do
Logger.warn("vintage_net_mobile: error from #{tty_name}: #{inspect(error)}")
Process.sleep(@error_delay)
{:stop, :tty_error, state}
end
def handle_info({:circuits_uart, _tty_name, message}, state) do
{new_core_state, actions} = Core.process(state.core, message)
new_state =
%{state | core: new_core_state}
|> run_actions(actions)
{:noreply, new_state}
end
def handle_info({:timeout, core_timer_ref}, state) do
{new_core_state, actions} = Core.timeout(state.core, core_timer_ref)
new_state =
%{state | core: new_core_state, timer_ref: nil}
|> run_actions(actions)
{:noreply, new_state}
end
defp run_actions(state, actions) do
Enum.reduce(actions, state, &run_action(&2, &1))
end
defp run_action(state, {:notify, what, who}) do
apply(who, [what])
state
end
defp run_action(state, {:reply, what, who}) do
GenServer.reply(who, what)
state
end
defp run_action(state, {:send, what}) do
:ok = state.uart.write(state.uart_ref, what)
state
end
defp run_action(state, {:start_timer, timeout, core_timer_ref}) do
timer_ref = Process.send_after(self(), {:timeout, core_timer_ref}, timeout)
%{state | timer_ref: timer_ref}
end
defp run_action(state, :stop_timer) do
_ = Process.cancel_timer(state.timer_ref)
%{state | timer_ref: nil}
end
defp server_name("/dev/" <> tty_name) do
server_name(tty_name)
end
defp server_name(tty_name) do
Module.concat([__MODULE__, tty_name])
end
end
|
lib/vintage_net_mobile/ex_chat.ex
| 0.670069 | 0.539105 |
ex_chat.ex
|
starcoder
|
defmodule Segment.Encoder do
@moduledoc """
Responsible for transforming structs into JSON strings.
"""
@doc """
Encodes a given struct into a JSON string.
## Options
* `drop_nil_fields`: If set to `true` all the struct `nil` fields will be
filtered out from the JSON string. Defaults to `false`.
## Examples
iex> library = %Segment.Analytics.Context.Library{
...> name: "foo",
...> version: "1.0.0"
...> }
...> #{inspect(__MODULE__)}.encode!(library, [])
~s({"version":"1.0.0","transport":null,"name":"foo"})
iex> library = %Segment.Analytics.Context.Library{
...> name: "foo",
...> version: "1.0.0"
...> }
...> #{inspect(__MODULE__)}.encode!(library, drop_nil_fields: true)
~s({"version":"1.0.0","name":"foo"})
"""
@spec encode!(struct(), list()) :: String.t()
def encode!(struct, options) do
struct
|> maybe_drop_nil_fields(options)
|> Poison.encode!()
end
@spec maybe_drop_nil_fields(struct(), list()) :: struct() | map()
defp maybe_drop_nil_fields(struct, options) do
if Keyword.get(options, :drop_nil_fields) == true do
drop_nil_fields_from_struct(struct)
else
struct
end
end
@spec drop_nil_fields_from_struct(struct()) :: map()
defp drop_nil_fields_from_struct(struct) do
struct
|> Map.from_struct()
|> drop_nil_fields_from_map()
end
@spec drop_nil_fields_from_map(map()) :: map()
def drop_nil_fields_from_map(map), do: Enum.reduce(map, %{}, &drop_nil_fields/2)
@spec drop_nil_fields({any(), any()}, map()) :: map()
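# Recurses into nested structs, maps, and lists of structs/maps; drops any
# key whose value is nil and copies everything else unchanged.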
defp drop_nil_fields({key, value}, map) when is_struct(value),
do: Map.put(map, key, drop_nil_fields_from_struct(value))
defp drop_nil_fields({key, value}, map) when is_map(value),
do: Map.put(map, key, drop_nil_fields_from_map(value))
defp drop_nil_fields({key, [item | _items] = value}, map)
when is_list(value) and is_struct(item),
do: Map.put(map, key, Enum.map(value, &drop_nil_fields_from_struct/1))
defp drop_nil_fields({key, [item | _items] = value}, map)
when is_list(value) and is_map(item),
do: Map.put(map, key, Enum.map(value, &drop_nil_fields_from_map/1))
defp drop_nil_fields({_field, value}, map) when is_nil(value), do: map
defp drop_nil_fields({key, value}, map), do: Map.put(map, key, value)
end
|
lib/segment/encoder.ex
| 0.906963 | 0.473109 |
encoder.ex
|
starcoder
|
defmodule Asteroid.ObjectStore.GenericKV.Mnesia do
@moduledoc """
Mnesia implementation of the `Asteroid.ObjectStore.GenericKV` behaviour
## Options
The options (`Asteroid.ObjectStore.GenericKV.opts()`) are:
- `:table_name`: an `atom()` for the table name. No default, **mandatory**
- `:tab_def`: Mnesia's table definitions of the `:mnesia.create_table/2` function. Defaults to
the options below. User-defined `:tab_def` will be merged on a key basis, i.e. defaults will
not be erased
- `:purge_interval`: the `integer()` interval in seconds the purge process will be triggered,
or `:no_purge` to disable purge. Defaults to `720` (12 minutes). The purge uses the `"exp"`
attribute of the value map as the expiration unix timestamp. If the value is not a map, it
cannot be purged.
## Default Mnesia table definition
```elixir
[
attributes: [:id, :data]
]
```
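For example (hypothetical table name):
```elixir
[table_name: :generic_kv, purge_interval: 300]
```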
## Purge process
The purge process uses the `Singleton` library. Therefore the purge process will be unique
per cluster (and that's probably what you want if you use Mnesia).
"""
require Logger
@behaviour Asteroid.ObjectStore.GenericKV
@impl true
def install(opts) do
unless is_atom(opts[:table_name]) do
raise "Table name not specified for #{__MODULE__} store"
end
:mnesia.stop()
:mnesia.create_schema([node()])
:mnesia.start()
tab_def =
[
attributes: [:id, :data]
]
|> Keyword.merge(opts[:tab_def] || [])
case :mnesia.create_table(opts[:table_name], tab_def) do
{:atomic, :ok} ->
Logger.info("#{__MODULE__}: created generiv KV store #{opts[:table_name]}")
:ok
{:aborted, {:already_exists, _}} ->
Logger.info("#{__MODULE__}: generic KV store #{opts[:table_name]} already exists")
:ok
{:aborted, reason} ->
Logger.error(
"#{__MODULE__}: failed to create generic KV store #{opts[:table_name]} " <>
"(reason: #{inspect(reason)})"
)
{:error, reason}
end
end
@impl true
def start_link(opts) do
case :mnesia.start() do
:ok ->
opts = Keyword.merge([purge_interval: 12 * 60], opts)
# we launch the process anyway because we need to return a process
# but the singleton will do nothing if the value is `:no_purge`
Singleton.start_child(__MODULE__.Purge, opts, __MODULE__)
{:error, _} = error ->
error
end
end
@impl true
def get(key, opts) do
table_name = opts[:table_name] || raise "Table name not specified for #{__MODULE__} store"
case :mnesia.dirty_read(table_name, key) do
[] ->
{:ok, nil}
[{^table_name, ^key, data}] ->
{:ok, data}
_ ->
{:error, "Multiple results from Mnesia"}
end
catch
:exit, reason ->
{:error, reason}
end
@impl true
def put(key, value, opts) do
table_name = opts[:table_name] || raise "Table name not specified for #{__MODULE__} store"
:mnesia.dirty_write({table_name, key, value})
Logger.debug(
"#{__MODULE__}: stored object `#{inspect(key)}`, " <>
"value: `#{inspect(value)}` in table `#{table_name}`"
)
:ok
catch
:exit, reason ->
{:error, reason}
end
@impl true
def delete(key, opts) do
table_name = opts[:table_name] || raise "Table name not specified for #{__MODULE__} store"
:mnesia.dirty_delete(table_name, key)
Logger.debug("#{__MODULE__}: deleted object `#{key}`")
:ok
catch
:exit, reason ->
{:error, reason}
end
end
|
lib/asteroid/object_store/generic_kv/mnesia.ex
| 0.905076 | 0.856453 |
mnesia.ex
|
starcoder
|
defmodule GGity.Scale.Linetype.Discrete do
@moduledoc false
alias GGity.{Draw, Labels}
alias GGity.Scale.Linetype
@linetype_specs %{
solid: "",
dashed: "4",
dotted: "1",
longdash: "6 2",
dotdash: "1 2 3 2",
twodash: "2 2 6 2"
}
@palette [:solid, :dashed, :dotted, :longdash, :dotdash, :twodash]
defstruct transform: nil,
levels: nil,
labels: :waivers,
guide: :legend
@type t() :: %__MODULE__{}
@spec new(keyword()) :: Linetype.Discrete.t()
def new(options \\ []), do: struct(Linetype.Discrete, options)
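# Linetypes are assigned by cycling the six-entry palette, so a seventh
# level reuses :solid, an eighth :dashed, and so on.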
@spec train(Linetype.Discrete.t(), list(binary())) :: Linetype.Discrete.t()
def train(scale, [level | _other_levels] = levels) when is_list(levels) and is_binary(level) do
number_of_levels = length(levels)
palette =
@palette
|> Stream.cycle()
|> Enum.take(number_of_levels)
|> List.to_tuple()
values_map =
levels
|> Stream.with_index()
|> Stream.map(fn {level, index} ->
{level, elem(palette, index)}
end)
|> Enum.into(%{})
transform = fn value -> @linetype_specs[values_map[value]] end
struct(scale, levels: levels, transform: transform)
end
@spec draw_legend(Linetype.Discrete.t(), binary(), atom(), number()) :: iolist()
def draw_legend(%Linetype.Discrete{guide: :none}, _label, _key_glyph, _key_height), do: []
def draw_legend(%Linetype.Discrete{levels: [_]}, _label, _key_glyph, _key_height), do: []
def draw_legend(%Linetype.Discrete{levels: levels} = scale, label, key_glyph, key_height) do
[
Draw.text(
"#{label}",
x: "0",
y: "-5",
class: "gg-text gg-legend-title",
text_anchor: "left"
),
Stream.with_index(levels)
|> Enum.map(fn {level, index} ->
draw_legend_item(scale, {level, index}, key_glyph, key_height)
end)
]
end
defp draw_legend_item(scale, {level, index}, key_glyph, key_height) do
[
Draw.rect(
x: "0",
y: "#{key_height * index}",
height: key_height,
width: key_height,
class: "gg-legend-key"
),
draw_key_glyph(scale, level, index, key_glyph, key_height),
Draw.text(
"#{Labels.format(scale, level)}",
x: "#{key_height + 5}",
y: "#{10 + key_height * index}",
class: "gg-text gg-legend-text",
text_anchor: "left"
)
]
end
defp draw_key_glyph(scale, level, index, :path, key_height) do
Draw.line(
x1: 1,
y1: key_height / 2 + key_height * index,
x2: key_height - 1,
y2: key_height / 2 + key_height * index,
stroke: "black",
stroke_dasharray: "#{scale.transform.(level)}",
stroke_opacity: "1"
)
end
defp draw_key_glyph(scale, level, index, :timeseries, key_height) do
offset = key_height * index
Draw.polyline(
[
{1, key_height - 1 + offset},
{key_height / 5 * 2, key_height / 5 * 2 + offset},
{key_height / 5 * 3, key_height / 5 * 3 + offset},
{key_height - 1, 1 + offset}
],
"black",
1,
1,
scale.transform.(level)
)
end
end
|
lib/ggity/scale/linetype_discrete.ex
| 0.815783 | 0.430746 |
linetype_discrete.ex
|
starcoder
|
defmodule YubikeyOTP do
@moduledoc """
YubikeyOTP is an Elixir client for validating Yubikey one-time-passwords. It can validate OTPs using Yubico's public
API or by using your own or third-party OTP validation services.
## Requirements
This module contains all the functions you'll need to authenticate Yubikey OTPs. You will also need a developer API
ID (and optionally a shared secret) that can be got from [Yubico](https://upgrade.yubico.com/getapikey/).
You will obviously need at least one Yubikey that supports the OTP protocol.
## Example
iex> my_id = Application.get_env(:my_app, :yubikey_client_id)
iex> {:ok, service} = YubikeyOTP.service(api_id: my_id)
iex> YubikeyOTP.verify("<KEY>", service)
{:ok, :ok}
iex> YubikeyOTP.verify("<KEY>", service)
{:error, :replayed_otp}
"""
alias YubikeyOTP.Controller
alias YubikeyOTP.OTP
alias YubikeyOTP.Request
alias YubikeyOTP.Response
alias YubikeyOTP.Service
@doc """
Returns a Service structure that defines the API backend to use. Default settings are for the Yubicloud service.
The only required key is *:api_id*
You don't need to create this for each request - it can be set as a module attribute.
## Example
iex> {:ok, service} = YubikeyOTP.service(api_id: "65749337983737")
{:ok,
%YubikeyOTP.Service{
api_id: "65749337983737",
api_key: nil,
hmac: false,
timeout: 1000,
timestamp: true,
urls: ["https://api.yubico.com/wsapi/2.0/verify",
"https://api2.yubico.com/wsapi/2.0/verify",
"https://api3.yubico.com/wsapi/2.0/verify",
"https://api4.yubico.com/wsapi/2.0/verify",
"https://api5.yubico.com/wsapi/2.0/verify"]
}}
The Yubicloud API has five different endpoint URLs, and by default these are all used concurrently.
"""
@spec service(options :: map) :: {:ok, %Service{}} | {:error, atom}
def service(options) do
with {:ok, service} <- Service.new(options),
{:ok, service} <- Service.validate(service) do
{:ok, service}
else
err -> err
end
end
@doc """
Returns the device ID part of a Yubikey OTP
The first part of a Yubikey OTP is static and identifies the key itself. This ID can be used to match a key with its
owner - you don't just want to authenticate the OTP as valid, you also need to check it's the user's Yubikey.
## Example
iex> YubikeyOTP.device_id("ccccccclulvjtugnjuuufuiebhdvucdihnngndtvfjrb")
{:ok, "ccccccclulvj"}
"""
@spec device_id(otp :: binary) :: {:ok, binary} | {:error, :otp_invalid}
def device_id(otp) do
OTP.device_id(otp)
end
@doc """
Verify a Yubikey OTP using the specified service and return the status
This will contact the remote backend and process the response.
A successfully authenticated OTP will result in {:ok, :ok}. A failure will result in an :error tuple containing an
error code. Most error codes are based on the standard protocol status types and are listed in `YubikeyOTP.Errors`
## Example
iex> YubikeyOTP.verify("ccccccclulvjbthgghkbvvlcludiklkncnecncevcrlg", service)
{:ok, :ok}
iex> YubikeyOTP.verify("ccccccclulvjbthgghkbvvlcludiklkncnecncevcrlg", service)
{:error, :replayed_otp}
"""
@spec verify(otp :: binary(), service :: %Service{}) :: {:ok, :ok} | {:error, atom()}
def verify(otp, service) do
with {:ok, otp} <- OTP.validate(otp),
{:ok, request} <- Request.new(otp, service),
{:ok, request} <- Request.validate(request) do
request
|> Controller.verify(service)
else
err -> err
end
end
@doc """
Verify a Yubikey OTP using the specified service and return true or false
This will contact the remote backend and process the response. An authenticated OTP will produce `true`.
Anything other than a success will be returned as `false`.
## Example
iex> YubikeyOTP.verify?("<KEY>", service)
true
iex> YubikeyOTP.verify?("<KEY>", service)
false
"""
@spec verify?(otp :: binary, service :: %Service{}) :: true | false
def verify?(otp, service) do
case verify(otp, service) do
{:ok, :ok} -> true
{:error, _} -> false
end
end
end
|
lib/yubikey_otp.ex
| 0.850825 | 0.419856 |
yubikey_otp.ex
|
starcoder
|
defmodule Asteroid.OIDC.AuthenticationEvent do
@moduledoc """
Convenience functions to work with authentication events
The `%Asteroid.OIDC.AuthenticationEvent{}` object has the following meaningful members in
its `:data` field:
- `"name"`: the event name (`t:Asteroid.AuthenticationEvent.name/0`)
- `"amr"`: the AMR of the event (`t:Asteroid.OIDC.amr/0`)
- `"time"`: the time the authentication event occured (`non_neg_integer()`)
- `"exp"`: expiration time (`non_neg_integer()`)
"""
import Asteroid.Utils
alias Asteroid.Context
alias Asteroid.OIDC.AuthenticatedSession
alias Asteroid.Token
@type id :: String.t()
@type name :: String.t()
@enforce_keys [:id, :authenticated_session_id]
defstruct [:id, :authenticated_session_id, :data]
@type t :: %__MODULE__{
id: id(),
authenticated_session_id: AuthenticatedSession.id(),
data: map()
}
@doc """
Generates a new authentication event
"""
@spec gen_new(AuthenticatedSession.id()) :: t()
def gen_new(authenticated_session_id) do
%__MODULE__{
id: secure_random_b64(),
authenticated_session_id: authenticated_session_id,
data: %{}
}
end
@doc """
Gets an authentication event from the store
"""
@spec get(id(), Keyword.t()) :: {:ok, t()} | {:error, Exception.t()}
def get(authentication_event_id, _opts \\ []) do
ae_store_module = astrenv(:object_store_authentication_event)[:module]
ae_store_opts = astrenv(:object_store_authentication_event)[:opts] || []
case ae_store_module.get(authentication_event_id, ae_store_opts) do
{:ok, authentication_event} when not is_nil(authentication_event) ->
{:ok, authentication_event}
{:ok, nil} ->
{:error,
Token.InvalidTokenError.exception(
sort: "authentication event",
reason: "not found in the store",
id: authentication_event_id
)}
{:error, error} ->
{:error, error}
end
end
@doc """
Returns all authentication events associated to an authenticated session
"""
@spec get_from_authenticated_session_id(AuthenticatedSession.id()) :: [%__MODULE__{}]
def get_from_authenticated_session_id(auth_session_id) do
ae_store_module = astrenv(:object_store_authentication_event)[:module]
ae_store_opts = astrenv(:object_store_authentication_event)[:opts] || []
case ae_store_module.get_from_authenticated_session_id(auth_session_id, ae_store_opts) do
{:ok, auth_event_ids} ->
Enum.reduce(
auth_event_ids,
[],
fn
auth_event_id, acc ->
case get(auth_event_id) do
{:ok, auth_event} ->
[auth_event | acc]
{:error, _} ->
acc
end
end
)
_ ->
[]
end
end
@doc """
Stores an authentication event
"""
@spec store(t(), Context.t()) :: {:ok, t()} | {:error, any()}
def store(authentication_event, ctx \\ %{})
def store(authentication_event, ctx) do
ae_store_module = astrenv(:object_store_authentication_event)[:module]
ae_store_opts = astrenv(:object_store_authentication_event)[:opts] || []
authentication_event =
astrenv(:object_store_authentication_event_before_store_callback).(
authentication_event,
ctx
)
case ae_store_module.put(authentication_event, ae_store_opts) do
:ok ->
AuthenticatedSession.update_acr(authentication_event.authenticated_session_id)
{:ok, authentication_event}
{:error, _} = error ->
error
end
end
@doc """
Deletes an authentication event
"""
@spec delete(t() | id()) :: :ok | {:error, any()}
def delete(%__MODULE__{id: id, authenticated_session_id: authenticated_session_id}) do
ae_store_as_module = astrenv(:object_store_authentication_event)[:module]
ae_store_as_opts = astrenv(:object_store_authentication_event)[:opts] || []
res = ae_store_as_module.delete(id, ae_store_as_opts)
AuthenticatedSession.update_acr(authenticated_session_id)
res
end
def delete(authentication_event_id) when is_binary(authentication_event_id) do
{:ok, authentication_event} = get(authentication_event_id)
delete(authentication_event)
end
@doc """
Puts a value into the `data` field of an authentication event
If the value is `nil`, the authentication event is not changed and the field is not added.
"""
@spec put_value(t(), any(), any()) :: t()
def put_value(authentication_event, _key, nil), do: authentication_event
def put_value(authentication_event, key, val) do
%{authentication_event | data: Map.put(authentication_event.data, key, val)}
end
@doc """
Removes a value from the `data` field of an authentication event
If the value does not exist, does nothing.
"""
@spec delete_value(t(), any()) :: t()
def delete_value(authentication_event, key) do
%{authentication_event | data: Map.delete(authentication_event.data, key)}
end
end
|
lib/asteroid/oidc/authentication_event.ex
| 0.887101 | 0.410018 |
authentication_event.ex
|
starcoder
|
# A toplevel comment.
defmodule Erl2ex.Pipeline.ExComment do
@moduledoc false
defstruct(
# List of comments, one per line. Each comment must begin with a hash "#".
comments: []
)
end
# A module attribute.
defmodule Erl2ex.Pipeline.ExAttr do
@moduledoc false
defstruct(
# Name of the attribute as an atom.
name: nil,
# Whether to register the attribute.
register: false,
# List of arguments. Most attributes have a single argument (the value).
arg: nil,
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# The Elixir form of an Erlang compiler directive (such as ifdef).
# This is represented as an abstract directive here, and codegen takes care
# of generating Elixir compile-time code.
defmodule Erl2ex.Pipeline.ExDirective do
@moduledoc false
defstruct(
# The directive as an atom.
directive: nil,
# The referenced name (e.g. the macro name for ifdef), as an atom.
name: nil,
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# A directive to import a module.
defmodule Erl2ex.Pipeline.ExImport do
@moduledoc false
defstruct(
# The name of the module, as an atom.
module: nil,
# List of functions to import, each as {name_as_atom, arity_as_integer}.
funcs: [],
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# An Elixir macro
defmodule Erl2ex.Pipeline.ExMacro do
@moduledoc false
defstruct(
# Elixir AST for the signature of the macro
signature: nil,
# The macro name as an atom.
macro_name: nil,
# The name of an attribute that tracks whether the macro is defined, as an atom.
tracking_name: nil,
# The name of an attribute that tracks the current macro name, as an atom.
# Used when a macro is redefined in a module, which Elixir doesn't allow. So we
# define macros with different names, and use this attribute to specify which name
# we are using.
dispatch_name: nil,
# A map from argument name (as atom) to a variable name used for the stringified
# form of the argument (i.e. the Erlang "??" preprocessor operator).
stringifications: nil,
# Elixir AST for the macro replacement when expanded in normal context.
expr: nil,
# Elixir AST for the macro replacement when expanded in a guard context, or
# nil if the expansion should not be different from normal context.
guard_expr: nil,
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# An Elixir record.
defmodule Erl2ex.Pipeline.ExRecord do
@moduledoc false
defstruct(
# The tag atom used in the record
tag: nil,
# The name of the record macro, as an atom.
macro: nil,
# The name of an attribute that stores the record definition.
data_attr: nil,
# The record fields, as Elixir AST.
fields: [],
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# An Elixir type definition.
defmodule Erl2ex.Pipeline.ExType do
@moduledoc false
defstruct(
# One of the following: :opaque, :type, :typep
kind: nil,
# An Elixir AST describing the type and its parameters (which may be empty).
signature: nil,
# Elixir AST describing the type's definition
defn: nil,
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# An Elixir function spec.
defmodule Erl2ex.Pipeline.ExSpec do
@moduledoc false
defstruct(
# Either :spec or :callback.
kind: nil,
# Name of the function specified.
name: nil,
# List of Elixir ASTs describing the specs.
specs: [],
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# The header for an Elixir module. Includes auto-generated pieces such as
# require statements for Bitwise and Record, if needed, as well as various
# macros, attributes, etc. needed to implement Erlang semantics.
defmodule Erl2ex.Pipeline.ExHeader do
@moduledoc false
defstruct(
# True if Bitwise operators are used in this module.
use_bitwise: false,
# True if the Erlang is_record BIF is used (so Elixir needs to require Record)
has_is_record: false,
# List of {record_name, [record_fields]} so codegen can define the records.
records: [],
# List of macro names that are not initialized explicitly and probably should be
# initialized from environment variables.
init_macros: [],
# The name of the macro dispatcher macro (if needed) as an atom, or nil if the
# dispatcher is not needed.
macro_dispatcher: nil,
# The name of the macro that returns record size, or nil if not needed.
record_size_macro: nil,
# The name of the macro that computes record index, or nil if not needed.
record_index_macro: nil
)
end
# An Elixir function.
defmodule Erl2ex.Pipeline.ExFunc do
@moduledoc false
defstruct(
# The name of the function.
name: nil,
# Arity of the function, as an integer
arity: nil,
# Whether the function should be public.
public: false,
# Not currently used. Later we expect we'll consolidate specs for the function
# here instead of emitting them separately.
specs: [],
# List of ExClause structures.
clauses: [],
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# A single clause of an Elixir function.
defmodule Erl2ex.Pipeline.ExClause do
@moduledoc false
defstruct(
# Elixir AST of the function signature.
signature: nil,
# List of Elixir ASTs representing the list of expressions in the function.
exprs: [],
# List of pre-form comments, one per line. Each must begin with a hash "#".
comments: []
)
end
# The full Elixir module representation.
defmodule Erl2ex.Pipeline.ExModule do
@moduledoc false
defstruct(
# Name of the module, as an atom.
name: nil,
# List of top-of-file comments, one per line. Each must begin with a hash "#".
file_comments: [],
# List of top-of-module comments, one per line. These are indented within the
# module definition. Each must begin with a hash "#".
comments: [],
# List of forms (other structures from this file).
forms: []
)
end
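# An illustrative instance of the module representation above (all field
# values are hypothetical):
#
#     %Erl2ex.Pipeline.ExModule{
#       name: :my_module,
#       comments: ["# Converted from my_module.erl"],
#       forms: [
#         %Erl2ex.Pipeline.ExImport{module: :lists, funcs: [map: 2]}
#       ]
#     }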
|
lib/erl2ex/pipeline/ex_data.ex
| 0.755637 | 0.546194 |
ex_data.ex
|
starcoder
|
defmodule HamRadio.Grid do
@moduledoc """
Converts between coordinates and Maidenhead grid locators.
"""
@alphabet ~w(A B C D E F G H I J K L M N O P Q R S T U V W X Y Z)
# Anchored at the start so embedded matches (e.g. "XFN32") are rejected.
@regex ~r/^[A-R]{2}[0-9]{2}($|([a-x]{2}$))/i
@type coord :: {lat :: float, lon :: float}
@type coord_bounds ::
{{lat_min :: float, lat_max :: float}, {lon_min :: float, lon_max :: float}}
@type grid_length :: 4 | 6
@doc """
Converts a coordinate pair into a grid square.
The `length` can be `4` (default) or `6`, returning grids like `"FN32"` or `"FN32ab"`, respectively.
"""
@spec encode(coord, grid_length) :: {:ok, String.t()} | :error
def encode(coord, length \\ 4)
def encode({lat, lon}, length)
when length in [4, 6] and lat >= -90.0 and lat <= 90 and lon >= -180 and lon <= 180 do
# Normalize from (-90, -180) to (0, 0)
lon = lon + 180.0
lat = lat + 90.0
# Map lon from 0 to 17 (A to R)
lon_index_1 = trunc(lon / 20.0)
lat_index_1 = trunc(lat / 10.0)
# 20 degrees lon per grid
lon = lon - lon_index_1 * 20.0
# 10 degrees lat per grid
lat = lat - lat_index_1 * 10.0
# Map from 0 to 9
lon_index_2 = trunc(lon / 2.0)
lat_index_2 = trunc(lat)
# Convert to string
grid =
"#{Enum.at(@alphabet, lon_index_1)}#{Enum.at(@alphabet, lat_index_1)}#{lon_index_2}#{
lat_index_2
}"
if length == 6 do
# Now 2 degrees lon per grid remaining
lon = lon - lon_index_2 * 2.0
# Now 1 degree lon per grid remaining
lat = lat - lat_index_2
# Map from 0 to 23 (a to x)
lon_index_3 = trunc(lon / (2.0 / 24.0))
lat_index_3 = trunc(lat / (1.0 / 24.0))
# Return 6-letter grid
{
:ok,
"#{grid}#{String.downcase(Enum.at(@alphabet, lon_index_3))}#{
String.downcase(Enum.at(@alphabet, lat_index_3))
}"
}
else
# Return 4-letter grid
{:ok, grid}
end
end
def encode(_, _), do: :error
@doc """
Converts a coordinate pair into a grid square.
Raises `ArgumentError` if the coordinates are invalid.
"""
@spec encode!(coord, grid_length) :: String.t() | no_return
def encode!(coord, length \\ 4) do
case encode(coord, length) do
{:ok, grid} -> grid
:error -> raise ArgumentError, "Invalid coordinate or grid length"
end
end
@doc """
Converts a grid square into a coordinate pair.
The coordinate is located at the center of the grid square.
Returns `:error` if the grid is invalid.
"""
@spec decode(String.t()) :: {:ok, coord} | :error
def decode(grid) when is_binary(grid) do
if valid?(grid) do
{:ok, decode_valid!(grid)}
else
:error
end
end
def decode(_), do: :error
@doc """
Converts a grid square into a coordinate pair.
Raises `ArgumentError` if the grid is invalid.
"""
@spec decode!(String.t()) :: coord | no_return
def decode!(grid) do
case decode(grid) do
{:ok, coord} -> coord
:error -> raise ArgumentError, "Invalid grid #{inspect(grid)}"
end
end
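# Round-trip sketch (coordinates are illustrative):
#
#     {:ok, grid} = HamRadio.Grid.encode({42.35, -71.06}, 6)
#     {:ok, {lat, lon}} = HamRadio.Grid.decode(grid)
#
# Decoding returns the center of the grid square, so the result is close
# to, but not exactly, the original coordinate.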
@doc """
Converts a grid square into the boundaries of its enclosing rectangle.
Returns `:error` if the grid is invalid.
"""
@spec decode_bounds(String.t()) :: {:ok, coord_bounds} | :error
def decode_bounds(grid) do
with {:ok, {lat, lon}} <- decode(grid) do
{lat_offset, lon_offset} =
if String.length(grid) == 4,
do: {0.5, 1.0},
else: {0.05, 0.1}
{:ok,
{
{lat - lat_offset, lat + lat_offset},
{lon - lon_offset, lon + lon_offset}
}}
end
end
@doc """
Converts a grid square into the boundaries of its enclosing rectangle.
Raises `ArgumentError` if the grid is invalid.
"""
@spec decode_bounds!(String.t()) :: coord_bounds | no_return
def decode_bounds!(grid) do
case decode_bounds(grid) do
{:ok, bounds} -> bounds
:error -> raise ArgumentError, "Invalid grid #{inspect(grid)}"
end
end
@doc """
Determines whether a grid square is valid.
"""
@spec valid?(String.t()) :: boolean
def valid?(grid) do
Regex.match?(@regex, grid)
end
@doc """
Normalizes the string casing of a grid square.
iex> HamRadio.Grid.format("fn32ab")
"FN32ab"
"""
@spec format(String.t()) :: String.t()
def format(grid) do
{a, b} = String.split_at(grid, 2)
String.upcase(a) <> b
end
# PRIVATE
defp decode_valid!(grid) do
lon = -180.0
lat = -90.0
lon_ord_1 =
Enum.find_index(@alphabet, fn letter -> String.upcase(String.at(grid, 0)) == letter end)
lat_ord_1 =
Enum.find_index(@alphabet, fn letter -> String.upcase(String.at(grid, 1)) == letter end)
lon_ord_2 = grid |> String.at(2) |> String.to_integer()
lat_ord_2 = grid |> String.at(3) |> String.to_integer()
lon = lon + 360.0 / 18.0 * lon_ord_1 + 360.0 / 18.0 / 10.0 * lon_ord_2
lat = lat + 180.0 / 18.0 * lat_ord_1 + 180.0 / 18.0 / 10.0 * lat_ord_2
case String.length(grid) do
4 ->
lon = lon + 360.0 / 18.0 / 10.0 / 2.0
lat = lat + 180.0 / 18.0 / 10.0 / 2.0
{lat, lon}
6 ->
lon_ord_3 =
Enum.find_index(@alphabet, fn letter -> String.upcase(String.at(grid, 4)) == letter end)
lat_ord_3 =
Enum.find_index(@alphabet, fn letter -> String.upcase(String.at(grid, 5)) == letter end)
lon = lon + 360.0 / 18.0 / 10.0 / 24.0 * (lon_ord_3 + 0.5)
lat = lat + 180.0 / 18.0 / 10.0 / 24.0 * (lat_ord_3 + 0.5)
{lat, lon}
_ ->
raise "Invalid grid passed validation check: '#{grid}'"
end
end
end
|
lib/ham_radio/grid.ex
| 0.936829 | 0.719162 |
grid.ex
|
starcoder
|
defmodule ExQuickBooks.OAuth do
@moduledoc """
Functions for interacting with the OAuth API.
QuickBooks uses the three-legged OAuth 1.0a flow. For a human-readable
overview of the whole flow and how to implement it, see e.g.
[oauthbible.com](http://oauthbible.com/#oauth-10a-three-legged).
## Request token
To start the authentication flow, your application needs to get a request
token using `get_request_token/1`:
```
{:ok, request_token} = ExQuickBooks.OAuth.get_request_token(callback_url)
```
The token is an `ExQuickBooks.OAuth.RequestToken`, see its documentation for
more details.
You should redirect the user to `request_token.redirect_url` to authorise
your application to access their QuickBooks data. After that step they are
redirected to the given callback URL.
If you need to persist data (such as the request token) between this request
and the callback, you could store that data e.g. in the current user’s
(encrypted) session.
## Callback
After authorisation, the user is redirected to your callback URL with these
request parameters:
- `"realmId"` -
ID of the user’s QuickBooks realm. Note the camel-cased name.
- `"oauth_verifier"` -
Token verifier string you can use to retrieve an access token.
There are more parameters as well, but these are most relevant.
## Access token
You can now exchange the request token, realm ID, and the verifier from the
callback request parameters for an access token using `get_access_token/3`:
```
{:ok, access_token} = ExQuickBooks.OAuth.get_access_token(request_token, realm_id, verifier)
```
Now you can store the access token and use it in API calls to authenticate on
behalf of the user. The token is an `ExQuickBooks.OAuth.AccessToken`, see its
documentation for more details.
"""
use ExQuickBooks.Endpoint, base_url: ExQuickBooks.oauth_api
alias ExQuickBooks.OAuth.AccessToken
alias ExQuickBooks.OAuth.RequestToken
@doc """
Retrieves a new request token.
The callback URL must be an absolute URL where the user is redirected after
authorising your application.
Returns the request token with a URL where your application should redirect
the user as `request_token.redirect_url`.
"""
@spec get_request_token(String.t) ::
{:ok, RequestToken.t} | {:error, any}
def get_request_token(callback_url) do
result =
request(:post, "get_request_token", nil, nil, params: [
{"oauth_callback", callback_url}
])
|> sign_request
|> send_request
with {:ok, response} <- result,
{:ok, body} <- parse_body(response),
{:ok, token} <- parse_token(body),
do: {:ok, create_request_token(token)}
end
@doc """
Exchanges a request token, realm ID, and token verifier for an access token.
You should have previously received the realm ID and token verifier in the
callback URL params as `"realmId"` and `"oauth_verifier"`.
"""
@spec get_access_token(RequestToken.t, String.t, String.t) ::
{:ok, AccessToken.t} | {:error, any}
def get_access_token(request_token = %RequestToken{}, realm_id, verifier) do
result =
request(:post, "get_access_token", nil, nil, params: [
{"oauth_verifier", verifier}
])
|> sign_request(request_token)
|> send_request
with {:ok, response} <- result,
{:ok, body} <- parse_body(response),
{:ok, token} <- parse_token(body),
do: {:ok, create_access_token(token, realm_id)}
end
defp parse_body(%{body: body}) when is_binary(body) do
{:ok, URI.decode_query(body)}
end
defp parse_body(_) do
{:error, "Response body was malformed."}
end
defp parse_token(%{"oauth_token" => token, "oauth_token_secret" => secret}) do
{:ok, %{token: token, token_secret: secret}}
end
defp parse_token(body = %{"oauth_problem" => _}) do
{:error, body}
end
defp parse_token(_) do
{:error, "Response body did not contain oauth_token or oauth_problem."}
end
defp create_request_token(token) do
values =
token
|> Map.put(:redirect_url, redirect_url(token))
|> Map.to_list
struct!(RequestToken, values)
end
defp create_access_token(token, realm_id) do
values =
token
|> Map.put(:realm_id, realm_id)
|> Map.to_list
struct!(AccessToken, values)
end
defp redirect_url(%{token: token}) do
"https://appcenter.intuit.com/Connect/Begin?oauth_token=#{token}"
end
end
|
lib/exquickbooks/oauth.ex
| 0.916067 | 0.826257 |
oauth.ex
|
starcoder
|
defmodule Nx.Shape do
# Conveniences for manipulating shapes internal to Nx.
@moduledoc false
@doc """
Converts a shape to an algebra document for inspection.
"""
def to_algebra(shape, names, open, close) do
# TODO: Use Enum.zip_with on Elixir v1.12
shape
|> Tuple.to_list()
|> Enum.zip(names)
|> Enum.map(fn
{number, nil} ->
Inspect.Algebra.concat([open, Integer.to_string(number), close])
{number, name} ->
Inspect.Algebra.concat([
open,
Atom.to_string(name),
": ",
Integer.to_string(number),
close
])
end)
|> Inspect.Algebra.concat()
end
@doc """
Validates the names of axes.
"""
def named_axes!(names, shape) do
n_dims = tuple_size(shape)
if names do
n_names = length(names)
if n_names != n_dims do
raise ArgumentError,
"invalid names for tensor of rank #{n_dims}," <>
" when specifying names every dimension must" <>
" have a name or be nil"
else
names
end
else
List.duplicate(nil, n_dims)
end
end
@doc """
Finds the axis for the given name.
"""
def find_name!(names, name) do
Enum.find_index(names, &(&1 == name)) ||
raise(
ArgumentError,
"tensor does not have name #{inspect(name)}. The tensor names are: #{inspect(names)}"
)
end
@doc """
Broadcasts a shape to a new shape.
The dimensions of `shape` is expanded to match the
dimensions of `new_shape` according to the axes
mapping.
## Examples
### Scalars
iex> Nx.Shape.broadcast!({}, {4, 2, 1, 5}, [])
:ok
iex> Nx.Shape.broadcast!({}, {}, [])
:ok
### n-D shapes
iex> Nx.Shape.broadcast!({1}, {2, 3, 4}, [2])
:ok
iex> Nx.Shape.broadcast!({4, 2, 3}, {4, 3, 4, 2, 3}, [2, 3, 4])
:ok
### Custom axes
iex> Nx.Shape.broadcast!({2}, {2, 3}, [0])
:ok
### Error cases
iex> Nx.Shape.broadcast!({4, 2, 2}, {1, 1}, [0, 1, 2])
** (ArgumentError) cannot broadcast tensor of dimensions {4, 2, 2} to {1, 1} with axes [0, 1, 2]
iex> Nx.Shape.broadcast!({2, 2}, {2, 2, 2}, [1, 0])
** (ArgumentError) broadcast axes must be ordered, got 0 after 1
"""
def broadcast!(old_shape, new_shape, axes)
def broadcast!(old_shape, new_shape, axes)
when is_tuple(old_shape) and is_tuple(new_shape) and is_list(axes) do
old_rank = tuple_size(old_shape)
new_rank = tuple_size(new_shape)
if length(axes) != old_rank do
raise ArgumentError,
"expected length of axes (#{length(axes)}) to match rank of shape (#{old_rank})"
end
if old_rank > new_rank or not valid_broadcast?(axes, 0, -1, old_shape, new_shape) do
raise ArgumentError,
"cannot broadcast tensor of dimensions #{inspect(old_shape)} " <>
"to #{inspect(new_shape)} with axes #{inspect(axes)}"
end
:ok
end
defp valid_broadcast?([head | tail], axis, last, old_shape, new_shape) do
if head < last do
raise ArgumentError, "broadcast axes must be ordered, got #{head} after #{last}"
end
old_dim = elem(old_shape, axis)
new_dim = elem(new_shape, head)
(old_dim == 1 or old_dim == new_dim) and
valid_broadcast?(tail, axis + 1, head, old_shape, new_shape)
end
defp valid_broadcast?([], _axis, _head, _old_shape, _new_shape), do: true
@doc """
Broadcasts two shapes to a common shape.
The dimensions of either shape can be expanded to match
the dimension of the other. This differs from a normal
broadcast, where one shapes dimensions remain fixed,
while the other's are expanded to match.
## Examples
### Scalar Shapes
iex> Nx.Shape.binary_broadcast({}, [], {}, [])
{{}, []}
iex> Nx.Shape.binary_broadcast({}, [], {4, 2, 1, 5}, [:batch, nil, :data, nil])
{{4, 2, 1, 5}, [:batch, nil, :data, nil]}
### n-D Shapes
iex> Nx.Shape.binary_broadcast({8, 1, 6, 1}, [:batch, nil, :data, nil], {7, 1, 5}, [:time, :data, nil])
{{8, 7, 6, 5}, [:batch, :time, :data, nil]}
iex> Nx.Shape.binary_broadcast({7, 1, 5}, [:time, :data, nil], {8, 1, 6, 1}, [:batch, nil, :data, nil])
{{8, 7, 6, 5}, [:batch, :time, :data, nil]}
iex> Nx.Shape.binary_broadcast({5, 4}, [nil, nil], {1}, [:data])
{{5, 4}, [nil, :data]}
iex> Nx.Shape.binary_broadcast({3, 1}, [:x, :y], {15, 3, 5}, [:batch, :x, nil])
{{15, 3, 5}, [:batch, :x, :y]}
### Error cases
iex> Nx.Shape.binary_broadcast({4, 2, 5}, [nil, nil, nil], {3, 2, 5}, [:batch, :x, :y])
** (ArgumentError) cannot broadcast tensor of dimensions {4, 2, 5} to {3, 2, 5}
iex> Nx.Shape.binary_broadcast({1, 2, 5}, [:batch, :x, :y], {3, 2, 5}, [:time, :x, :y])
** (ArgumentError) cannot merge names :batch, :time
"""
def binary_broadcast(left_shape, left_names, right_shape, right_names)
def binary_broadcast(shape, names, shape, names), do: {shape, names}
def binary_broadcast(left_shape, left_names, right_shape, right_names)
when is_tuple(left_shape) and is_tuple(right_shape) do
left_rank = tuple_size(left_shape)
right_rank = tuple_size(right_shape)
rank = max(left_rank, right_rank)
left_lower_and_names =
shape_and_names_to_lower_ranked_list(
left_shape,
Enum.reverse(left_names),
left_rank,
rank
)
right_lower_and_names =
shape_and_names_to_lower_ranked_list(
right_shape,
Enum.reverse(right_names),
right_rank,
rank
)
{left_lower, left_names} = Enum.unzip(left_lower_and_names)
{right_lower, right_names} = Enum.unzip(right_lower_and_names)
case binary_broadcast(left_lower, left_names, right_lower, right_names, [], []) do
{:ok, new_shape, new_names} ->
{new_shape, new_names}
:error ->
raise ArgumentError,
"cannot broadcast tensor of dimensions #{inspect(left_shape)} " <>
"to #{inspect(right_shape)}"
end
end
defp binary_broadcast(
[ldim | ldims],
[lname | lnames],
[rdim | rdims],
[rname | rnames],
shape_acc,
names_acc
)
when rdim == 1 or ldim == 1 or rdim == ldim do
names_acc = [merge_names!(lname, rname) | names_acc]
binary_broadcast(ldims, lnames, rdims, rnames, [max(rdim, ldim) | shape_acc], names_acc)
end
defp binary_broadcast([], [], [], [], shape_acc, names_acc),
do: {:ok, List.to_tuple(shape_acc), names_acc}
defp binary_broadcast(_, _, _, _, _, _),
do: :error
defp shape_and_names_to_lower_ranked_list(_tuple, _names, 0, 0),
do: []
defp shape_and_names_to_lower_ranked_list(tuple, [], 0, rank),
do: [{1, nil} | shape_and_names_to_lower_ranked_list(tuple, [], 0, rank - 1)]
defp shape_and_names_to_lower_ranked_list(tuple, [n | names], size, rank),
do: [
{:erlang.element(size, tuple), n}
| shape_and_names_to_lower_ranked_list(tuple, names, size - 1, rank - 1)
]
@doc """
Contracts a shape along the given axes.
It expects the axes to have been normalized.
## Examples
iex> Nx.Shape.contract({4, 1, 2}, [1], [:batch, :x, :y], false)
{{4, 2}, [:batch, :y]}
iex> Nx.Shape.contract({2, 4, 6, 5}, [1, 3], [:batch, :x, :y, :z], false)
{{2, 6}, [:batch, :y]}
iex> Nx.Shape.contract({1, 2, 3}, [], [:batch, :x, :y], false)
{{1, 2, 3}, [:batch, :x, :y]}
iex> Nx.Shape.contract({4, 2, 8}, [2], [:x, :y, :z], false)
{{4, 2}, [:x, :y]}
iex> Nx.Shape.contract({4, 2, 8}, [2], [:x, :y, :z], true)
{{4, 2, 1}, [:x, :y, :z]}
"""
def contract(shape, axes, names, keep_axes) do
{new_shape, new_names} =
Enum.unzip(contract(shape, axes, names, 0, tuple_size(shape), keep_axes))
{List.to_tuple(new_shape), new_names}
end
defp contract(_shape, _axes, _names, n, n, _keep_axes) do
[]
end
defp contract(shape, axes, [name | names], i, n, keep_axes) do
cond do
i not in axes ->
[{elem(shape, i), name} | contract(shape, axes, names, i + 1, n, keep_axes)]
keep_axes ->
[{1, name} | contract(shape, axes, names, i + 1, n, keep_axes)]
true ->
contract(shape, axes, names, i + 1, n, keep_axes)
end
end
@doc """
Transposes a shape according to the given permutation.
## Examples
iex> Nx.Shape.transpose({4, 8, 2, 1}, [1, 0, 3, 2], [:batch, :channels, :height, :width])
{{8, 4, 1, 2}, [:channels, :batch, :width, :height]}
### Error cases
iex> Nx.Shape.transpose({4, 8, 2, 1}, [0, 1, 2], [:batch, nil, nil, nil])
** (ArgumentError) expected length of permutation (3) to match rank of shape (4)
"""
def transpose(shape, permutation, names)
def transpose(shape, permutation, names) when tuple_size(shape) == length(permutation) do
{new_shape, new_names} =
Enum.unzip(Enum.map(permutation, &{elem(shape, &1), Enum.at(names, &1)}))
{List.to_tuple(new_shape), new_names}
end
def transpose(shape, permutation, _names) do
raise ArgumentError,
"expected length of permutation (#{length(permutation)})" <>
" to match rank of shape (#{tuple_size(shape)})"
end
@doc """
Computes the shape for zip_reduce.
In order for the dimensions to be correct, the value of each shape
at the given axes must match. It expects axes to have already been
normalized.
## Examples
iex> Nx.Shape.zip_reduce({1, 2, 3}, [0, 1], [:batch, :x, :y], {3, 1, 2}, [1, 2], [:batch, :x, :y])
{{3, 3}, [:y, :batch]}
iex> Nx.Shape.zip_reduce({1, 2, 3}, [0, 1], [nil, nil, nil], {1, 2, 3}, [1, 2], [nil, nil, nil])
** (ArgumentError) dot/zip expects shapes to be compatible, dimension 0 of left-side (1) does not equal dimension 1 of right-side (2)
iex> Nx.Shape.zip_reduce({2, 2}, [1], [:x, :y], {2, 2}, [0], [:y, :x])
** (ArgumentError) operation would result in duplicate names [:x, :x], please rename your tensors to avoid duplicates
"""
def zip_reduce(s1, axes1, names1, s2, axes2, names2) do
validate_zip_reduce_axes!(s1, axes1, s2, axes2)
{l1, n1} = Enum.unzip(contract(s1, axes1, names1, 0, tuple_size(s1), false))
{l2, n2} = Enum.unzip(contract(s2, axes2, names2, 0, tuple_size(s2), false))
new_names = n1 ++ n2
non_nil_names = Enum.filter(new_names, &(&1 != nil))
if length(non_nil_names) != length(Enum.uniq(non_nil_names)),
do:
raise(
ArgumentError,
"operation would result in duplicate names #{inspect(new_names)}," <>
" please rename your tensors to avoid duplicates"
)
{List.to_tuple(l1 ++ l2), n1 ++ n2}
end
def validate_zip_reduce_axes!(s1, [a1 | axes1], s2, [a2 | axes2]) do
d1 = elem(s1, a1)
d2 = elem(s2, a2)
if d1 == d2 do
validate_zip_reduce_axes!(s1, axes1, s2, axes2)
else
raise ArgumentError,
"dot/zip expects shapes to be compatible," <>
" dimension #{a1} of left-side (#{d1}) does not equal" <>
" dimension #{a2} of right-side (#{d2})"
end
end
def validate_zip_reduce_axes!(_, [], _, []) do
:ok
end
@doc """
Calculates the padding needed for same padding accounting for stride.
Only calculates padding on the edges, not dilations.
## Examples
iex> Nx.Shape.calculate_padding({4, 4}, {2, 2}, [1, 1])
[{0, 1}, {0, 1}]
iex> Nx.Shape.calculate_padding({3, 3}, {2, 2}, [2, 2])
[{0, 1}, {0, 1}]
"""
def calculate_padding(shape, window, strides)
when is_tuple(shape) and is_tuple(window) and is_list(strides) do
validate_window!(shape, window)
validate_strides!(shape, strides)
calculate_padding(strides, shape, window, 0)
end
def calculate_padding([], _shape, _window, _pos), do: []
def calculate_padding([s | strides], shape, window, pos) do
dim = elem(shape, pos)
w = elem(window, pos)
output_dim = ceil(dim / s)
padding_size = max((output_dim - 1) * s + w - dim, 0)
lo = floor(padding_size / 2)
hi = ceil(padding_size / 2)
[{lo, hi} | calculate_padding(strides, shape, window, pos + 1)]
end
@doc """
Calculates the padding needed for same padding not accounting for stride.
"""
def calculate_padding(shape, window) when is_tuple(shape) and is_tuple(window) do
validate_window!(shape, window)
calculate_padding(List.duplicate(1, tuple_size(shape)), shape, window, 0)
end
@doc """
Output shape after a convolution, already padded.
"""
def conv(input_shape, input_names, kernel_shape, _kernel_names, strides, batch_groups, padding) do
filter_shape =
kernel_shape
|> Tuple.delete_at(0)
|> Tuple.delete_at(0)
num_filters = elem(kernel_shape, 0)
batch_size = elem(input_shape, 0)
# Assume padding only pads spatial dims
padding_config = [{0, 0, 0}, {0, 0, 0} | Enum.map(padding, &Tuple.append(&1, 0))]
padded_shape = Nx.Shape.pad(input_shape, padding_config)
old_spatial_dims =
padded_shape
|> Tuple.delete_at(0)
|> Tuple.delete_at(0)
|> Tuple.to_list()
spatial_dims = do_spatial_dims(old_spatial_dims, Tuple.to_list(filter_shape), strides)
# TODO: Is it always the case that it's best to return the input names?
{List.to_tuple([div(batch_size, batch_groups), num_filters | spatial_dims]), input_names}
end
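# Worked example of the shape arithmetic above (values are illustrative):
# input {1, 3, 32, 32}, kernel {8, 3, 3, 3}, strides [1, 1], batch_groups 1,
# padding [{0, 0}, {0, 0}] gives spatial dims floor((32 - 3) / 1) + 1 = 30
# each, so the result is {{1, 8, 30, 30}, input_names}.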
defp do_spatial_dims([], [], []), do: []
defp do_spatial_dims([cur | spatial], [f | filters], [s | strides]),
do: [floor((cur - f) / s) + 1 | do_spatial_dims(spatial, filters, strides)]
@doc """
Output shape after a window operation.
## Examples
iex> Nx.Shape.window({3, 3}, {2, 2}, [1, 1])
{2, 2}
### Error cases
iex> Nx.Shape.window({1, 2, 3}, {2, 1, 1}, [1, 1, 1])
** (ArgumentError) window dimensions would result in empty tensor which is not currently supported in Nx, please open an issue if you'd like this behavior to change
iex> Nx.Shape.window({1, 2, 3}, {2, 1}, [1, 1, 1])
** (ArgumentError) invalid window dimensions, rank of shape (3) does not match rank of window (2)
iex> Nx.Shape.window({1, 2, 3}, {2, 1, 1}, [1, 1])
** (ArgumentError) invalid stride dimensions, rank of shape (3) does not match rank of stride (2)
"""
def window(shape, window, strides)
when is_tuple(shape) and is_tuple(window) and is_list(strides) do
validate_window!(shape, window)
validate_strides!(shape, strides)
List.to_tuple(window(strides, shape, window, 0))
end
defp window([], _shape, _window, _pos), do: []
defp window([s | strides], shape, window, pos) do
dim = elem(shape, pos)
w = elem(window, pos)
new_dim = div(dim - w, s) + 1
if new_dim <= 0 do
raise ArgumentError,
"window dimensions would result in empty tensor" <>
" which is not currently supported in Nx, please" <>
" open an issue if you'd like this behavior to change"
end
[new_dim | window(strides, shape, window, pos + 1)]
end
# Ensures the window is valid given the shape.
# A window is valid as long as its rank matches
# the rank of the given shape.
defp validate_window!(shape, window)
defp validate_window!(shape, window) when tuple_size(shape) != tuple_size(window),
do:
raise(
ArgumentError,
"invalid window dimensions, rank of shape (#{tuple_size(shape)})" <>
" does not match rank of window (#{tuple_size(window)})"
)
defp validate_window!(_, _), do: :ok
# Ensures the strides are valid given the shape.
# A stride is valid as long as its rank matches
# the rank of the given shape.
defp validate_strides!(shape, strides)
defp validate_strides!(shape, strides) when tuple_size(shape) != length(strides),
do:
raise(
ArgumentError,
"invalid stride dimensions, rank of shape (#{tuple_size(shape)})" <>
" does not match rank of stride (#{length(strides)})"
)
defp validate_strides!(_, _), do: :ok
@doc """
Output shape after a squeeze operation.
## Examples
iex> Nx.Shape.squeeze({2, 1, 1}, [1, 2], [:batch, :x, :y])
{{2}, [:batch]}
iex> Nx.Shape.squeeze({1, 2}, [0], [:batch, :x])
{{2}, [:x]}
### Error cases
iex> Nx.Shape.squeeze({2, 2, 1}, [1], [:batch, :x, :y])
** (ArgumentError) cannot squeeze dimensions whose sizes are not 1, got 2 for dimension 1
"""
def squeeze(shape, axes, names) do
squeeze(Enum.with_index(Tuple.to_list(shape)), axes, names, [], [])
end
defp squeeze([], _, _, sacc, nacc) do
{List.to_tuple(Enum.reverse(sacc)), Enum.reverse(nacc)}
end
defp squeeze([{s, i} | shape], axes, [n | names], sacc, nacc) do
if i in axes do
if s == 1 do
squeeze(shape, axes, names, sacc, nacc)
else
raise ArgumentError,
"cannot squeeze dimensions whose sizes are not 1, got #{s} for dimension #{i}"
end
else
squeeze(shape, axes, names, [s | sacc], [n | nacc])
end
end
@doc """
Output shape after a padding operation.
## Examples
iex> Nx.Shape.pad({3, 2, 4}, [{0, 1, 0}, {1, 2, 0}, {1, 1, 0}])
{4, 5, 6}
iex> Nx.Shape.pad({}, [])
{}
iex> Nx.Shape.pad({2, 2}, [{1, 1, 0}, {0, 0, 0}])
{4, 2}
iex> Nx.Shape.pad({2, 3}, [{0, 0, 1}, {0, 0, 1}])
{3, 5}
### Error cases
iex> Nx.Shape.pad({2, 2, 3}, [{0, 1, 0}, {1, 2, 0}])
** (ArgumentError) invalid padding configuration, rank of padding configuration and shape must match
"""
def pad(shape, padding_config) do
shape
|> Tuple.to_list()
|> padded_dims(padding_config, [])
|> Enum.reverse()
|> List.to_tuple()
end
defp padded_dims([], [], acc), do: acc
defp padded_dims([_ | _], [], _acc),
do:
raise(
ArgumentError,
"invalid padding configuration, rank of padding configuration" <>
" and shape must match"
)
defp padded_dims([], [_ | _], _acc),
do:
raise(
ArgumentError,
"invalid padding configuration, rank of padding configuration" <>
" and shape must match"
)
defp padded_dims([s | shape], [{edge_low, edge_high, interior} | config], acc) do
interior_padding_factor = (s - 1) * interior
padded_dims(shape, config, [s + interior_padding_factor + edge_low + edge_high | acc])
end
## Axes helpers
@doc """
Normalize the axis to the given shape.
## Examples
iex> Nx.Shape.normalize_axis({4, 2, 3}, -1, [:batch, :x, :y])
2
iex> Nx.Shape.normalize_axis({4, 2, 1, 4}, -2, [:batch, :x, :y, :z])
2
iex> Nx.Shape.normalize_axis({4, 2, 1, 4}, 1, [:batch, :x, :y, :z])
1
iex> Nx.Shape.normalize_axis({4, 2, 1, 4}, :z, [:batch, :x, :y, :z])
3
### Error cases
iex> Nx.Shape.normalize_axis({4, 2, 5}, -4, [:batch, :x, :y])
** (ArgumentError) given axis (-4) invalid for shape with rank 3
iex> Nx.Shape.normalize_axis({4, 2, 5}, 3, [:batch, :x, :y])
** (ArgumentError) given axis (3) invalid for shape with rank 3
iex> Nx.Shape.normalize_axis({4, 2, 5}, :z, [:batch, :x, :y])
** (ArgumentError) key :z not found in tensor with names [:batch, :x, :y]
iex> Nx.Shape.normalize_axis({4, 2, 5}, nil, [:batch, nil, nil])
** (ArgumentError) axis name cannot be nil
"""
def normalize_axis(shape, axis, names)
def normalize_axis(shape, axis, _names) when axis < 0 and abs(axis) <= tuple_size(shape),
do: tuple_size(shape) + axis
def normalize_axis(shape, axis, _names) when axis >= 0 and axis < tuple_size(shape),
do: axis
def normalize_axis(_shape, nil, _names),
do: raise(ArgumentError, "axis name cannot be nil")
def normalize_axis(_shape, axis, names) when is_atom(axis) do
if axis in names do
Enum.with_index(names)[axis]
else
raise ArgumentError, "key #{inspect(axis)} not found in tensor with names #{inspect(names)}"
end
end
def normalize_axis(shape, axis, _names) do
raise ArgumentError,
"given axis (#{inspect(axis)}) invalid for shape with rank #{tuple_size(shape)}"
end
@doc """
Normalize a list of unique axes.
See `normalize_axis/3`.
## Examples
iex> Nx.Shape.normalize_axes({2, 3, 4}, [-1, 0], [:batch, nil])
[2, 0]
iex> Nx.Shape.normalize_axes({2, 3, 4}, [:batch, 1], [:batch, :x])
[0, 1]
### Error Cases
iex> Nx.Shape.normalize_axes({2, 3, 4}, [1, 1], [nil, nil, nil])
** (ArgumentError) axes [1, 1] must be unique integers between 0 and 2
"""
def normalize_axes(shape, axes, names) when is_list(axes) do
normalized = Enum.map(axes, &normalize_axis(shape, &1, names))
if length(Enum.uniq(normalized)) != length(axes) do
raise ArgumentError,
"axes #{inspect(axes)} must be unique integers between 0 and #{tuple_size(shape) - 1}"
end
normalized
end
@doc """
Returns the axes for transposition.
## Examples
iex> Nx.Shape.transpose_axes({})
[]
iex> Nx.Shape.transpose_axes({3, 2, 1})
[2, 1, 0]
"""
def transpose_axes(shape) do
rank = tuple_size(shape)
count_down(rank, rank - 1)
end
@doc """
Compute the broadcast axes based on the shape rank.
It doesn't validate if the remaining dimensions are
actually valid.
## Examples
iex> Nx.Shape.broadcast_axes({2, 2, 2}, {2, 2, 2, 2})
[1, 2, 3]
iex> Nx.Shape.broadcast_axes({2, 2, 2}, {2, 2, 2, 2, 2})
[2, 3, 4]
"""
def broadcast_axes(shape, new_shape) when tuple_size(shape) > tuple_size(new_shape) do
raise ArgumentError,
"cannot broadcast tensor of dimensions #{inspect(shape)} " <>
"to #{inspect(new_shape)}"
end
def broadcast_axes(shape, new_shape) do
min_size = tuple_size(shape)
max_size = tuple_size(new_shape)
count_up(min_size, max_size - min_size)
end
@doc """
Returns the axes for squeezing.
## Examples
iex> Nx.Shape.squeeze_axes({2, 1, 1})
[1, 2]
iex> Nx.Shape.squeeze_axes({1, 2, 1, 3, 2, 1})
[0, 2, 5]
"""
def squeeze_axes(shape) do
for {1, i} <- Enum.with_index(Tuple.to_list(shape)), do: i
end
@doc """
Returns the shape after a slice.
## Examples
iex> Nx.Shape.slice({2, 15, 30}, [1, 4, 10], [1, 1, 10], [1, 1, 3])
{1, 1, 4}
### Error cases
iex> Nx.Shape.slice({2, 15, 30}, [1, 4, 10], [2, 1, 1], [1, 1, 1])
** (ArgumentError) start index + length at axis 0 must be less than axis size of 2, got: 3
"""
def slice(shape, start_indices, lengths, strides) do
rank = tuple_size(shape)
if length(strides) != rank do
raise ArgumentError, "invalid strides rank for shape of rank #{rank}"
end
if length(start_indices) != rank do
raise ArgumentError, "invalid start indices rank for shape of rank #{rank}"
end
if length(lengths) != rank do
raise ArgumentError, "invalid limit indices rank for shape of rank #{rank}"
end
shape
|> slice(0, start_indices, lengths, strides)
|> List.to_tuple()
end
defp slice(shape, pos, [i | start_indices], [len | lengths], [s | strides]) do
dim = elem(shape, pos)
if not is_integer(i) or i < 0 do
raise ArgumentError,
"start index at axis #{pos} must be greater than or equal to 0, got: #{inspect(i)}"
end
if not is_integer(len) or len < 1 do
raise ArgumentError,
"length at axis #{pos} must be greater than or equal to 1, got: #{inspect(len)}"
end
if not is_integer(s) or s < 1 do
raise ArgumentError,
"stride at axis #{pos} must be greater than or equal to 1, got: #{inspect(s)}"
end
if i >= dim do
raise ArgumentError,
"start index at axis #{pos} must be less than axis size of #{dim}, got: #{i}"
end
if i + len > dim do
raise ArgumentError,
"start index + length at axis #{pos} must be less than axis size of #{dim}, " <>
"got: #{i + len}"
end
[Kernel.ceil(len / s) | slice(shape, pos + 1, start_indices, lengths, strides)]
end
defp slice(_shape, _pos, [], [], []), do: []
@doc """
Returns the shape and names after a concat.
## Examples
iex> Nx.Shape.concatenate([{2, 3, 2}, {1, 3, 2}, {4, 3, 2}], [[:x, :y, :z], [:x, :y, :z], [:x, :y, :z]], 0)
{{7, 3, 2}, [:x, :y, :z]}
"""
def concatenate(shapes, names, axis) do
names = validate_concat_names!(names)
{concat_dims(shapes, axis), names}
end
defp concat_dims([s1 | shapes], axis) do
s1 = Tuple.to_list(s1)
shapes
|> Enum.reduce(s1, &concat_shapes(Tuple.to_list(&1), &2, axis))
|> List.to_tuple()
end
defp concat_shapes(shape1, shape2, axis) do
# TODO: Use Enum.with_index on Elixir v1.12
shape1
|> Enum.zip(shape2)
|> Enum.with_index()
|> Enum.map(fn {{s1, s2}, i} ->
cond do
i == axis ->
s1 + s2
s1 == s2 ->
s1
true ->
raise ArgumentError,
"non-concat dims must be equal got" <>
" #{inspect(s1)} and #{inspect(s2)}" <>
" while concatenating on axis #{axis}"
end
end)
end
@doc """
Returns the shape and names after a Cholesky decomposition.
## Examples
iex> Nx.Shape.cholesky({4, 4}, [:x, :y])
{{4, 4}, [:x, :y]}
## Error Cases
iex> Nx.Shape.cholesky({3, 2}, [:x, :y])
** (ArgumentError) tensor must be a square matrix, got shape: {3, 2}
iex> Nx.Shape.cholesky({3, 3, 3}, [:x, :y, :z])
** (ArgumentError) tensor must have rank 2, got rank 3 with shape {3, 3, 3}
"""
def cholesky({n, n}, names), do: {{n, n}, names}
def cholesky({m, n}, _names),
do: raise(ArgumentError, "tensor must be a square matrix, got shape: #{inspect({m, n})}")
def cholesky(shape, _names),
do:
raise(
ArgumentError,
"tensor must have rank 2, got rank #{tuple_size(shape)} with shape #{inspect(shape)}"
)
def qr({m, n}, opts) when m >= n do
mode = opts[:mode]
case mode do
:reduced ->
{{m, n}, {n, n}}
_ ->
{{m, m}, {m, n}}
end
end
def qr({m, n}, _opts),
do:
raise(
ArgumentError,
"tensor must have at least as many rows as columns, got shape: #{inspect({m, n})}"
)
def qr(shape, _opts),
do:
raise(
ArgumentError,
"tensor must have rank 2, got rank #{tuple_size(shape)} with shape #{inspect(shape)}"
)
def svd({m, n}) do
{{m, m}, {n}, {n, n}}
end
def svd(shape),
do:
raise(
ArgumentError,
"tensor must have rank 2, got rank #{tuple_size(shape)} with shape #{inspect(shape)}"
)
def lu({n, n}) do
{{n, n}, {n, n}, {n, n}}
end
def lu(shape),
do:
raise(
ArgumentError,
"tensor must have as many rows as columns, got shape: #{inspect(shape)}"
)
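# Shape summary for the decompositions above (m and n are illustrative):
#   qr({m, n}, mode: :reduced)   #=> {{m, n}, {n, n}}   (requires m >= n)
#   qr({m, n}, mode: other)      #=> {{m, m}, {m, n}}
#   svd({m, n})                  #=> {{m, m}, {n}, {n, n}}
#   lu({n, n})                   #=> {{n, n}, {n, n}, {n, n}}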
defp validate_concat_names!(names) do
:ok =
names
|> Enum.zip()
|> Enum.each(fn tuple ->
[n1 | rest] = Tuple.to_list(tuple)
Enum.reduce(rest, n1, &merge_names!(&1, &2))
end)
hd(names)
end
## Helpers
defp count_up(0, _n), do: []
defp count_up(i, n), do: [n | count_up(i - 1, n + 1)]
defp count_down(0, _n), do: []
defp count_down(i, n), do: [n | count_down(i - 1, n - 1)]
defp merge_names!(nil, nil), do: nil
defp merge_names!(nil, name) when is_atom(name), do: name
defp merge_names!(name, nil) when is_atom(name), do: name
defp merge_names!(name, name) when is_atom(name), do: name
defp merge_names!(lhs, rhs),
do: raise(ArgumentError, "cannot merge names #{inspect(lhs)}, #{inspect(rhs)}")
end
|
lib/nx/shape.ex
| 0.71113 | 0.677227 |
shape.ex
|
starcoder
|
defmodule Const do
@moduledoc """
A simple helper to define constants. Constants are defined as functions of the module and couple of helper functions are added.
```elixir
defmodule Status do
use Const, [:queued, :processed, :sent]
end
```
is equivalent to writing
```elixir
defmodule Status do
def queued, do: 0
def processed, do: 1
def sent, do: 2
def all, do: [queued: 0, processed: 1, sent: 2]
def by_value(val) do
# returns the atom from the integer value. In case of duplicated values, the first
# associated atom is returned
end
end
```
Note that the parameter passed to the `use` macro can also be a keyword list, where the values are explicitly given. You can even give only some
of the values, in which case the behavior is like in C: each unspecified value counts up from the last explicitly given one
```elixir
defmodule Status do
use Const, [:queued, :processed, {:sent, 100}, :delivered, :received]
# the final values will be: [queued: 0, processed: 1, sent: 100, delivered: 101, received: 102]
end
```
Duplicated values are allowed, but they make the `by_value` function less useful: for a duplicated value, only the first matching atom is returned.
"""
defmacro __using__(constants) do
const = constants |> format()
[define_all(const), define_by_value(const) | define_constants(const)]
end
defp define_constants(const) do
Enum.map(const, fn({constant, val}) ->
quote do
def unquote(constant)(), do: unquote(val)
end
end)
end
defp define_all(const) do
quote do
def all(), do: unquote(const)
end
end
defp define_by_value(const) do
quote do
def by_value(val) do
case Enum.find(unquote(const), fn({_k, v}) -> v == val end) do
{k, _v} -> k
nil -> nil
end
end
end
end
defp format(constants) do
{res, _} = Enum.reduce(constants, {[], 0}, fn
(c = {_k, v}, {res, _idx}) ->
{[c | res], v + 1}
(k, {res, idx}) ->
{[{k, idx} | res], idx + 1}
end)
Enum.reverse(res)
end
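# Given the Status example from the moduledoc, `Status.by_value(1)` returns
# :processed, and `Status.by_value(99)` returns nil.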
end
|
lib/const.ex
| 0.819063 | 0.98764 |
const.ex
|
starcoder
|
defmodule Is.Validators.InRange do
@moduledoc """
In range validator (inclusive).
## Examples
iex> Is.validate(10, in_range: [1, 10])
[]
iex> Is.validate(10, in_range: [min: 1])
[]
iex> Is.validate(10, in_range: [max: 10])
[]
iex> Is.validate(10, in_range: [min: 1, max: 10])
[]
iex> Is.validate(11, in_range: [1, 10])
[{:error, [], "must be between 1 and 10 inclusive"}]
iex> Is.validate(-1, in_range: [1, 10])
[{:error, [], "must be between 1 and 10 inclusive"}]
iex> Is.validate(2, in_range: [min: 3])
[{:error, [], "must at least be 3"}]
iex> Is.validate(4, in_range: [max: 3])
[{:error, [], "must at most be 3"}]
iex> Is.validate(2, in_range: [min: 3, max: 4])
[{:error, [], "must be between 3 and 4 inclusive"}]
iex> Is.validate("a", in_range: [1, 10])
[{:error, [], "in_range: value is not a number or options are invalid"}]
"""
def validate(data, [min, max]) when is_number(min) and is_number(max) do
validate(data, [min: min, max: max])
end
def validate(data, options) when is_number(data) and is_list(options) do
min = Keyword.get(options, :min)
max = Keyword.get(options, :max)
validate_with_range(data, [min, max])
end
def validate(_data, _range) do
{:error, "in_range: value is not a number or options are invalid"}
end
defp validate_with_range(size, [min, max]) do
cond do
is_number(min) and is_number(max) ->
if size >= min and size <= max do
:ok
else
{:error, "must be between #{min} and #{max} inclusive"}
end
is_number(min) and not is_number(max) ->
if size >= min do
:ok
else
{:error, "must at least be #{min}"}
end
is_number(max) and not is_number(min) ->
if size <= max do
:ok
else
{:error, "must at most be #{max}"}
end
true -> {:error, "length: value is not a binary or options are not valid"}
end
end
end
|
lib/is/validators/in_range.ex
| 0.727104 | 0.684897 |
in_range.ex
|
starcoder
|
defmodule Plug.AMQP do
@moduledoc """
Adapter interface to the [AMQP RPC pattern](https://www.rabbitmq.com/tutorials/tutorial-six-elixir.html).
`Plug.AMQP` provides an [AMQP](https://www.amqp.org) interface to `Plug`.
When using `Plug.AMQP` you can write servers that answer requests sent through
an *AMQP* broker, like [RabbitMQ](https://www.rabbitmq.com). The request
response pattern is explained in detail [here](https://www.rabbitmq.com/tutorials/tutorial-six-elixir.html).
## Usage
To use `Plug.AMQP`, add it to your supervision tree. Assuming that your Plug
module is named `MyPlug`:
children = [
{Plug.AMQP, connection_options: "amqp://my-rabbit:5672", plug: MyPlug}
]
Supervisor.start_link(children, strategy: :one_for_one)
Check `t:option/0` and `t:Plug.AMQP.ConsumerProducer.option/0` for more
options.
## Examples
The following example is taken from the
[RabbitMQ RPC Tutorial](https://www.rabbitmq.com/tutorials/tutorial-six-elixir.html)
but using `Plug.AMQP`.
```elixir
#{File.read!("examples/fibonacci.exs")}
```
"""
use Supervisor
alias Plug.AMQP.{Conn, ConsumerProducer}
@typedoc """
A `Plug.AMQP` configuration option.
`Plug.AMQP` supports any of `t:Plug.AMQP.ConsumerProducer.option/0`. Also, the `plug`
option must be used to set the main plug of a server.
"""
@type option() ::
{:plug, module() | {module(), keyword()}}
| ConsumerProducer.option()
@typedoc "A list of `t:option/0`s."
@type options() :: [option() | {atom(), any()}]
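# Sketch of the `{module, opts}` form of the `:plug` option (the plug module
# and its option are hypothetical):
#
#     children = [
#       {Plug.AMQP,
#        connection_options: "amqp://my-rabbit:5672",
#        plug: {MyPlug, greeting: "hello"}}
#     ]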
@doc false
@spec start_link(keyword) :: Supervisor.on_start()
def start_link(opts) do
with {:ok, supervisor} <- Supervisor.start_link(__MODULE__, opts, []),
:ok <- start_children(supervisor, opts) do
{:ok, supervisor}
end
end
@impl true
def init(_opts) do
Supervisor.init([], strategy: :one_for_one)
end
@doc false
@spec handle(
GenServer.server(),
ConsumerProducer.payload(),
ConsumerProducer.headers(),
options()
) :: :ok
def handle(endpoint, payload, headers, opts) do
start = System.monotonic_time()
{plug, plug_opts} = fetch_plug!(opts)
conn = Conn.conn(endpoint, payload, headers)
:telemetry.execute(
[:plug_adapter, :call, :start],
%{system_time: System.system_time()},
%{adapter: :plug_amqp, conn: conn, plug: plug}
)
try do
conn
|> plug.call(plug_opts)
|> maybe_send_resp()
catch
kind, reason ->
:telemetry.execute(
[:plug_adapter, :call, :exception],
%{duration: System.monotonic_time() - start},
%{
adapter: :plug_amqp,
conn: conn,
plug: plug,
kind: kind,
reason: reason,
stacktrace: __STACKTRACE__
}
)
exit_on_error(kind, reason, __STACKTRACE__, {plug, :call, [conn, opts]})
else
%{adapter: {Plug.AMQP.Conn, req}} = conn ->
:telemetry.execute(
[:plug_adapter, :call, :stop],
%{duration: System.monotonic_time() - start},
%{adapter: :plug_amqp, conn: conn, plug: plug}
)
{:ok, req, {plug, opts}}
end
:ok
end
@spec start_children(Supervisor.supervisor(), keyword()) :: :ok | {:error, any()}
defp start_children(supervisor, opts) do
with {:ok, task_supervisor} <- Supervisor.start_child(supervisor, Task.Supervisor),
opts <-
opts
|> Keyword.put_new(:request_handler_supervisor, task_supervisor)
|> Keyword.put(:request_handler, {__MODULE__, :handle, opts}),
{:ok, _endpoint} <- Supervisor.start_child(supervisor, {ConsumerProducer, opts}) do
:ok
end
end
@spec fetch_plug!(options()) :: {module(), keyword()} | no_return()
defp fetch_plug!(opts) do
case Keyword.fetch!(opts, :plug) do
{module, opts} -> {module, opts}
module -> {module, []}
end
end
defp exit_on_error(
:error,
%Plug.Conn.WrapperError{kind: kind, reason: reason, stack: stack},
_stack,
call
) do
exit_on_error(kind, reason, stack, call)
end
defp exit_on_error(:error, value, stack, call) do
exception = Exception.normalize(:error, value, stack)
exit({{exception, stack}, call})
end
defp exit_on_error(:throw, value, stack, call) do
exit({{{:nocatch, value}, stack}, call})
end
defp exit_on_error(:exit, value, _stack, call) do
exit({value, call})
end
defp maybe_send_resp(conn = %Plug.Conn{state: :set}), do: Plug.Conn.send_resp(conn)
defp maybe_send_resp(conn = %Plug.Conn{}), do: conn
end
|
lib/plug/amqp.ex
| 0.830233 | 0.888855 |
amqp.ex
|
starcoder
|
defmodule MealTracker.FoodItem do
@moduledoc """
Represents a food item in a meal log.
## Text Formats
This module exists to translate food items between the struct representation and textual
representations. In the patterns below "Food name" represents the name of the food, "nx"
represents the number `n` of items of that food, and "n unit" represents the number `n` of the
given `unit` of the food such as "355 milliliter Pepsi".
* `Food name` (equivalent to `1x Food name`)
* `nx Food name`
* `n unit Food name`
"""
defstruct [:name, quantity: 1, unit: :item]
@item_quantity_pattern ~r/^(?<quantity>\d+(\.\d+)?)x (?<name>.+)$/
@unit_quantity_pattern ~r/^(?<quantity>\d+(\.\d+)?) (?<unit>\w+) (of )?(?<name>.+)$/
@doc """
Parses a food item entry into the `MealTracker.FoodItem` struct.
"""
@spec parse(String.t() | list(String.t())) :: __MODULE__.t()
def parse(text)
def parse(list) when is_list(list) do
list
|> Enum.join(" ")
|> parse()
end
def parse(text) when is_binary(text) do
cond do
captures = Regex.named_captures(@item_quantity_pattern, text) -> parse_item_quantity(captures)
captures = Regex.named_captures(@unit_quantity_pattern, text) -> parse_unit_quantity(captures)
true -> %__MODULE__{name: text, quantity: 1, unit: :item}
end
end
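# Illustrative results, per the formats in the moduledoc:
#
#     parse("Banana")               #=> %MealTracker.FoodItem{name: "Banana", quantity: 1, unit: :item}
#     parse("2x Eggs")              #=> %MealTracker.FoodItem{name: "Eggs", quantity: 2, unit: :item}
#     parse("355 milliliter Pepsi") #=> %MealTracker.FoodItem{name: "Pepsi", quantity: 355, unit: :milliliter}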
@doc """
Converts a food item to its string representation.
"""
@spec to_string(__MODULE__.t()) :: String.t()
def to_string(%__MODULE__{quantity: n, unit: :item, name: name}), do: "#{n}x #{name}"
def to_string(%__MODULE__{quantity: n, unit: unit, name: name}), do: "#{n} #{unit} #{name}"
defp parse_float({quantity, ""}, _), do: quantity
defp parse_integer({quantity, ""}, _), do: quantity
defp parse_integer(_, text), do: parse_float(Float.parse(text), text)
defp parse_item_quantity(captures) do
%__MODULE__{name: captures["name"], quantity: parse_number(captures["quantity"])}
end
defp parse_number(text), do: parse_integer(Integer.parse(text), text)
defp parse_unit_quantity(captures) do
%__MODULE__{
name: captures["name"],
quantity: parse_number(captures["quantity"]),
unit: captures["unit"] |> stem() |> String.to_atom()
}
end
defp stem(unit) do
cond do
String.ends_with?(unit, "s") -> String.slice(unit, 0..-2)
true -> unit
end
end
end
|
lib/meal_tracker/food_item.ex
| 0.817756 | 0.575051 |
food_item.ex
|
starcoder
|
defmodule Mnemonix.Features.Bump do
@name Inspect.inspect(__MODULE__, %Inspect.Opts{})
@moduledoc """
Functions to increment/decrement integer values within a store.
Using this feature will define all of its Mnemonix client API functions on your module.
Refer to `Mnemonix.Builder` for documentation on options you can use when doing so.
"""
use Mnemonix.Behaviour
use Mnemonix.Singleton.Behaviour
@typedoc """
The target of a bump operation.
"""
@type value :: integer
@typedoc """
The amount of a bump operation.
"""
@type amount :: integer
@typedoc """
The return value of a bump operation.
"""
@type result :: value | {:error, :no_integer}
@callback bump(Mnemonix.store(), Mnemonix.key(), amount) :: result | no_return
@doc """
Adds `amount` to the value of the integer entry under `key` in `store`.
If an entry for `key` does not exist,
it is set to `0` before performing the operation.
If the `amount` or the value under `key` is not an integer,
returns `{:error, :no_integer}` and the value remains unchanged;
otherwise returns the new value.
## Examples
iex> store = Mnemonix.new(%{a: 1})
iex> #{@name}.bump(store, :a, 1)
2
iex> store = Mnemonix.new
iex> #{@name}.bump(store, :b, 2)
2
iex> store = Mnemonix.new(%{c: "foo"})
iex> #{@name}.bump(store, :c, 3)
{:error, :no_integer}
iex> Mnemonix.get(store, :c)
"foo"
iex> store = Mnemonix.new
iex> #{@name}.bump(store, :c, "foo")
{:error, :no_integer}
iex> Mnemonix.get(store, :d)
nil
"""
@spec bump(Mnemonix.store(), Mnemonix.key(), amount) :: result | no_return
def bump(store, key, amount) do
case GenServer.call(store, {:bump, key, amount}) do
{:ok, value} -> value
{:warn, message, value} -> with :ok <- IO.warn(message), do: value
{:raise, type, args} -> raise type, args
end
end
@callback bump!(Mnemonix.store(), Mnemonix.key(), amount) :: Mnemonix.store() | no_return
@doc """
Adds `amount` to the value of the integer entry under `key` in `store`.
If an entry for `key` does not exist,
it is set to `0` before performing the operation.
If the `amount` or the value under `key` is not an integer, raises an `ArithmeticError`,
and the value will remain unchanged. Otherwise, returns the `store`.
## Examples
iex> store = Mnemonix.new(%{a: 1})
iex> #{@name}.bump!(store, :a, 2)
iex> Mnemonix.get(store, :a)
3
iex> store = Mnemonix.new
iex> #{@name}.bump!(store, :b, 2)
iex> Mnemonix.get(store, :b)
2
iex> store = Mnemonix.new(%{c: "foo"})
iex> #{@name}.bump!(store, :c, 2)
** (ArithmeticError) bad argument in arithmetic expression
iex> store = Mnemonix.new
iex> #{@name}.bump!(store, :d, "foo")
** (ArithmeticError) bad argument in arithmetic expression
"""
@spec bump!(Mnemonix.store(), Mnemonix.key(), amount) :: Mnemonix.store() | no_return
def bump!(store, key, amount) do
case GenServer.call(store, {:bump!, key, amount}) do
:ok -> store
{:warn, message} -> with :ok <- IO.warn(message), do: store
{:raise, type, args} -> raise type, args
end
end
@callback increment(Mnemonix.store(), Mnemonix.key()) :: result | no_return
@doc """
Increments the value of the integer entry under `key` in `store` by `1`.
If an entry for `key` does not exist,
it is set to `0` before performing the operation.
If the value under `key` is not an integer,
the store remains unchanged and `{:error, :no_integer}` is returned.
## Examples
iex> store = Mnemonix.new(%{a: 1})
iex> #{@name}.increment(store, :a)
2
iex> store = Mnemonix.new
iex> #{@name}.increment(store, :b)
1
iex> store = Mnemonix.new(%{c: "foo"})
iex> #{@name}.increment(store, :c)
{:error, :no_integer}
iex> Mnemonix.get(store, :c)
"foo"
"""
@spec increment(Mnemonix.store(), Mnemonix.key()) :: result | no_return
def increment(store, key) do
case GenServer.call(store, {:increment, key}) do
{:ok, value} -> value
{:warn, message, value} -> with :ok <- IO.warn(message), do: value
{:raise, type, args} -> raise type, args
end
end
@callback increment(Mnemonix.store(), Mnemonix.key(), amount) :: result | no_return
@doc """
Increments the value of the integer entry under `key` in `store` by `amount`.
If an entry for `key` does not exist,
it is set to `0` before performing the operation.
If the `amount` or the value under `key` is not an integer,
the store remains unchanged and `{:error, :no_integer}` is returned.
## Examples
iex> store = Mnemonix.new(%{a: 1})
iex> #{@name}.increment(store, :a, 2)
3
iex> store = Mnemonix.new
iex> #{@name}.increment(store, :b, 2)
2
iex> store = Mnemonix.new(%{c: "foo"})
iex> #{@name}.increment(store, :c, 2)
{:error, :no_integer}
iex> Mnemonix.get(store, :c)
"foo"
iex> store = Mnemonix.new
iex> #{@name}.increment(store, :d, "foo")
{:error, :no_integer}
iex> Mnemonix.get(store, :d)
nil
"""
@spec increment(Mnemonix.store(), Mnemonix.key(), amount) :: result | no_return
def increment(store, key, amount) do
case GenServer.call(store, {:increment, key, amount}) do
{:ok, value} -> value
{:warn, message, value} -> with :ok <- IO.warn(message), do: value
{:raise, type, args} -> raise type, args
end
end
@callback decrement(Mnemonix.store(), Mnemonix.key()) :: result | no_return
@doc """
Decrements the value of the integer entry under `key` in `store` by `1`.
If an entry for `key` does not exist,
it is set to `0` before performing the operation.
If the value under `key` is not an integer,
the store remains unchanged and `{:error, :no_integer}` is returned.
## Examples
iex> store = Mnemonix.new(%{a: 1})
iex> #{@name}.decrement(store, :a)
0
iex> store = Mnemonix.new
iex> #{@name}.decrement(store, :b)
-1
iex> store = Mnemonix.new(%{c: "foo"})
iex> #{@name}.decrement(store, :c)
{:error, :no_integer}
iex> Mnemonix.get(store, :c)
"foo"
"""
@spec decrement(Mnemonix.store(), Mnemonix.key()) :: result | no_return
def decrement(store, key) do
case GenServer.call(store, {:decrement, key}) do
{:ok, value} -> value
{:warn, message, value} -> with :ok <- IO.warn(message), do: value
{:raise, type, args} -> raise type, args
end
end
@callback decrement(Mnemonix.store(), Mnemonix.key(), amount) ::
integer | {:error, :no_integer} | no_return
@doc """
Decrements the value of the integer entry under `key` in `store` by `amount`.
If an entry for `key` does not exist,
it is set to `0` before performing the operation.
If `amount` or the value under `key` is not an integer,
the store remains unchanged and `{:error, :no_integer}` is returned.
## Examples
iex> store = Mnemonix.new(%{a: 2})
iex> #{@name}.decrement(store, :a, 2)
0
iex> store = Mnemonix.new
iex> #{@name}.decrement(store, :b, 2)
-2
iex> store = Mnemonix.new(%{c: "foo"})
iex> #{@name}.decrement(store, :c, 2)
{:error, :no_integer}
iex> Mnemonix.get(store, :c)
"foo"
iex> store = Mnemonix.new
iex> #{@name}.decrement(store, :d, "foo")
{:error, :no_integer}
iex> Mnemonix.get(store, :d)
nil
"""
@spec decrement(Mnemonix.store(), Mnemonix.key(), amount) ::
integer | {:error, :no_integer} | no_return
def decrement(store, key, amount) do
case GenServer.call(store, {:decrement, key, amount}) do
{:ok, value} -> value
{:warn, message, value} -> with :ok <- IO.warn(message), do: value
{:raise, type, args} -> raise type, args
end
end
end
|
lib/mnemonix/features/bump.ex
| 0.906975 | 0.410874 |
bump.ex
|
starcoder
|
defmodule Sanbase.KafkaExporter do
@moduledoc ~s"""
Module for persisting any data to Kafka.
The module exposes one function that should be used - `persist/1`.
This function adds the data to an internal buffer that is flushed
every `kafka_flush_timeout` milliseconds or when the buffer is big enough.
The exporter does not send data more than once every second, so the
GenServer cannot die too often and crash its supervisor.
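## Example
A minimal usage sketch (the exporter name and topic below are assumptions,
not part of this module):
{:ok, _pid} = Sanbase.KafkaExporter.start_link(name: :prices_exporter, topic: "asset_prices")
:ok = Sanbase.KafkaExporter.persist({"bitcoin", "40000"}, :prices_exporter)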
"""
use GenServer
require Logger
require Sanbase.Utils.Config, as: Config
@producer Config.get(:producer)
@type data :: {String.t(), String.t()}
@type result :: :ok | {:error, String.t()}
@typedoc ~s"""
Options that describe which Kafka topic to send the batches to and how often.
These options do not describe the connection.
"""
@type options :: [
{:name, atom()}
| {:topic, String.t()}
| {:kafka_flush_timeout, non_neg_integer()}
| {:buffering_max_messages, non_neg_integer()}
| {:can_send_after_interval, non_neg_integer()}
]
@spec start_link(options) :: GenServer.on_start()
def start_link(opts) do
name = Keyword.get(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, opts, name: name)
end
@spec init(options) :: {:ok, state} when state: map()
def init(opts) do
kafka_flush_timeout = Keyword.get(opts, :kafka_flush_timeout, 30_000)
buffering_max_messages = Keyword.get(opts, :buffering_max_messages, 1000)
can_send_after_interval = Keyword.get(opts, :can_send_after_interval, 1000)
Process.send_after(self(), :flush, kafka_flush_timeout)
{:ok,
%{
topic: Keyword.fetch!(opts, :topic),
data: [],
size: 0,
kafka_flush_timeout: kafka_flush_timeout,
buffering_max_messages: buffering_max_messages,
can_send_after_interval: can_send_after_interval,
can_send_after: DateTime.utc_now() |> DateTime.add(can_send_after_interval, :millisecond)
}}
end
@doc ~s"""
Asynchronously add data to be exported to the buffer.
It will be sent no later than `kafka_flush_timeout` milliseconds later. The data
is pushed to an internal buffer that is then sent at once to Kafka.
"""
@spec persist(data | [data], pid() | atom()) :: :ok
def persist(data, exporter \\ __MODULE__) do
GenServer.cast(exporter, {:persist, data})
end
@spec persist_sync(data | [data], pid() | atom()) :: result
def persist_sync(data, exporter \\ __MODULE__) do
GenServer.call(exporter, {:persist, data})
end
def flush(exporter \\ __MODULE__) do
GenServer.call(exporter, :flush)
end
@doc ~s"""
Send all available data in the buffers before shutting down.
The data recorder should be started before the Endpoint in the supervision tree.
This means that when shutting down it will be stopped after the Endpoint so
all data will be stored in Kafka and no more data is expected.
"""
def terminate(_reason, state) do
Logger.info(
"Terminating the KafkaExporter. Sending #{length(state.data)} events to kafka topic: #{
state.topic
}"
)
send_data(state.data, state)
:ok
end
@spec handle_call({:persist, data | [data]}, any(), state) :: {:reply, result, state}
when state: map()
def handle_call({:persist, data}, _from, state) do
data = List.wrap(data)
send_data_result =
(data ++ state.data)
|> send_data_immediately(%{state | size: state.size + length(data)})
{:reply, send_data_result, %{state | data: [], size: 0}}
end
def handle_call(:flush, _from, state) do
send_data_immediately(state.data, state)
{:reply, :ok, %{state | data: [], size: 0}}
end
@spec handle_cast({:persist, data | [data]}, state) :: {:noreply, state}
when state: map()
def handle_cast({:persist, data}, state) do
data = List.wrap(data)
new_messages_length = length(data)
case state.size + new_messages_length >= state.buffering_max_messages do
true ->
:ok = send_data(data ++ state.data, %{state | size: state.size + new_messages_length})
{:noreply,
%{
state
| data: [],
size: 0,
can_send_after:
DateTime.utc_now() |> DateTime.add(state.can_send_after_interval, :millisecond)
}}
false ->
{:noreply, %{state | data: data ++ state.data, size: state.size + new_messages_length}}
end
end
def handle_info(:flush, state) do
send_data(state.data, state)
Process.send_after(self(), :flush, state.kafka_flush_timeout)
{:noreply, %{state | data: [], size: 0}}
end
defp send_data([], _), do: :ok
defp send_data(nil, _), do: :ok
defp send_data(data, %{topic: topic, can_send_after: can_send_after, size: size}) do
Sanbase.DateTimeUtils.sleep_until(can_send_after)
Logger.info("Sending #{size} events to Kafka topic: #{topic}")
@producer.send_data(topic, data)
end
defp send_data_immediately([], _), do: :ok
defp send_data_immediately(nil, _), do: :ok
defp send_data_immediately(data, %{topic: topic, size: size}) do
Logger.info("Sending #{size} events to Kafka topic: #{topic}")
@producer.send_data(topic, data)
end
end
|
lib/sanbase/kafka/kafka_exporter.ex
| 0.814311 | 0.543287 |
kafka_exporter.ex
|
starcoder
|
defmodule BikeBrigade.Riders.RiderSearch do
import Ecto.Query
alias BikeBrigade.Repo
alias BikeBrigade.Riders.{Rider, RiderSearch, Tag}
alias BikeBrigade.Stats.RiderStats
defstruct [
:sort_field,
:sort_order,
:preload,
offset: 0,
limit: 0,
filters: [],
page_changed: true,
query_changed: true
]
@type t :: %RiderSearch{
offset: non_neg_integer(),
limit: non_neg_integer(),
filters: list(),
sort_field: atom(),
sort_order: atom(),
preload: list()
}
@sort_orders [:desc, :asc]
@sortable_fields [:name, :capacity, :last_active, :phone]
@default_opts [
sort_field: :last_active,
sort_order: :desc,
offset: 0,
limit: 20,
filters: [],
preload: []
]
defmodule Filter do
@derive Jason.Encoder
defstruct [:type, :search, :id]
@type t :: %Filter{type: atom(), search: String.t(), id: integer() | nil}
end
defmodule Results do
defstruct page: [], all_locations: [], total: 0, page_first: 0, page_last: 0
@type t :: %Results{
page: list(),
total: non_neg_integer(),
page_first: non_neg_integer(),
page_last: non_neg_integer()
}
@spec has_next_page?(t()) :: boolean()
def has_next_page?(%{page_last: page_last, total: total}) do
page_last < total
end
@spec has_prev_page?(t()) :: boolean()
def has_prev_page?(%{page_first: page_first}) do
page_first > 1
end
end
@spec new(keyword()) :: RiderSearch.t()
def new(opts \\ []) do
opts = Keyword.merge(@default_opts, opts)
%RiderSearch{
offset: opts[:offset],
limit: opts[:limit],
filters: opts[:filters],
sort_order: opts[:sort_order],
sort_field: opts[:sort_field],
preload: opts[:preload],
page_changed: true,
query_changed: true
}
end
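# A minimal usage sketch (the filter values below are illustrative
# assumptions, not part of this module):
#
#   {rs, results} =
#     RiderSearch.new(limit: 10)
#     |> RiderSearch.filter([%Filter{type: :name, search: "sam"}])
#     |> RiderSearch.fetch()
#
#   if Results.has_next_page?(results), do: RiderSearch.next_page(rs)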
@spec fetch(RiderSearch.t()) :: {RiderSearch.t(), Results.t()}
@spec fetch(RiderSearch.t(), Results.t()) :: {RiderSearch.t(), Results.t()}
def fetch(rs, results \\ %Results{}) do
{rs, results} =
{rs, results}
|> fetch_total()
|> fetch_page()
{%{rs | query_changed: false, page_changed: false}, results}
end
@spec fetch_total({RiderSearch.t(), Results.t()}) :: {RiderSearch.t(), Results.t()}
defp fetch_total({%RiderSearch{query_changed: false} = rs, results}) do
{rs, results}
end
defp fetch_total({%RiderSearch{query_changed: true} = rs, results}) do
total =
build_query(rs)
|> exclude(:preload)
|> exclude(:order_by)
|> exclude(:select)
|> exclude(:limit)
|> exclude(:offset)
|> select(count())
|> Repo.one()
{rs, %{results | total: total}}
end
@spec fetch_page({RiderSearch.t(), Results.t()}) :: {RiderSearch.t(), Results.t()}
defp fetch_page({%RiderSearch{page_changed: false} = rs, results}) do
{rs, results}
end
defp fetch_page({%RiderSearch{page_changed: true} = rs, results}) do
riders =
build_query(rs)
|> Repo.all()
|> Repo.preload(rs.preload)
page_first =
if results.total == 0 do
0
else
rs.offset + 1
end
page_last = min(rs.offset + rs.limit + 1, results.total)
{rs, %{results | page: riders, page_first: page_first, page_last: page_last}}
end
@spec fetch_locations(RiderSearch.t()) :: list()
def fetch_locations(rs) do
build_query(rs)
|> exclude(:preload)
|> exclude(:order_by)
|> exclude(:select)
|> exclude(:limit)
|> exclude(:offset)
|> join(:inner, [rider: r], l in assoc(r, :location), as: :location)
|> select([rider: r, location: l], {r.id, r.name, l})
|> Repo.all()
end
@spec filter(t(), list()) :: t()
def filter(rs, filters) do
%{rs | filters: filters, offset: 0, page_changed: true, query_changed: true}
end
@spec sort(t(), atom(), atom()) :: t()
def sort(rs, field, order)
when field in @sortable_fields and order in @sort_orders do
%{rs | sort_field: field, sort_order: order, page_changed: true, query_changed: true}
end
@spec next_page(t()) :: t()
def next_page(%{limit: limit, offset: offset} = rs) do
%{rs | offset: offset + limit, page_changed: true}
end
@spec prev_page(t()) :: t()
def prev_page(%{limit: limit, offset: offset} = rs) do
# Only move to prev page if we have one
if offset > 0 do
%{rs | offset: max(offset - limit, 0), page_changed: true}
else
rs
end
end
@spec build_query(t()) :: Ecto.Query.t()
defp build_query(rs) do
base_query()
|> sort_query(rs.sort_field, rs.sort_order)
|> filter_query(rs.filters)
|> paginate_query(rs.offset, rs.limit)
end
@spec base_query() :: Ecto.Query.t()
defp base_query do
tags_query =
from(t in Tag,
join: r in assoc(t, :riders),
where: r.id == parent_as(:rider).id,
select: %{tags: fragment("array_agg(?)", t.name)}
)
from(r in Rider,
as: :rider,
left_lateral_join: t in subquery(tags_query),
as: :tags,
left_join: l in assoc(r, :latest_campaign),
as: :latest_campaign
)
end
@spec paginate_query(Ecto.Query.t(), non_neg_integer(), non_neg_integer()) :: Ecto.Query.t()
defp paginate_query(query, offset, limit) do
query
|> offset(^offset)
|> limit(^limit)
end
@spec sort_query(Ecto.Query.t(), atom(), atom()) :: Ecto.Query.t()
defp sort_query(query, :last_active, order)
when order in @sort_orders do
order = :"#{order}_nulls_last"
query
|> order_by([{^order, as(:latest_campaign).delivery_start}, asc: :name])
end
defp sort_query(query, field, order) do
query
|> order_by([{^order, ^field}])
end
@spec filter_query(Ecto.Query.t(), list()) :: Ecto.Query.t()
defp filter_query(query, filters) do
Enum.reduce(filters, query, &apply_filter/2)
end
@spec apply_filter(Filter.t(), Ecto.Query.t()) :: Ecto.Query.t()
defp apply_filter(%Filter{type: :name, search: search}, query) do
query
|> where(ilike(as(:rider).name, ^"#{search}%") or ilike(as(:rider).name, ^"% #{search}%"))
end
defp apply_filter(%Filter{type: :phone, search: search}, query) do
query
|> where(like(as(:rider).phone, ^"%#{search}%"))
end
defp apply_filter(%Filter{type: :name_or_phone, search: search}, query) do
query
|> where(
ilike(as(:rider).name, ^"#{search}%") or ilike(as(:rider).name, ^"% #{search}%") or
like(as(:rider).phone, ^"%#{search}%")
)
end
defp apply_filter(%Filter{type: :program, id: id}, query) do
query
|> join(:inner, [rider: r], rs in RiderStats, on: rs.rider_id == r.id and rs.program_id == ^id)
end
defp apply_filter(%Filter{type: :tag, search: tag}, query) do
query
|> where(fragment("? = ANY(?)", ^tag, as(:tags).tags))
end
defp apply_filter(%Filter{type: :capacity, search: capacity}, query) do
# TODO this may be easier with Ecto.Enum instead of EctoEnum
{:ok, capacity} = Rider.CapacityEnum.dump(capacity)
query
|> where(as(:rider).capacity == ^capacity)
end
defp apply_filter(%Filter{type: :active, search: "never"}, query) do
query
|> where(is_nil(as(:latest_campaign).id))
end
defp apply_filter(%Filter{type: :active, search: "all_time"}, query) do
query
|> where(not is_nil(as(:latest_campaign).id))
end
defp apply_filter(%Filter{type: :active, search: period}, query) do
query
|> where(as(:latest_campaign).delivery_start > ago(1, ^period))
end
end
|
lib/bike_brigade/riders/rider_search.ex
| 0.757705 | 0.420124 |
rider_search.ex
|
starcoder
|
defmodule DiscordBot.Model.VoicePayload do
@moduledoc """
An object which wraps all voice control websocket messages.
"""
use DiscordBot.Model.Serializable
alias DiscordBot.Model.{
SelectProtocol,
SessionDescription,
Speaking,
VoiceHello,
VoiceIdentify,
VoicePayload,
VoiceReady
}
defstruct [
:opcode,
:data
]
@typedoc """
The numeric opcode for the payload.
"""
@type opcode :: atom | number
@typedoc """
The body of the payload.
"""
@type data :: any | nil
@type t :: %__MODULE__{
opcode: opcode,
data: data
}
defimpl Poison.Encoder, for: __MODULE__ do
@spec encode(VoicePayload.t(), Poison.Encoder.options()) :: iodata
def encode(payload, options) do
%{opcode: opcode, data: data} = payload
Poison.Encoder.Map.encode(
%{
"op" => VoicePayload.opcode_from_atom(opcode),
"d" => data
},
options
)
end
end
@doc """
Constructs a voice payload containing only an opcode, `opcode`
"""
@spec payload(atom | number) :: __MODULE__.t()
def payload(opcode) do
payload(opcode, nil)
end
@doc """
Constructs a payload containing an opcode, `opcode` and a datagram, `data`
"""
@spec payload(atom | number, any) :: __MODULE__.t()
def payload(opcode, data) when is_number(opcode) do
opcode
|> atom_from_opcode
|> payload(data)
end
def payload(opcode, data) when is_atom(opcode) do
%__MODULE__{
opcode: opcode,
data: data
}
end
@doc """
Builds the voice control heartbeat message.
"""
@spec heartbeat(integer) :: __MODULE__.t()
def heartbeat(nonce) do
payload(:heartbeat, nonce)
end
@doc """
Converts a JSON map into a voice payload.
"""
@spec from_map(map) :: __MODULE__.t()
def from_map(map) do
opcode = map |> Map.get("op") |> atom_from_opcode
%__MODULE__{
opcode: opcode,
data: map |> Map.get("d") |> to_model(opcode)
}
end
@doc """
Converts a payload body to the correct type given its opcode.
"""
@spec to_model(any, atom) :: struct
def to_model(data, opcode) do
case opcode do
:identify -> data |> VoiceIdentify.from_map()
:hello -> data |> VoiceHello.from_map()
:ready -> data |> VoiceReady.from_map()
:select_protocol -> data |> SelectProtocol.from_map()
:speaking -> data |> Speaking.from_map()
:session_description -> data |> SessionDescription.from_map()
_ -> data
end
end
@doc """
Converts a numeric Discord opcode to a corresponding
descriptive atom.
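## Example
iex> DiscordBot.Model.VoicePayload.atom_from_opcode(8)
:hello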
"""
@spec atom_from_opcode(number) :: atom
def atom_from_opcode(opcode) do
%{
0 => :identify,
1 => :select_protocol,
2 => :ready,
3 => :heartbeat,
4 => :session_description,
5 => :speaking,
6 => :heartbeat_ack,
7 => :resume,
8 => :hello,
9 => :resumed,
13 => :client_disconnect
}[opcode]
end
@doc """
Converts an atom describing a Discord opcode to
its corresponding numeric value.
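## Example
iex> DiscordBot.Model.VoicePayload.opcode_from_atom(:hello)
8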
"""
@spec opcode_from_atom(atom) :: number
def opcode_from_atom(atom) do
%{
identify: 0,
select_protocol: 1,
ready: 2,
heartbeat: 3,
session_description: 4,
speaking: 5,
heartbeat_ack: 6,
resume: 7,
hello: 8,
resumed: 9,
client_disconnect: 13
}[atom]
end
end
|
apps/discordbot/lib/discordbot/model/voice_payload.ex
| 0.816077 | 0.419618 |
voice_payload.ex
|
starcoder
|
defmodule Grax do
@moduledoc """
The Grax API.
For now there is no API documentation.
Read about the API in the guide [here](https://rdf-elixir.dev/grax/api.html).
"""
alias Grax.{Schema, Id, Validator, ValidationError}
alias Grax.Schema.{DataProperty, LinkProperty, Struct, AdditionalStatements}
alias Grax.RDF.{Loader, Preloader, Mapper}
alias RDF.{IRI, BlankNode, Graph}
import RDF.Utils
import RDF.Utils.Guards
@__id__property_access_error Schema.InvalidProperty.exception(
property: :__id__,
message:
"__id__ can't be changed. Use build/2 to construct a new Grax.Schema mapping from another with a new id."
)
def build(mod, %IRI{} = id), do: {:ok, do_build(mod, id)}
def build(mod, %BlankNode{} = id), do: {:ok, do_build(mod, id)}
def build(mod, %Id.Schema{} = id_schema), do: build(mod, id_schema, %{})
def build(mod, initial) when is_list(initial), do: build(mod, Map.new(initial))
def build(mod, initial) when is_map(initial) do
with {:ok, id} <- id(mod, initial) do
build(mod, id, Map.delete(initial, :__id__))
end
end
def build(mod, id) do
{:ok, do_build(mod, IRI.new(id))}
end
def build(mod, %Id.Schema{} = id_schema, initial) do
with {:ok, id} <- id(id_schema, initial) do
build(mod, id, initial)
end
end
def build(mod, id, %mod{} = initial) do
mod
|> build!(id, initial)
|> validate()
end
def build(mod, id, initial) do
with {:ok, mapping} <- build(mod, id) do
put(mapping, initial)
end
end
def build!(mod, id) do
case build(mod, id) do
{:ok, mapping} -> mapping
{:error, error} -> raise error
end
end
def build!(mod, id, %mod{} = initial) do
struct(initial, __id__: build!(mod, id).__id__)
end
def build!(mod, id, initial) do
mod
|> build!(id)
|> put!(initial)
end
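# A minimal sketch (the `User` schema and its `:name` property are
# illustrative assumptions, not part of this module):
#
#   {:ok, user} = Grax.build(User, "http://example.com/users/1", name: "Jane")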
defp do_build(mod, id) do
struct(mod, __id__: id)
end
def id(_, %{__id__: %RDF.BlankNode{} = bnode}), do: {:ok, bnode}
def id(_, %{__id__: id}), do: {:ok, RDF.iri(id)}
def id(%Id.Schema{} = id_schema, attributes) do
Id.Schema.generate_id(id_schema, attributes)
end
def id(schema, attributes) when maybe_module(schema) do
schema
|> id_schema(attributes)
|> id(attributes)
end
def id(_, _), do: {:error, "no id schema found"}
def id_schema(schema, initial) when is_atom(schema) do
schema.__id_schema__() ||
(schema.__id_spec__() &&
Id.Spec.custom_select_id_schema(schema.__id_spec__(), schema, initial))
end
def load(mod, id, graph, opts \\ []) do
validate? = Keyword.get(opts, :validate, true)
opts = Keyword.put(opts, :validate, validate?)
do_load(mod, id, graph, validate?, opts)
end
def load!(mod, id, graph, opts \\ []) do
validate? = Keyword.get(opts, :validate, false)
opts = Keyword.put_new(opts, :validate, validate?)
with {:ok, mapping} <- do_load(mod, id, graph, validate?, opts) do
mapping
else
{:error, error} -> raise error
end
end
defp do_load(mod, id, graph, false, opts) do
with {:ok, initial} <- build(mod, id),
{:ok, loaded} <- Loader.call(mod, initial, graph, opts) do
mod.on_load(loaded, graph, opts)
end
end
defp do_load(mod, id, graph, true, opts) do
with {:ok, mapping} <- do_load(mod, id, graph, false, opts) do
validate(mapping, opts)
end
end
def preload(%schema{} = mapping, graph, opts \\ []) do
Preloader.call(schema, mapping, graph, setup_depth_preload_opts(opts))
end
def preload!(%schema{} = mapping, graph, opts \\ []) do
Preloader.call(schema, mapping, graph, [
{:validate, false} | setup_depth_preload_opts(opts)
])
|> case do
{:ok, mapping} -> mapping
{:error, error} -> raise error
end
end
def preloaded?(%schema{} = mapping) do
schema.__properties__(:link)
|> Enum.all?(fn {property, _} -> preloaded?(mapping, property) end)
end
def preloaded?(%schema{} = mapping, property) do
case schema.__property__(property) do
%LinkProperty{} -> mapping |> Map.get(property) |> do_preloaded?()
%DataProperty{} -> true
_ -> raise ArgumentError, "#{inspect(property)} is not a property of #{schema}"
end
end
defp do_preloaded?(nil), do: nil
defp do_preloaded?(%IRI{}), do: false
defp do_preloaded?(%BlankNode{}), do: false
defp do_preloaded?([]), do: true
defp do_preloaded?([value | _]), do: do_preloaded?(value)
defp do_preloaded?(%_{}), do: true
# This is the fallback case with an apparently invalid value.
defp do_preloaded?(_), do: false
# TODO: This is a wrapper acting as a preliminary substitute for the preloading strategy selector
def setup_depth_preload_opts(opts) do
case Keyword.pop(opts, :depth) do
{nil, _} -> opts
{depth, opts} -> Keyword.put_new(opts, :preload, normalize_preload_opt(depth))
end
end
@doc false
def normalize_preload_opt(preload_value)
def normalize_preload_opt(nil), do: nil
def normalize_preload_opt(integer) when is_integer(integer), do: {:add_depth, integer}
def normalize_preload_opt({keyword, _} = depth) when keyword in [:depth, :add_depth],
do: depth
def normalize_preload_opt(invalid),
do: raise(ArgumentError, "invalid depth specification: #{inspect(invalid)}")
@doc false
def normalize_preload_spec(preload_value)
def normalize_preload_spec(integer) when is_integer(integer), do: {:depth, integer}
def normalize_preload_spec({:+, _line, [integer]}) when is_integer(integer),
do: {:add_depth, integer}
def normalize_preload_spec(preload_value), do: normalize_preload_opt(preload_value)
def put(_, :__id__, _), do: {:error, @__id__property_access_error}
# Note, this clause is duplicated on put!/3
def put(mapping, :__additional_statements__, predications) do
{:ok, put_additional_statements(mapping, predications)}
end
def put(%schema{} = mapping, property, value) do
if Schema.has_field?(schema, property) do
cond do
property_schema = schema.__property__(property) ->
validation =
case property_schema.__struct__ do
DataProperty -> :check_property
LinkProperty -> :check_link
end
do_put_property(validation, mapping, property, value, property_schema)
# it's a simple, unmapped field
true ->
{:ok, struct!(mapping, [{property, value}])}
end
else
{:error, Schema.InvalidProperty.exception(property: property)}
end
end
def put(%_{} = mapping, values) do
Enum.reduce(values, {mapping, ValidationError.exception(context: mapping.__id__)}, fn
{property, value}, {mapping, validation} ->
mapping
|> put(property, value)
|> case do
{:ok, mapping} -> {mapping, validation}
{:error, error} -> {mapping, ValidationError.add_error(validation, property, error)}
end
end)
|> case do
{mapping, %ValidationError{errors: []}} -> {:ok, mapping}
{_, validation} -> {:error, validation}
end
end
defp do_put_property(validation, mapping, property, value, property_schema) do
with {:ok, value} <-
value
|> normalize_value(property_schema)
|> build_linked(property_schema) do
Validator
|> apply(validation, [
ValidationError.exception(context: mapping.__id__),
property,
value,
property_schema,
[]
])
|> case do
%{errors: []} -> {:ok, struct!(mapping, [{property, value}])}
%{errors: errors} -> {:error, errors[property]}
end
end
end
def put!(_, :__id__, _), do: raise(@__id__property_access_error)
# Note, this clause is duplicated on put/3
def put!(mapping, :__additional_statements__, predications) do
put_additional_statements(mapping, predications)
end
def put!(%schema{} = mapping, property, value) do
property_schema = schema.__property__(property)
value
|> normalize_value(property_schema)
|> build_linked(property_schema)
|> case do
{:ok, value} -> struct!(mapping, [{property, value}])
{:error, error} -> raise error
end
end
def put!(%_{} = mapping, values) do
Enum.reduce(values, mapping, fn
{property, value}, mapping ->
put!(mapping, property, value)
end)
end
defp normalize_value(value, property_schema) do
normalized_value =
value
|> uniq_value()
|> normalize_list_value(Schema.Property.value_set?(property_schema))
if property_schema do
normalize_type(
normalized_value,
property_schema.__struct__,
Schema.Property.value_type(property_schema)
)
else
normalized_value
end
end
defp uniq_value(value) when is_list(value), do: Enum.uniq(value)
defp uniq_value(value), do: value
defp normalize_list_value(value, true), do: List.wrap(value)
defp normalize_list_value([value], false), do: value
defp normalize_list_value(value, false), do: value
defp normalize_type(values, DataProperty, IRI) when is_list(values),
do: Enum.map(values, &normalize_type(&1, DataProperty, IRI))
defp normalize_type(%IRI{} = iri, DataProperty, IRI), do: iri
defp normalize_type(term, DataProperty, IRI) when maybe_module(term), do: IRI.new(term)
defp normalize_type(value, _, _), do: value
defp build_linked(values, %LinkProperty{} = property_schema) when is_list(values) do
map_while_ok(values, &build_linked(&1, property_schema))
end
defp build_linked(%IRI{} = value, %LinkProperty{}), do: {:ok, value}
defp build_linked(%BlankNode{} = value, %LinkProperty{}), do: {:ok, value}
defp build_linked(term, %LinkProperty{}) when maybe_module(term), do: {:ok, IRI.new(term)}
defp build_linked(%{} = value, %LinkProperty{} = property_schema) do
if Map.has_key?(value, :__struct__) do
{:ok, value}
else
case LinkProperty.value_type(property_schema) do
nil ->
raise ArgumentError,
"unable to determine value type of property #{inspect(property_schema)}"
%{} = class_mapping when not is_struct(class_mapping) ->
raise ArgumentError,
"unable to determine value type of heterogeneous property #{inspect(property_schema)}"
resource_type ->
resource_type.build(value)
end
end
end
defp build_linked(value, _), do: {:ok, value}
@spec additional_statements(Schema.t()) :: RDF.Description.t()
def additional_statements(%_{} = mapping) do
AdditionalStatements.statements(mapping.__additional_statements__, mapping.__id__)
end
@spec add_additional_statements(Schema.t(), map()) :: Schema.t()
def add_additional_statements(%_{} = mapping, predications) do
Struct.update_additional_statements(mapping, &AdditionalStatements.add(&1, predications))
end
@spec put_additional_statements(Schema.t(), map()) :: Schema.t()
def put_additional_statements(%_{} = mapping, predications) do
Struct.update_additional_statements(mapping, &AdditionalStatements.put(&1, predications))
end
@spec clear_additional_statements(Schema.t(), opts :: keyword()) :: Schema.t()
def clear_additional_statements(%schema{} = mapping, opts \\ []) do
Struct.put_additional_statements(
mapping,
if(Keyword.get(opts, :clear_schema_class, false),
do: AdditionalStatements.empty(),
else: schema.__additional_statements__()
)
)
end
@spec validate(Schema.t(), opts :: keyword()) ::
{:ok, Schema.t()} | {:error, ValidationError.t()}
def validate(%_{} = mapping, opts \\ []) do
Validator.call(mapping, opts)
end
@spec validate!(Schema.t(), opts :: keyword()) :: Schema.t()
def validate!(%_{} = mapping, opts \\ []) do
case validate(mapping, opts) do
{:ok, _} -> mapping
{:error, error} -> raise error
end
end
@spec valid?(Schema.t(), opts :: keyword()) :: boolean
def valid?(%_{} = mapping, opts \\ []) do
match?({:ok, _}, validate(mapping, opts))
end
@spec to_rdf(Schema.t(), opts :: keyword()) :: {:ok, Graph.t()} | {:error, any}
def to_rdf(%schema{} = mapping, opts \\ []) do
with {:ok, rdf} <- Mapper.call(mapping, opts) do
schema.on_to_rdf(mapping, rdf, opts)
end
end
@spec to_rdf!(Schema.t(), opts :: keyword()) :: Graph.t()
def to_rdf!(mapping, opts \\ []) do
case to_rdf(mapping, opts) do
{:ok, graph} -> graph
{:error, error} -> raise error
end
end
end
|
lib/grax/grax.ex
| 0.626124 | 0.432723 |
grax.ex
|
starcoder
|
defmodule ElixirLS.LanguageServer.Providers.FoldingRange.Indentation do
@moduledoc """
Code folding based on indentation level
Note that we trim trailing empty rows from regions.
See the example.
"""
alias ElixirLS.LanguageServer.Providers.FoldingRange
alias ElixirLS.LanguageServer.Providers.FoldingRange.Line
@doc """
Provides ranges for the source text based on the indentation level.
## Example
iex> alias ElixirLS.LanguageServer.Providers.FoldingRange
iex> text = \"""
...> defmodule A do # 0
...> def get_info(args) do # 1
...> org = # 2
...> args # 3
...> |> Ecto.assoc(:organization) # 4
...> |> Repo.one!() # 5
...>
...> user = # 7
...> org # 8
...> |> Organization.user!() # 9
...>
...> {:ok, %{org: org, user: user}} # 11
...> end # 12
...> end # 13
...> \"""
iex> FoldingRange.convert_text_to_input(text)
...> |> FoldingRange.Indentation.provide_ranges()
{:ok, [
%{startLine: 0, endLine: 12, kind?: :region},
%{startLine: 1, endLine: 11, kind?: :region},
%{startLine: 7, endLine: 9, kind?: :region},
%{startLine: 2, endLine: 5, kind?: :region},
]}
Note that the empty lines 6 and 10 do not appear in the inner most ranges.
"""
@spec provide_ranges(FoldingRange.input()) :: {:ok, [FoldingRange.t()]}
def provide_ranges(%{lines: lines}) do
ranges =
lines
|> Enum.map(&extract_cell/1)
|> pair_cells()
|> pairs_to_ranges()
{:ok, ranges}
end
defp extract_cell({_line, cell, _first}), do: cell
@doc """
Pairs cells into {start, end} tuples of regions
Public function for testing
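## Example
Cells are `{row, indentation}` tuples (a `nil` indentation marks an empty row):
iex> alias ElixirLS.LanguageServer.Providers.FoldingRange
iex> FoldingRange.Indentation.pair_cells([{0, 0}, {1, 2}, {2, 2}, {3, 0}])
[{{0, 0}, {3, 0}}]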
"""
@spec pair_cells([Line.cell()]) :: [{Line.cell(), Line.cell()}]
def pair_cells(cells) do
do_pair_cells(cells, [], [], [])
end
# Base case
defp do_pair_cells([], _, _, pairs) do
pairs
|> Enum.map(fn
{cell1, cell2, []} -> {cell1, cell2}
{cell1, _, empties} -> {cell1, List.last(empties)}
end)
|> Enum.reject(fn {{r1, _}, {r2, _}} -> r1 + 1 >= r2 end)
end
# Empty row
defp do_pair_cells([{_, nil} = head | tail], stack, empties, pairs) do
do_pair_cells(tail, stack, [head | empties], pairs)
end
# Empty stack
defp do_pair_cells([head | tail], [], empties, pairs) do
do_pair_cells(tail, [head], empties, pairs)
end
# Non-empty stack: head is to the right of the top of the stack
defp do_pair_cells([{_, x} = head | tail], [{_, y} | _] = stack, _, pairs) when x > y do
do_pair_cells(tail, [head | stack], [], pairs)
end
# Non-empty stack: head is equal to or to the left of the top of the stack
defp do_pair_cells([{_, x} = head | tail], stack, empties, pairs) do
# If the head is <= to the top of the stack, then we need to pair it with
# everything on the stack to the right of it.
# The head can also start a new region, so it's pushed onto the stack.
{leftovers, new_tail_stack} = stack |> Enum.split_while(fn {_, y} -> x <= y end)
new_pairs = leftovers |> Enum.map(&{&1, head, empties})
do_pair_cells(tail, [head | new_tail_stack], [], new_pairs ++ pairs)
end
@spec pairs_to_ranges([{Line.cell(), Line.cell()}]) :: [FoldingRange.t()]
defp pairs_to_ranges(pairs) do
pairs
|> Enum.map(fn {{r1, _}, {r2, _}} ->
%{
startLine: r1,
endLine: r2 - 1,
kind?: :region
}
end)
end
end
|
apps/language_server/lib/language_server/providers/folding_range/indentation.ex
| 0.861786 | 0.495667 |
indentation.ex
|
starcoder
|
defmodule Exmath do
@doc """
Factorial will multiply n with n-1 until n <= 1.
# Example
iex> Exmath.factorial(4)
24
"""
@spec factorial(number) :: integer
def factorial(n) when n <= 1, do: 1
def factorial(n) do
n * factorial(n-1)
end
@doc """
Combinations formula.
A formula for the number of possible combinations of r elements from a set of n elements.
In combinations order doesn't matter.
# Example
We have 5 balls, in how many ways can we select 3 of them?
iex> Exmath.nCr(5, 3)
10.0
"""
@spec nCr(number, number) :: float
def nCr(n, r) do
factorial(n)/(factorial(r)*factorial((n-r)))
end
@doc """
Permutations formula.
A formula for the number of possible permutations of r elements from a set of n elements.
# Example
How many ways can 4 students from a group of 15 be lined up for a photograph?
iex> Exmath.nPr(15, 4)
32760.0
"""
@spec nPr(number, number) :: float
def nPr(n, r) do
factorial(n)/factorial((n-r))
end
@doc """
Prints row r of Pascal's triangle.
Calculated using the previously implemented nCr formula.
Be aware; Pascal's triangle starts with 0 both column- and row-wise.
# Example
What is the 4th row of pascals triangle.
iex> Exmath.pascals_triangle_row(3)
[1.0, 3.0, 3.0, 1.0]
"""
@spec pascals_triangle_row(number) :: [float]
def pascals_triangle_row(r) do
Enum.map((0..r), fn(c) -> nCr(r, c) end)
end
@doc """
Hypergeometric distribution without replacement
# Parameters
* k -> how many wins
* nn -> total pool
* kk -> target total (wins + losses)
* n -> how many to draw
# Example
Imagine we have an urn of 50 marbles. 5 green ones and 45 red ones.
Blindly we will take 10 marbles from the urn.
What is the likelihood that we will draw 4 green and 6 red marbles.
This means we will have k=4, n=10, N=50, K=5.
iex> Float.round Exmath.hypergeometric_distribution(4, 50, 5, 10), 5
0.00396
"""
@spec hypergeometric_distribution(number, number, number, number) :: float
def hypergeometric_distribution(k, nn, kk, n) do
(nCr(kk, k)*nCr(nn-kk, n-k))/nCr(nn, n)
end
@doc """
Get the average growth between two points in a graph.
Returns 0.0 when both points are identical.
# Example
Imagine we have the two points (1, 1) and (10, 10).
The mathematical formula for calculating this is delta-y/delta-x.
iex> Exmath.average_growth({1, 1}, {10, 10})
1.0
"""
@spec average_growth({number, number}, {number, number}) :: float
def average_growth(p1, p1), do: 0.0
def average_growth({p1_x, p1_y}, {p2_x, p2_y}), do: (p2_y-p1_y)/(p2_x-p1_x)
@doc """
Computes the Stirling number of the second kind.
This is how many ways you can partition n elements into k groups.
# Example
Let's say you have 10 images. How many ways can you partition those images into 3 groups?
iex> Exmath.stirlings2(10, 3)
9330.0
"""
@spec stirlings2(number, number) :: number
def stirlings2(_n, 1), do: 1
def stirlings2(n, n), do: 1
def stirlings2(n, k) do
(1/factorial(k))*Enum.reduce((0..k), 0, fn(j, acc) ->
acc + :math.pow(-1, k-j)*nCr(k, j)*:math.pow(j, n)
end)
end
@doc """
Calculates the n-th bell number.
A bell number is how many ways you can partition n elements.
# Example
If you have a set of 10 images, how many different ways can you group them?
iex> Exmath.bell_number(10)
115_975.0
"""
@spec bell_number(number) :: float
def bell_number(n) do
Enum.reduce((0..n), 0, fn(k, acc) ->
acc + stirlings2(n, k)
end)
end
@doc """
Calculates the hypothenus using the pythagoras theorem
# Example
iex> Exmath.pythagoras(4, 3)
5.0
"""
@spec pythagoras(number, number) :: float
def pythagoras(x, y) do
:math.sqrt(:math.pow(x, 2) + :math.pow(y, 2))
end
# Delegate all of the default functions in erlangs math module {{{
defdelegate acos(x), to: :math
defdelegate acosh(x), to: :math
defdelegate asin(x), to: :math
defdelegate asinh(x), to: :math
defdelegate atan(x), to: :math
defdelegate atan2(x, y), to: :math
defdelegate atanh(x), to: :math
defdelegate cos(x), to: :math
defdelegate cosh(x), to: :math
defdelegate exp(x), to: :math
defdelegate log(x), to: :math
defdelegate log10(x), to: :math
defdelegate log2(x), to: :math
defdelegate pow(x, y), to: :math
defdelegate sin(x), to: :math
defdelegate sinh(x), to: :math
defdelegate sqrt(x), to: :math
defdelegate tan(x), to: :math
defdelegate tanh(x), to: :math
# }}}
end
|
lib/exmath.ex
| 0.9245 | 0.808446 |
exmath.ex
|
starcoder
|
defmodule LoggerPapertrailBackend.Configurator do
alias LoggerPapertrailBackend.Configuration
@moduledoc """
You can configure the Papertrail backend with a URL in the form of papertrail://logs.papertrail.com:12345/my_system_name
It works with syslog:// as the scheme too.
In your config, choose between
```elixir
config :logger, :logger_papertrail_backend,
url: "papertrail://logs.papertrail.com:12345/my_system_name"
```
or
``` elixir
config :logger, :logger_papertrail_backend,
host: "logs.papertrailapp.com:12345",
system_name: my_system_name
```
### Example
iex> config = [url: "papertrail://logs.papertrail.com:12345/my_system_name"]
iex> LoggerPapertrailBackend.Configurator.configure_papertrail_target(config)
%LoggerPapertrailBackend.Configuration{host: "logs.papertrail.com", port: 12345, system_name: "my_system_name"}
iex> host_config = [host: "logs.papertrail.com:12345", system_name: "my_system_name"]
iex> LoggerPapertrailBackend.Configurator.configure_papertrail_target(host_config)
%LoggerPapertrailBackend.Configuration{host: "logs.papertrail.com", port: 12345, system_name: "my_system_name"}
"""
@doc """
Configures the target using a parsable URI as url,
or configures the target by extracting system-name, host and port from a keyword list
in the form of `[host: "hostname:port", system_name: "my_system_name"]`.
`system_name` is optional.
"""
@spec configure_papertrail_target(configuration :: list) :: %Configuration{
host: binary,
port: integer,
system_name: binary
}
def configure_papertrail_target(configuration) when is_list(configuration) do
configuration
|> Enum.into(%{})
|> configure_target
end
def configure_papertrail_target(configuration), do: configure_target(configuration)
# private parts
defp configure_target(%{url: url}), do: configure_target(URI.parse(url))
defp configure_target(%URI{host: host, path: path, port: port}) do
system_name = path |> clean_path
%Configuration{host: host, port: port, system_name: system_name}
end
defp configure_target(%{host: host_config, system_name: system_name}) do
"papertrail://#{host_config}/#{system_name}"
|> URI.parse()
|> configure_target
end
defp configure_target(%{host: host_config}),
do: configure_target(%{host: host_config, system_name: nil})
defp configure_target(config) do
raise(LoggerPapertrailBackend.ConfigurationError, "Unknown configuration: #{inspect(config)}")
end
defp clean_path("/"), do: nil
defp clean_path("/" <> rest), do: rest
defp clean_path(_), do: nil
end
|
lib/configurator.ex
| 0.666062 | 0.440168 |
configurator.ex
|
starcoder
|
defmodule Topo.Intersects do
@moduledoc false
alias Topo.PointLine
alias Topo.PointRing
alias Topo.LineLine
alias Topo.LineRing
alias Topo.RingRing
@type geo_struct ::
%Geo.Point{}
| %Geo.MultiPoint{}
| %Geo.LineString{}
| %Geo.MultiLineString{}
| %Geo.Polygon{}
| %Geo.MultiPolygon{}
@spec intersects?(geo_struct, geo_struct) :: boolean
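# A minimal usage sketch (coordinates below are illustrative assumptions):
#
#   point = %Geo.Point{coordinates: {1, 1}}
#   square = %Geo.Polygon{coordinates: [[{0, 0}, {0, 2}, {2, 2}, {2, 0}, {0, 0}]]}
#   Topo.Intersects.intersects?(point, square)
#   #=> expected: true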
def intersects?(%Geo.Point{} = a, %Geo.Point{} = b), do: a == b
def intersects?(%Geo.Point{} = a, %Geo.MultiPoint{} = b), do: intersects_any?(a, b, Geo.Point)
def intersects?(%Geo.Point{coordinates: a}, %Geo.LineString{coordinates: b}),
do: PointLine.relate(b, a) != :disjoint
def intersects?(%Geo.Point{} = a, %Geo.MultiLineString{} = b),
do: intersects_any?(a, b, Geo.LineString)
def intersects?(%Geo.Point{} = a, %Geo.Polygon{coordinates: [exterior | holes]}) do
case PointRing.relate(exterior, a.coordinates) do
:interior -> PointRing.relate_multi(holes, a.coordinates) !== :interior
:disjoint -> false
_ -> true
end
end
def intersects?(%Geo.Point{} = a, %Geo.MultiPolygon{} = b),
do: intersects_any?(a, b, Geo.Polygon)
def intersects?(%Geo.MultiPoint{} = a, %Geo.Point{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiPoint{} = a, %Geo.MultiPoint{} = b),
do: intersects_any?(a, b, Geo.Point)
def intersects?(%Geo.MultiPoint{} = a, %Geo.LineString{} = b),
do: intersects_any?(b, a, Geo.Point)
def intersects?(%Geo.MultiPoint{} = a, %Geo.MultiLineString{} = b),
do: intersects_any?(a, b, Geo.LineString)
def intersects?(%Geo.MultiPoint{} = a, %Geo.Polygon{} = b), do: intersects_any?(b, a, Geo.Point)
def intersects?(%Geo.MultiPoint{} = a, %Geo.MultiPolygon{} = b),
do: intersects_any?(a, b, Geo.Polygon)
def intersects?(%Geo.LineString{} = a, %Geo.Point{} = b), do: intersects?(b, a)
def intersects?(%Geo.LineString{} = a, %Geo.MultiPoint{} = b), do: intersects?(b, a)
def intersects?(%Geo.LineString{coordinates: a}, %Geo.LineString{coordinates: b}),
do: LineLine.relate(a, b) != :disjoint
def intersects?(%Geo.LineString{} = a, %Geo.MultiLineString{} = b),
do: intersects_any?(a, b, Geo.LineString)
def intersects?(%Geo.LineString{} = a, %Geo.Polygon{coordinates: [b_exterior | b_holes]}) do
LineRing.intersects?(b_exterior, a.coordinates) &&
LineRing.none_contain_line?(b_holes, a.coordinates)
end
def intersects?(%Geo.LineString{} = a, %Geo.MultiPolygon{} = b),
do: intersects_any?(a, b, Geo.Polygon)
def intersects?(%Geo.MultiLineString{} = a, %Geo.Point{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiLineString{} = a, %Geo.MultiPoint{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiLineString{} = a, %Geo.LineString{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiLineString{} = a, %Geo.MultiLineString{} = b),
do: intersects_any?(a, b, Geo.LineString)
def intersects?(%Geo.MultiLineString{} = a, %Geo.Polygon{} = b),
do: intersects_any?(b, a, Geo.LineString)
def intersects?(%Geo.MultiLineString{} = a, %Geo.MultiPolygon{} = b),
do: intersects_any?(a, b, Geo.Polygon)
def intersects?(%Geo.Polygon{} = a, %Geo.Point{} = b), do: intersects?(b, a)
def intersects?(%Geo.Polygon{} = a, %Geo.MultiPoint{} = b), do: intersects?(b, a)
def intersects?(%Geo.Polygon{} = a, %Geo.LineString{} = b), do: intersects?(b, a)
def intersects?(%Geo.Polygon{} = a, %Geo.MultiLineString{} = b), do: intersects?(b, a)
def intersects?(%Geo.Polygon{coordinates: [a_exterior | a_holes]}, %Geo.Polygon{
coordinates: [b_exterior | b_holes]
}) do
RingRing.intersects?(a_exterior, b_exterior) &&
RingRing.none_contain_ring?(a_holes, b_exterior) &&
RingRing.none_contain_ring?(b_holes, a_exterior)
end
def intersects?(%Geo.Polygon{} = a, %Geo.MultiPolygon{} = b),
do: intersects_any?(a, b, Geo.Polygon)
def intersects?(%Geo.MultiPolygon{} = a, %Geo.Point{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiPolygon{} = a, %Geo.MultiPoint{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiPolygon{} = a, %Geo.LineString{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiPolygon{} = a, %Geo.MultiLineString{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiPolygon{} = a, %Geo.Polygon{} = b), do: intersects?(b, a)
def intersects?(%Geo.MultiPolygon{} = a, %Geo.MultiPolygon{} = b),
do: intersects_any?(a, b, Geo.Polygon)
defp intersects_any?(a, b, component_struct) do
Enum.any?(b.coordinates, fn b_comp ->
intersects?(a, struct(component_struct, %{coordinates: b_comp}))
end)
end
end
|
lib/topo/intersects.ex
| 0.821689 | 0.441432 |
intersects.ex
|
starcoder
|
defmodule StepFlow.Step.Helpers do
@moduledoc """
The Helper Step context.
"""
@doc """
Retrieves a value from an object, filtered by key.
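## Example
The job shape below is illustrative:
iex> job = %{parameters: [%{id: "destination_path", type: "string", value: "/tmp/out"}]}
iex> StepFlow.Step.Helpers.get_value_in_parameters(job, "destination_path")
["/tmp/out"]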
"""
def get_value_in_parameters(object, key) do
StepFlow.Map.get_by_key_or_atom(object, :parameters, [])
|> Enum.filter(fn param ->
StepFlow.Map.get_by_key_or_atom(param, :id) == key
end)
|> Enum.map(fn param ->
StepFlow.Map.get_by_key_or_atom(
param,
:value,
StepFlow.Map.get_by_key_or_atom(param, :default)
)
end)
end
@doc """
Retrieves a value from an object, filtered by key and type.
"""
def get_value_in_parameters_with_type(object, key, type) do
StepFlow.Map.get_by_key_or_atom(object, :parameters, [])
|> Enum.filter(fn param ->
StepFlow.Map.get_by_key_or_atom(param, :id) == key &&
StepFlow.Map.get_by_key_or_atom(param, :type) == type
end)
|> Enum.map(fn param ->
StepFlow.Map.get_by_key_or_atom(
param,
:value,
StepFlow.Map.get_by_key_or_atom(param, :default)
)
end)
end
def get_string_or_processed_template_value(
workflow,
step,
dates,
source_paths,
key,
default \\ ""
) do
get_value_in_parameters_with_type(step, key, "string")
|> List.first()
|> case do
nil ->
get_value_in_parameters_with_type(step, key, "template")
|> List.first()
|> case do
nil ->
default
template ->
template
|> template_process(workflow, step, dates, source_paths)
end
string_value ->
string_value
end
end
def get_jobs_destination_paths(jobs) do
jobs
|> Enum.map(fn job ->
get_job_destination_paths(job)
end)
|> List.flatten()
|> Enum.uniq()
|> Enum.filter(fn path -> !is_nil(path) end)
end
def get_job_destination_paths(job) do
destination_path = get_value_in_parameters(job, "destination_path")
destination_paths = get_value_in_parameters(job, "destination_paths")
destination_path ++ destination_paths
end
@doc """
Filter a list of paths.
## Examples
iex> StepFlow.Step.Helpers.filter_path_list(["path_1.ext1", "path2.ext2"], [%{"ends_with" => ".ext2"}])
["path2.ext2"]
iex> StepFlow.Step.Helpers.filter_path_list(["path_1.ext1", "path2.ext2"], [%{ends_with: ".ext2"}])
["path2.ext2"]
"""
def filter_path_list(source_paths, []), do: source_paths
def filter_path_list(source_paths, [filter | filters]) do
new_source_paths =
case filter do
%{ends_with: ends_with} ->
Enum.filter(source_paths, fn path -> String.ends_with?(path, ends_with) end)
%{"ends_with" => ends_with} ->
Enum.filter(source_paths, fn path -> String.ends_with?(path, ends_with) end)
end
filter_path_list(new_source_paths, filters)
end
def get_step_requirements(jobs, step) do
%{paths: get_required_paths(jobs, step)}
end
def get_required_paths(jobs, step) do
required_ids = StepFlow.Map.get_by_key_or_atom(step, :required, [])
jobs
|> Enum.filter(fn job -> job.step_id in required_ids end)
|> get_jobs_destination_paths
end
def add_required_paths(requirements, paths) when is_list(paths) do
Map.update(requirements, :paths, paths, fn cur_paths ->
Enum.concat(cur_paths, paths)
|> Enum.uniq()
end)
end
def add_required_paths(requirements, path) do
paths =
Map.get(requirements, :paths, [])
|> List.insert_at(-1, path)
add_required_paths(requirements, paths)
end
def get_dates do
now = Timex.now()
%{
date_time: Timex.format!(now, "%Y_%m_%d__%H_%M_%S", :strftime),
date: Timex.format!(now, "%Y_%m_%d", :strftime),
epoch: Timex.epoch()
}
end
def get_work_directory(step) do
StepFlow.Map.get_by_key_or_atom(step, :work_dir) ||
StepFlow.Configuration.get_var_value(StepFlow, :workers_work_directory) ||
""
end
def get_base_directory(workflow, step) do
get_work_directory(step) <> "/" <> Integer.to_string(workflow.id) <> "/"
end
def templates_process(_templates, _workflow, _step, _dates, result \\ [])
def templates_process([], _workflow, _step, _dates, result), do: result
def templates_process([template | templates], workflow, step, dates, result) do
processed = intern_template_process(template, workflow, step, dates, [])
result = List.insert_at(result, -1, processed)
templates_process(templates, workflow, step, dates, result)
end
def template_process(template, workflow, step, dates, nil) do
intern_template_process(template, workflow, step, dates, [])
end
def template_process(template, workflow, step, dates, source_path)
when is_binary(source_path) do
filename = Path.basename(source_path)
extension = Path.extname(source_path)
name = Path.basename(source_path, extension)
source_keywords =
Keyword.new()
|> Keyword.put(:source_path, source_path)
|> Keyword.put(:filename, filename)
|> Keyword.put(:extension, extension)
|> Keyword.put(:name, name)
intern_template_process(template, workflow, step, dates, source_keywords)
end
def template_process(template, workflow, step, dates, source_paths)
when is_list(source_paths) do
source_keywords =
Keyword.new()
|> Keyword.put(:source_paths, source_paths)
intern_template_process(template, workflow, step, dates, source_keywords)
end
defp intern_template_process(template, workflow, step, dates, source_keywords) do
step_parameters =
StepFlow.Map.get_by_key_or_atom(step, :parameters, [])
|> Enum.filter(fn item ->
StepFlow.Map.get_by_key_or_atom(item, :type) in [
"string",
"integer"
]
end)
|> Enum.map(fn item ->
identifier =
StepFlow.Map.get_by_key_or_atom(item, :id)
|> String.to_atom()
value =
StepFlow.Map.get_by_key_or_atom(
item,
:value,
StepFlow.Map.get_by_key_or_atom(item, :default)
)
{identifier, value}
end)
defined_parameters =
workflow.parameters
|> Enum.filter(fn item ->
StepFlow.Map.get_by_key_or_atom(item, :type) in [
"string",
"array_of_strings",
"integer",
"array_of_integers"
]
end)
|> Enum.map(fn item ->
identifier =
StepFlow.Map.get_by_key_or_atom(item, :id)
|> String.to_atom()
value =
StepFlow.Map.get_by_key_or_atom(
item,
:value,
StepFlow.Map.get_by_key_or_atom(item, :default)
)
|> convert_to_string()
{identifier, value}
end)
|> Keyword.put(:workflow_id, workflow.id)
|> Keyword.put(:workflow_reference, workflow.reference)
|> Keyword.put(:step_name, StepFlow.Map.get_by_key_or_atom(step, :name))
|> Keyword.put(:work_directory, get_work_directory(step))
|> Keyword.put(:date_time, dates.date_time)
|> Keyword.put(:date, dates.date)
|> Keyword.merge(source_keywords)
|> Keyword.merge(step_parameters)
Keyword.keys(defined_parameters)
|> replace(template)
|> EEx.eval_string(defined_parameters)
end
defp replace([], template), do: template
defp replace([key | keys], template) do
template =
String.replace(
template,
"{" <> Atom.to_string(key) <> "}",
"<%= " <> Atom.to_string(key) <> "%>"
)
replace(keys, template)
end
defp convert_to_string(value) when is_bitstring(value), do: value
defp convert_to_string(value), do: "#{inspect(value)}"
end
|
lib/step_flow/step/helpers.ex
| 0.798423 | 0.569344 |
helpers.ex
|
starcoder
|
alias Farmbot.BotState.Hardware, as: Hardware
alias Farmbot.BotState.Configuration, as: Configuration
defmodule Farmbot.BotState do
require Logger
@moduledoc """
Functions for modifying Farmbot's state
all in one convenient (and easy to spell) location.
"""
@doc """
Gets the current position of the bot. Returns [x,y,z]
"""
@spec get_current_pos() :: [integer, ...]
def get_current_pos, do: GenServer.call(Hardware, :get_current_pos)
@doc """
Sets the position to the given position.
"""
@spec set_pos(integer,integer,integer) :: :ok
def set_pos(x, y, z)
when is_integer(x) and is_integer(y) and is_integer(z) do
GenServer.cast(Hardware, {:set_pos, {x, y, z}})
end
@doc """
Sets a pin to the given value.
"""
@spec set_pin_value(integer, integer) :: :ok
def set_pin_value(pin, value) when is_integer(pin) and is_integer(value) do
GenServer.cast(Hardware, {:set_pin_value, {pin, value}})
end
@doc """
Sets a mode for a particular pin.
This should happen before setting the value if possible.
"""
@spec set_pin_mode(integer, 0 | 1) :: :ok
def set_pin_mode(pin, mode)
when is_integer(pin) and is_integer(mode) do
GenServer.cast(Hardware, {:set_pin_mode, {pin, mode}})
end
@doc """
Sets a param to a particular value.
This should be the human readable string version of the param.
"""
@spec set_param(atom, integer) :: :ok
def set_param(param, value) when is_atom(param) do
GenServer.cast(Hardware, {:set_param, {param, value}})
end
@doc """
Sets the current end stops
"""
@spec set_end_stops(Hardware.State.end_stops) :: :ok
def set_end_stops({xa,xb,ya,yb,za,zc}) do
GenServer.cast(Hardware, {:set_end_stops, {xa,xb,ya,yb,za,zc}})
end
@doc """
Gets the map of every param.
Useful for resetting params if the Arduino flops.
"""
@spec get_all_mcu_params :: Hardware.State.mcu_params
def get_all_mcu_params do
GenServer.call(Hardware, :get_all_mcu_params)
end
@doc """
Gets the value of a pin.
"""
@spec get_pin(integer) :: %{mode: 0 | 1, value: number}
def get_pin(pin_number) when is_integer(pin_number) do
GenServer.call(Hardware, {:get_pin, pin_number})
end
@doc """
Gets the current firmware version.
This is just a shortcut.
"""
@spec get_fw_version :: String.t
def get_fw_version, do: GenServer.call(Configuration, :get_fw_version)
@doc """
Sets the firmware version.
"""
@spec set_fw_version(binary) :: no_return
def set_fw_version(v),
do: GenServer.cast(Configuration, {:update_info, :firmware_version, v})
@doc """
Gets the current controller version
"""
@spec get_os_version :: String.t
def get_os_version, do: GenServer.call(Configuration, :get_version)
@doc """
Gets the value of a hardware param
"""
@spec get_param(atom) :: integer | nil
def get_param(param), do: GenServer.call(Hardware, {:get_param, param})
@doc """
Updates a config under the given key.
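## Example
A sketch (the config key below is an illustrative assumption):
Farmbot.BotState.update_config("steps_per_mm_x", 500)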
"""
@spec update_config(String.t, any) :: :ok | {:error, atom}
def update_config(config_key, value)
when is_bitstring(config_key) do
GenServer.call(Configuration, {:update_config, config_key, value})
end
@doc """
Gets the value stored under key.
"""
@spec get_config(atom) :: nil | any
def get_config(config_key) when is_atom(config_key) do
GenServer.call(Configuration, {:get_config, config_key})
end
@doc """
Adds or updates an environment variable for Farmwares.
Takes either a key and a value, or a map of keys and values.
Creates new keys, or updates existing ones.
"""
@spec set_user_env(String.t, String.t) :: boolean
def set_user_env(key, val) do
GenServer.call(Configuration,
{:update_config, "user_env", Map.new([{key, val}])})
end
@spec set_user_env(map) :: boolean
def set_user_env(map) when is_map(map) do
GenServer.call(Configuration, {:update_config, "user_env", map})
end
@doc """
Locks the bot
"""
@spec lock_bot :: :ok | no_return
def lock_bot do
GenServer.cast(Configuration, {:update_info, :locked, true})
end
@doc """
Unlocks the bot
"""
@spec unlock_bot :: :ok | no_return
def unlock_bot do
GenServer.cast(Configuration, {:update_info, :locked, false})
end
@doc """
Checks the bot's lock status.
"""
@spec locked? :: boolean
def locked? do
GenServer.call(Configuration, :locked?)
end
@doc """
Sets the bot's state of whether we need to sync or not.
"""
@type sync_msg :: Configuration.sync_msg
@spec set_sync_msg(sync_msg) :: :ok
def set_sync_msg(sync_msg)
def set_sync_msg(:synced = thing),
do: GenServer.cast(Configuration, {:update_info, :sync_status, thing})
def set_sync_msg(:sync_now = thing),
do: GenServer.cast(Configuration, {:update_info, :sync_status, thing})
def set_sync_msg(:syncing = thing),
do: GenServer.cast(Configuration, {:update_info, :sync_status, thing})
def set_sync_msg(:sync_error = thing),
do: GenServer.cast(Configuration, {:update_info, :sync_status, thing})
def set_sync_msg(:unknown = thing),
do: GenServer.cast(Configuration, {:update_info, :sync_status, thing})
end
|
lib/bot_state/bot_state.ex
| 0.848109 | 0.415699 |
bot_state.ex
|
starcoder
|
defmodule AdventOfCode2019.MonitoringStation do
@moduledoc """
Day 10 — https://adventofcode.com/2019/day/10
"""
@spec part1(Enumerable.t()) :: integer()
def part1(in_stream) do
in_stream
|> Stream.with_index()
|> Stream.map(&load_belt/1)
|> Enum.to_list()
|> List.flatten()
|> find_max_sights()
end
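# A minimal usage sketch (assumes a local `input.txt` puzzle file):
#
#   File.stream!("input.txt") |> AdventOfCode2019.MonitoringStation.part1()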
@spec part2(Enumerable.t(), integer()) :: any()
def part2(in_stream, nth \\ 200) do
{belt, row, col} =
in_stream
|> Stream.with_index()
|> Stream.map(&load_belt/1)
|> Enum.to_list()
|> List.flatten()
|> Enum.reduce(Map.new(), &belt_map/2)
|> find_best_location()
case vaporize_from(belt, row, col, nth) do
{_belt, {nil, nil}} -> "All asteroids already vaporized!"
{_belt, {nth_row, nth_col}} -> nth_row + 100 * nth_col
end
end
@spec load_belt(tuple()) :: Enumerable.t()
defp load_belt({line, row}) do
String.trim(line)
|> String.graphemes()
|> load_belt([], row, 0)
end
@spec load_belt(Enumerable.t(), Enumerable.t(), integer(), integer()) :: Enumerable.t()
defp load_belt([], belt, _row, _col), do: belt
defp load_belt(["." | tail], belt, row, col), do: load_belt(tail, belt, row, col + 1)
defp load_belt(["#" | tail], belt, row, col) do
belt = [{row, col, MapSet.new()} | belt]
load_belt(tail, belt, row, col + 1)
end
### Part 1
@spec find_max_sights(Enumerable.t()) :: integer()
defp find_max_sights(belt), do: find_max_sights(belt, belt, 0)
@spec find_max_sights(Enumerable.t(), Enumerable.t(), integer()) :: integer()
defp find_max_sights([] = _asteroids, _belt, max_sights), do: max_sights
defp find_max_sights([asteroid | tail], belt, max_sights) do
sights = count_sights(asteroid, belt)
find_max_sights(tail, belt, Enum.max([max_sights, sights]))
end
@spec count_sights(tuple(), Enumerable.t()) :: integer()
defp count_sights({_row, _col, sights}, [] = _belt), do: MapSet.size(sights)
defp count_sights({row, col, sights}, [{row, col, _sights} | tail]),
do: count_sights({row, col, sights}, tail)
defp count_sights({row, col, sights}, [{other_row, col, _sights} | tail])
when row > other_row do
{row, col, MapSet.put(sights, 90)}
|> count_sights(tail)
end
defp count_sights({row, col, sights}, [{_row, col, _sights} | tail]) do
{row, col, MapSet.put(sights, 270)}
|> count_sights(tail)
end
defp count_sights({row, col, sights}, [{other_row, other_col, _sights} | tail])
when col > other_col do
angle = angle(row, col, other_row, other_col)
{row, col, MapSet.put(sights, angle)}
|> count_sights(tail)
end
defp count_sights({row, col, sights}, [{other_row, other_col, _sights} | tail]) do
angle = angle(row, col, other_row, other_col)
{row, col, MapSet.put(sights, angle - 180)}
|> count_sights(tail)
end
@spec angle(integer(), integer(), integer(), integer()) :: float()
defp angle(row, col, other_row, other_col) do
((row - other_row) / (other_col - col))
|> :math.atan()
|> degrees()
end
@spec degrees(float()) :: float()
defp degrees(radians), do: radians * 180 / :math.pi()
### Part 2
defp belt_map({row, col, _neighbors}, belt),
do: Map.put(belt, "#{row}:#{col}", {row, col, Map.new()})
defp find_best_location(belt), do: find_best_location({0, nil}, Map.values(belt), belt)
defp find_best_location({_max_neighbors, {row, col, _neighbors}}, [] = _asteroids, belt),
do: {belt, row, col}
defp find_best_location({max_neighbors, best_location}, [asteroid | tail], belt) do
{row, col, neighbors} = add_neighbors(asteroid, Map.values(belt))
belt = Map.put(belt, "#{row}:#{col}", {row, col, neighbors})
Map.to_list(neighbors)
|> length()
|> choose_the_best(asteroid, max_neighbors, best_location)
|> find_best_location(tail, belt)
end
defp choose_the_best(num_neighbors, asteroid, max_neighbors, _best_location)
when num_neighbors > max_neighbors,
do: {num_neighbors, asteroid}
defp choose_the_best(_num_neighbors, _asteroid, max_neighbors, best_location),
do: {max_neighbors, best_location}
defp add_neighbors(asteroid, [] = _belt), do: asteroid
defp add_neighbors({row, col, neighbors}, [{row, col, _neighbors} | tail]) do
add_neighbors({row, col, neighbors}, tail)
end
defp add_neighbors({row, col, neighbors}, [{other_row, col, _neighbors} | tail])
when row > other_row do
neighbors = add_neighbor(neighbors, other_row, col, 0, row - other_row)
add_neighbors({row, col, neighbors}, tail)
end
defp add_neighbors({row, col, neighbors}, [{other_row, col, _neighbors} | tail]) do
neighbors = add_neighbor(neighbors, other_row, col, 180, other_row - row)
add_neighbors({row, col, neighbors}, tail)
end
defp add_neighbors({row, col, neighbors}, [{other_row, other_col, _neighbors} | tail])
when col <= other_col do
angle = angle(row, col, other_row, other_col)
dist = dist(row, col, other_row, other_col)
neighbors = add_neighbor(neighbors, other_row, other_col, 90 - angle, dist)
add_neighbors({row, col, neighbors}, tail)
end
defp add_neighbors({row, col, neighbors}, [{other_row, other_col, _neighbors} | tail]) do
angle = angle(row, col, other_row, other_col)
dist = dist(row, col, other_row, other_col)
neighbors = add_neighbor(neighbors, other_row, other_col, 270 - angle, dist)
add_neighbors({row, col, neighbors}, tail)
end
defp dist(row, col, other_row, other_col) do
(:math.pow(row - other_row, 2) + :math.pow(col - other_col, 2))
|> :math.sqrt()
end
defp add_neighbor(neighbors, new_row, new_col, angle, new_dist)
when not is_map_key(neighbors, angle),
do: Map.put(neighbors, angle, {new_row, new_col, new_dist})
defp add_neighbor(neighbors, new_row, new_col, angle, new_dist) do
{_row, _col, dist} = neighbors[angle]
add_neighbor(neighbors, new_row, new_col, angle, new_dist, dist)
end
defp add_neighbor(neighbors, new_row, new_col, angle, new_dist, dist)
when new_dist < dist,
do: Map.put(neighbors, angle, {new_row, new_col, new_dist})
defp add_neighbor(neighbors, _new_row, _new_col, _angle, _new_dist, _dist),
do: neighbors
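  # Sweeps the laser clockwise from the chosen location, vaporizing the nearest
  # asteroid at each angle until the `total`-th target is reached.
  # Returns {updated_belt, {row, col}} of that target, or {belt, {nil, nil}}
  # when every asteroid has already been vaporized.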
defp vaporize_from(belt, row, col, total) do
{row, col, targets} = belt["#{row}:#{col}"]
vaporize_from([], belt, targets, row, col, total)
end
defp vaporize_from([], belt, targets, _row, _col, _total) when targets == %{},
do: {belt, {nil, nil}}
defp vaporize_from([], belt, targets, row, col, total) do
Enum.sort(targets)
|> vaporize_from(belt, targets, row, col, total)
end
defp vaporize_from(
[{_angle, {target_row, target_col, _dist}} | _tail],
belt,
_targets,
_row,
_col,
total
)
when total < 2 do
{_target, belt} = Map.pop!(belt, "#{target_row}:#{target_col}")
{belt, {target_row, target_col}}
end
defp vaporize_from(
[{angle, {target_row, target_col, _dist}} | tail],
belt,
targets,
row,
col,
total
) do
{belt, targets} = vaporize(belt, target_row, target_col, targets, angle)
vaporize_from(tail, belt, targets, row, col, total - 1)
end
defp vaporize(belt, row, col, targets, angle) do
{{_row, _col, neighbors}, belt} = Map.pop!(belt, "#{row}:#{col}")
case neighbors[angle] do
nil ->
{belt, Map.delete(targets, angle)}
next_target ->
{belt, Map.replace!(targets, angle, next_target)}
end
end
end
|
lib/advent_of_code_2019/day10.ex
| 0.872347 | 0.459258 |
day10.ex
|
starcoder
|
defmodule LXD.Container do
alias LXD.Client
alias LXD.Utils
defp url(container_name \\ "", opts \\ []) do
exec = if(opts[:exec], do: "/exec", else: "")
state = if(opts[:state], do: "/state", else: "")
metadata = if(opts[:metadata], do: "/metadata", else: "")
["/containers", container_name, exec, state, metadata]
|> Path.join
end
@doc """
List all containers
"""
def all(opts \\ []) do
url()
|> Client.get(opts)
|> case do
{:ok, containers} ->
{:ok, containers |> Enum.map(&Path.basename/1)}
other ->
other
end
end
@doc """
Create a new container
  `configs` is a map that defines how to create the container
  See official documentation for more details [here](https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
"""
def create(configs, opts \\ []) do
url()
|> Client.post(configs, opts)
end
@doc """
Container information
"""
def info(container_name, opts \\ []) do
url(container_name)
|> Client.get(opts)
end
@doc """
Replace container configuration
"""
def replace(container_name, configs, opts \\ []) do
url(container_name)
|> Client.put(configs, opts)
end
@doc """
Update container configuration
"""
def update(container_name, configs, opts \\ []) do
url(container_name)
|> Client.patch(configs, opts)
end
@doc """
Rename the container
"""
def rename(container_name, new_name, opts \\ []) do
url(container_name)
|> Client.post(%{"name" => new_name}, opts)
end
@doc """
Remove the container
"""
def remove(container_name, opts \\ []) do
url(container_name)
|> Client.delete(opts)
end
@doc """
Run a remote command
  - `command`: must be a list of command arguments, i.e. the command split on spaces (see official documentation)
  - `envs`: map of environment variables to set
  Output recording (`record-output`) is enabled
  Returns `{:ok, return_code, stdout, stderr}` on success
  Returns `{:error, reason}` on error
  If stdout or stderr can't be read, `:error` is set in its place
  Websockets are not supported
  See official documentation for more details [here](https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersnameexec)
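  ## Examples (illustrative; container name and command are assumptions)

      {:ok, 0, output, _stderr} = LXD.Container.exec("mycontainer", ["uname", "-a"])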
"""
def exec(container_name, command, envs \\ %{}, opts \\ []) do
configs = %{
"command" => command,
"environment" => envs,
"record-output" => true
}
url(container_name, exec: true)
|> Client.post(configs, opts)
|> case do
{:ok, %{"output" => %{"1" => stdout, "2" => stderr}, "return" => return}} ->
get_result = fn log_name ->
case LXD.Container.Log.get(container_name, log_name |> Path.basename) do
{:ok, body} when is_binary(body) ->
body
_ ->
:error
end
end
{:ok, return, get_result.(stdout), get_result.(stderr)}
{:ok, _body} ->
{:error, "Cannot find output logs"}
{:error, reason} ->
{:error, reason}
end
end
@doc """
State of the container
"""
def state(container_name, opts \\ []) do
url(container_name, state: true)
|> Client.get(opts)
end
@doc """
Status of the container as a string
"""
def status(name) do
case state(name) do
{:ok, %{"status" => value}} ->
{:ok, value}
other ->
other
end
end
@doc """
Stop the container
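  Accepts `:force`, `:timeout` and `:stateful` options.

  ## Examples (illustrative container name)

      :ok = LXD.Container.stop("mycontainer", force: true, timeout: 30)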
"""
def stop(container_name, opts \\ []) do
state_change("stop", container_name, opts)
end
def start(container_name, opts \\ []) do
state_change("start", container_name, opts)
end
def restart(container_name, opts \\ []) do
state_change("restart", container_name, opts)
end
def freeze(container_name, opts \\ []) do
state_change("freeze", container_name, opts)
end
def unfreeze(container_name, opts \\ []) do
state_change("unfreeze", container_name, opts)
end
defp state_change(action, container_name, opts) do
force = Utils.arg(opts, :force, false)
timeout = Utils.arg(opts, :timeout, 0)
stateful = Utils.arg(opts, :stateful, false)
input = %{
"action" => action,
"timeout" => timeout,
"force" => force,
"stateful" => stateful
}
url(container_name, state: true)
|> Client.put(input, opts)
|> case do
{:ok, %{"status_code" => 200}} ->
:ok
{:ok, %{"err" => error}} ->
{:error, error}
other ->
other
end
end
def metadata(container_name, opts \\ []) do
url(container_name, metadata: true)
|> Client.get(opts)
end
end
defmodule LXD.Container.Template do
alias LXD.Client
defp url(container_name) do
["/containers", container_name, "/metadata/templates"]
|> Path.join
end
def all(container_name, opts \\ []) do
url(container_name)
|> Client.get(opts)
end
def get(container_name, template_name, opts \\ []) do
url(container_name)
|> Client.get(opts, [], params: [{"path", template_name}])
end
end
defmodule LXD.Container.Log do
alias LXD.Client
defp url(container_name, log_name \\ []) do
["/containers", container_name, "/logs", log_name]
|> Path.join
end
def all(container_name, opts \\ []) do
url(container_name)
|> Client.get(opts)
|> case do
{:ok, data} ->
{:ok, data |> Enum.map(&Path.basename/1)}
others ->
others
end
end
def get(container_name, log_name, opts \\ []) do
url(container_name, log_name)
|> Client.get(opts)
end
end
defmodule LXD.Container.File do
alias LXD.Utils
alias LXD.Client
defp url(container_name) do
["/containers", container_name, "/files"]
|> Path.join
end
def get(container_name, path_in_container, opts \\ []) do
url(container_name)
|> Client.get(opts, [], params: [{"path", path_in_container}])
end
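  # Recursively copies a path from the container to the local filesystem:
  # directory listings become local directories, file contents become local files.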
def get_and_write(container_name, path_in_container, target_path, opts \\ []) do
url(container_name)
|> Client.get(opts, [], params: [{"path", path_in_container}])
|> case do
{:ok, ls} when is_list(ls) ->
case File.mkdir(target_path) do
:ok ->
ls |> Enum.each(fn one ->
get_and_write(
container_name,
[path_in_container, one] |> Path.join,
[target_path, one] |> Path.join,
opts
)
end)
{:error, posix} ->
{:error, "Cannot create directory #{target_path} (reason: #{:file.format_error(posix)})"}
end
{:ok, file_content} when is_binary(file_content) ->
case File.write(target_path, file_content) do
:ok ->
:ok
{:error, posix} ->
{:error, "Cannot write in file #{target_path} (reason: #{:file.format_error(posix)})"}
end
other ->
other
end
end
defp put(container_name, path_in_container, type, content, opts) do
append = Utils.arg(opts, :append, false)
headers = [{"x-lxd-type", type}]
headers = [{"x-lxd-write", if(append, do: "append", else: "overwrite")} | headers ]
url(container_name)
|> Client.post(content, opts, headers, params: [{"path", path_in_container}])
|> case do
{:ok, _} ->
:ok
others ->
others
end
end
def put_file(container_name, path_in_container, content, opts \\ []) do
put(container_name, path_in_container, "file", content, opts)
end
def create_dir(container_name, path_in_container, opts \\ []) do
put(container_name, path_in_container, "directory", "", opts)
end
def read_and_put(container_name, path_file, path_in_container, opts \\ []) do
case File.exists?(path_file) do
true ->
case File.dir?(path_file) do
true ->
create_dir(container_name, path_in_container, opts)
case File.ls(path_file) do
{:ok, ls} ->
ls |> Enum.reduce(:ok, fn(file, acc) ->
case acc do
:ok ->
read_and_put(
container_name,
[path_file, file] |> Path.join,
[path_in_container, file] |> Path.join,
opts
)
other ->
other
end
end)
{:error, reason} ->
{:error, "Failed to list files in #{path_file} (reason: #{reason})"}
end
false ->
case File.read(path_file) do
{:ok, binary} ->
put_file(container_name, path_in_container, binary)
{:error, posix} ->
{:error, "Failed to read file #{path_file} (#{:file.format_error(posix)})"}
end
end
false ->
{:error, "File #{path_file} doesn't exist"}
end
end
def remove(container_name, path_in_container, opts \\ []) do
url(container_name)
|> Client.delete(opts, [], params: [{"path", path_in_container}])
end
end
defmodule LXD.Container.Snapshot do
alias LXD.Client
defp url(container_name, snapshot_name \\ "") do
["/containers/", container_name, "/snapshots", snapshot_name]
|> Path.join
end
def all(container_name, opts \\ []) do
url(container_name)
|> Client.get(opts)
end
def create(container_name, snapshot_name, stateful \\ true, opts \\ []) do
input = %{
"name" => snapshot_name,
"stateful" => stateful
}
url(container_name)
|> Client.post(input, opts)
end
def get(container_name, snapshot_name, opts \\ []) do
url(container_name, snapshot_name)
|> Client.get(opts)
end
def rename(container_name, snapshot_name, new_name, opts \\ []) do
url(container_name, snapshot_name)
|> Client.post(%{ "name" => new_name }, opts)
end
def remove(container_name, snapshot_name, opts \\ []) do
url(container_name, snapshot_name)
|> Client.delete(opts)
end
end
|
lib/endpoints/container.ex
| 0.683208 | 0.439747 |
container.ex
|
starcoder
|
defmodule Cryppo.DerivedKey do
@moduledoc """
A struct for a derived encryption key and its derivation artefacts
  A `Cryppo.EncryptedData` struct may be marked as belonging to a certain key derivation strategy
  via its `key_derivation_strategy` field, which holds the module of the key derivation.
A `Cryppo.DerivedKey` comes in 2 flavors:
* With a derived encryption key. When used for encryption or decryption this key will be used
* Without an encryption key. Encrypting or decrypting with this struct requires a passphrase to derive the key
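
  Illustrative serialization (the strategy module and artefact values are assumptions):

      %Cryppo.DerivedKey{
        key_derivation_strategy: Cryppo.Pbkdf2Hmac,
        salt: <<1, 2, 3>>,
        iter: 21_000,
        length: 32,
        hash: "SHA256"
      }
      |> Cryppo.Serialization.serialize()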
"""
alias Cryppo.{DerivedKey, EncryptionKey, Serialization}
@typedoc """
Struct `Cryppo.DerivedKey`
A `Cryppo.DerivedKey` struct contains
* `encryption_key` - `nil` or a `Cryppo.EncryptionKey`
* `key_derivation_strategy` - module of the key derivation strategy
* `salt` - salt used for key derivation
* `iter` - number of iterations for key derivation
* `length` - key length
* `hash` - hash function for key derivation
"""
@type t :: %__MODULE__{
encryption_key: EncryptionKey.t() | nil,
key_derivation_strategy: Cryppo.encryption_strategy_module(),
salt: binary,
iter: integer,
length: integer,
hash: String.t()
}
@enforce_keys [:key_derivation_strategy, :salt, :iter, :length, :hash]
defstruct [:encryption_key, :key_derivation_strategy, :salt, :iter, :length, :hash]
  # "K" (ASCII 75) is the version byte for derivation artefacts encoded with BSON
@current_version "K"
@spec current_version :: <<_::8>>
def current_version, do: @current_version
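  # Decodes version-prefixed, BSON-encoded derivation artefacts into
  # {:ok, salt, iterations, length} or {:error, :invalid_derivation_artefacts}.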
def load_artefacts(<<@current_version::binary, bin::binary>>) do
with {:ok, %{"iv" => {0x0, iv}, "i" => i, "l" => l}} <- Cyanide.decode(bin) do
%{"iv" => iv, "i" => i, "l" => l} |> parse_derivation_artefacts()
end
end
def load_artefacts(_), do: {:error, :invalid_derivation_artefacts}
@spec parse_derivation_artefacts(any) ::
{:error, :invalid_derivation_artefacts} | {:ok, binary, integer, integer}
defp parse_derivation_artefacts(%{"iv" => iv, "i" => i, "l" => l}), do: {:ok, iv, i, l}
defp parse_derivation_artefacts(_), do: {:error, :invalid_derivation_artefacts}
defimpl Serialization do
@spec serialize(DerivedKey.t()) :: String.t() | {:error, :cannot_bson_encode}
def serialize(%DerivedKey{
key_derivation_strategy: key_derivation_mod,
salt: salt,
iter: iterations,
length: length
}) do
key_derivation_mod = apply(key_derivation_mod, :strategy_name, [])
with {:ok, bytes} <- serialize_for_version(salt, iterations, length) do
derivation_artefacts = Base.url_encode64(bytes, padding: true)
[key_derivation_mod, derivation_artefacts] |> Enum.join(".")
end
end
def serialize_for_version(salt, iterations, length) do
# 0x0 is a marker for generic binary subtype in BSON
# see http://bsonspec.org/spec.html
with_wrapped_binaries = %{"iv" => {0x0, salt}, "i" => iterations, "l" => length}
with {:ok, bin} <- Cyanide.encode(with_wrapped_binaries) do
with_version_prefix = <<DerivedKey.current_version()::binary, bin::binary>>
{:ok, with_version_prefix}
end
end
end
end
|
lib/cryppo/derived_key.ex
| 0.895578 | 0.587529 |
derived_key.ex
|
starcoder
|
defmodule VintageNet.Connectivity.CheckLogic do
@moduledoc """
Core logic for determining internet connectivity based on check results
This module is meant to be used by `InternetChecker` and others for
determining when to run checks and how many failures should change the
network interface's state.
It implements a state machine that figures out what the connectivity status
is based on internet-connectivity check successes and fails. It also returns
how long to wait between checks.
```mermaid
stateDiagram-v2
direction LR
[*]-->internet : init
state connected {
internet-->lan : max failures
lan-->internet : check succeeded
}
connected-->disconnected : ifdown
disconnected-->lan : ifup
```
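
  A typical sequence of checks (values follow the module constants):

      state = CheckLogic.init(:internet)         # interval: 500
      state = CheckLogic.check_failed(state)     # strike 1, still :internet, interval: 15_000
      state = CheckLogic.check_failed(state)     # strike 2, interval: 10_000
      state = CheckLogic.check_failed(state)     # strike 3 -> degraded to :lan
      state = CheckLogic.check_succeeded(state)  # back to :internet, interval: 30_000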
"""
@min_interval 500
@max_interval 30_000
@max_fails_in_a_row 3
@type state() :: %{
connectivity: VintageNet.connection_status(),
strikes: non_neg_integer(),
interval: non_neg_integer() | :infinity
}
@doc """
Initialize check state machine
Pass in the assumed connection status. This is a best guess to start things out.
"""
@spec init(VintageNet.connection_status()) :: state()
def init(:internet) do
# Best case, but check quickly to verify that the internet truly is reachable.
%{connectivity: :internet, strikes: 0, interval: @min_interval}
end
def init(:lan) do
%{connectivity: :lan, strikes: @max_fails_in_a_row, interval: @min_interval}
end
def init(other) when other in [:disconnected, nil] do
%{connectivity: :disconnected, strikes: @max_fails_in_a_row, interval: :infinity}
end
@doc """
Call this when the interface comes up
It is assumed that the interface has LAN connectivity now and a check will
be scheduled to happen shortly.
"""
@spec ifup(state()) :: state()
def ifup(%{connectivity: :disconnected} = state) do
# Physical layer is up. Optimistically assume that the LAN is accessible and
# start polling again after a short delay
%{state | connectivity: :lan, interval: @min_interval}
end
def ifup(state), do: state
@doc """
Call this when the interface goes down
The interface will be categorized as `:disconnected` until `ifup/1` gets
called again.
"""
@spec ifdown(state()) :: state()
def ifdown(state) do
# Physical layer is down. Don't poll for connectivity since it won't happen.
%{state | connectivity: :disconnected, interval: :infinity}
end
@doc """
Call this when an Internet connectivity check succeeds
"""
@spec check_succeeded(state()) :: state()
def check_succeeded(%{connectivity: :disconnected} = state), do: state
def check_succeeded(state) do
# Success - reset the number of strikes to stay in Internet mode
# even if there are hiccups.
%{state | connectivity: :internet, strikes: 0, interval: @max_interval}
end
@doc """
Call this when an Internet connectivity check fails
  Depending on how many failures have happened in a row, the connectivity may
be degraded to `:lan`.
"""
@spec check_failed(state()) :: state()
def check_failed(%{connectivity: :internet} = state) do
# There's no discernment between types of failures. Everything means
# that the internet is not available and every failure could be a hiccup.
# NOTE: only `ifdown/1` can transition to the `:disconnected` state since only
# `ifup/1` can exit the `:disconnected` state.
strikes = state.strikes + 1
if strikes < @max_fails_in_a_row do
# If a check fails, retry, but don't wait as long as when everything was working
%{state | strikes: strikes, interval: max(@min_interval, div(@max_interval, strikes + 1))}
else
%{state | connectivity: :lan, strikes: @max_fails_in_a_row}
end
end
def check_failed(%{connectivity: :lan} = state) do
# Back off of checks since they're not working
%{state | interval: min(state.interval * 2, @max_interval)}
end
def check_failed(state), do: state
end
|
lib/vintage_net/connectivity/check_logic.ex
| 0.831074 | 0.866923 |
check_logic.ex
|
starcoder
|
defmodule Commanded.Event.Mapper do
@moduledoc """
Map events to/from the structs used for persistence.
## Example
Map domain event structs to `Commanded.EventStore.EventData` structs in
preparation for appending to the configured event store:
events = [%ExampleEvent1{}, %ExampleEvent2{}]
event_data = Commanded.Event.Mapper.map_to_event_data(events)
:ok = Commanded.EventStore.append_to_stream("stream-1234", :any_version, event_data)
"""
alias Commanded.EventStore.TypeProvider
alias Commanded.EventStore.{EventData, RecordedEvent}
@type event :: struct
@doc """
  Map a domain event (or list of events) to a
`Commanded.EventStore.EventData` struct (or list of structs).
Optionally, include the `causation_id`, `correlation_id`, and `metadata`
associated with the event(s).
## Examples
event_data = Commanded.Event.Mapper.map_to_event_data(%ExampleEvent{})
event_data =
Commanded.Event.Mapper.map_to_event_data(
[
%ExampleEvent1{},
%ExampleEvent2{}
],
causation_id: UUID.uuid4(),
correlation_id: UUID.uuid4(),
metadata: %{"user_id" => user_id}
)
"""
def map_to_event_data(events, fields \\ [])
@spec map_to_event_data(list(event), Keyword.t()) :: list(EventData.t())
def map_to_event_data(events, fields) when is_list(events) do
Enum.map(events, &map_to_event_data(&1, fields))
end
@spec map_to_event_data(struct, Keyword.t()) :: EventData.t()
def map_to_event_data(event, fields) do
%EventData{
causation_id: Keyword.get(fields, :causation_id),
correlation_id: Keyword.get(fields, :correlation_id),
event_type: TypeProvider.to_string(event),
data: event,
metadata: Keyword.get(fields, :metadata, %{})
}
end
@doc """
Map a list of `Commanded.EventStore.RecordedEvent` structs to their event data.
"""
@spec map_from_recorded_events(list(RecordedEvent.t())) :: [event]
def map_from_recorded_events(recorded_events) when is_list(recorded_events) do
Enum.map(recorded_events, &map_from_recorded_event/1)
end
@doc """
  Map a `Commanded.EventStore.RecordedEvent` struct to its event data.
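
  ## Example (`ExampleEvent` is illustrative)

      map_from_recorded_event(%RecordedEvent{data: %ExampleEvent{}})
      #=> %ExampleEvent{}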
"""
@spec map_from_recorded_event(RecordedEvent.t()) :: event
def map_from_recorded_event(%RecordedEvent{data: data}), do: data
end
|
lib/commanded/event/mapper.ex
| 0.874212 | 0.513668 |
mapper.ex
|
starcoder
|
defmodule Rheostat.Adapter do
@type key :: iodata
@type options :: [sample_rate: float, tags: [String.t()]]
@type on_send :: :ok | {:error, term}
## Metrix interface
@callback count(map(), String.t(), number()) :: any()
@callback sample(map(), String.t(), any()) :: any()
@callback measure(map(), String.t(), any()) :: any()
@callback measure(String.t(), list(), any()) :: any()
## StatsD interface
@doc """
Opens the connection to the StatsD-compatible server.
The configuration is read from the configuration for the `:statix` application
(both globally and per connection).
"""
@callback connect(list()) :: :ok
@doc """
Increments the StatsD counter identified by `key` by the given `value`.
`value` is supposed to be zero or positive and `c:decrement/3` should be
used for negative values.
## Examples
iex> MyApp.Statix.increment("hits", 1, [])
:ok
"""
@callback increment(key, value :: number, options) :: on_send
@doc """
Same as `increment(key, 1, [])`.
"""
@callback increment(key) :: on_send
@doc """
Same as `increment(key, value, [])`.
"""
@callback increment(key, value :: number) :: on_send
@doc """
Decrements the StatsD counter identified by `key` by the given `value`.
Works same as `c:increment/3` but subtracts `value` instead of adding it. For
this reason `value` should be zero or negative.
## Examples
iex> MyApp.Statix.decrement("open_connections", 1, [])
:ok
"""
@callback decrement(key, value :: number, options) :: on_send
@doc """
Same as `decrement(key, 1, [])`.
"""
@callback decrement(key) :: on_send
@doc """
Same as `decrement(key, value, [])`.
"""
@callback decrement(key, value :: number) :: on_send
@doc """
Writes to the StatsD gauge identified by `key`.
## Examples
iex> MyApp.Statix.gauge("cpu_usage", 0.83, [])
:ok
"""
@callback gauge(key, value :: String.Chars.t(), options) :: on_send
@doc """
Same as `gauge(key, value, [])`.
"""
@callback gauge(key, value :: String.Chars.t()) :: on_send
@doc """
Writes `value` to the histogram identified by `key`.
  Not all StatsD-compatible servers support histograms. An example of such a
  server is [statsite](https://github.com/statsite/statsite).
## Examples
iex> MyApp.Statix.histogram("online_users", 123, [])
:ok
"""
@callback histogram(key, value :: String.Chars.t(), options) :: on_send
@doc """
Same as `histogram(key, value, [])`.
"""
@callback histogram(key, value :: String.Chars.t()) :: on_send
@doc """
Writes the given `value` to the StatsD timing identified by `key`.
`value` is expected in milliseconds.
## Examples
iex> MyApp.Statix.timing("rendering", 12, [])
:ok
"""
@callback timing(key, value :: String.Chars.t(), options) :: on_send
@doc """
Same as `timing(key, value, [])`.
"""
@callback timing(key, value :: String.Chars.t()) :: on_send
@doc """
Writes the given `value` to the StatsD set identified by `key`.
## Examples
iex> MyApp.Statix.set("unique_visitors", "user1", [])
:ok
"""
@callback set(key, value :: String.Chars.t(), options) :: on_send
@doc """
Same as `set(key, value, [])`.
"""
@callback set(key, value :: String.Chars.t()) :: on_send
end
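# A minimal no-op adapter sketch (module name is an assumption; a real
# implementation must cover every callback declared above):
#
#     defmodule MyApp.NullAdapter do
#       @behaviour Rheostat.Adapter
#
#       def connect(_config), do: :ok
#       def count(_metadata, _metric, _num), do: :ok
#       def increment(_key, _value \\ 1, _options \\ []), do: :ok
#       # ... remaining callbacks elided ...
#     end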
|
lib/adapter/adapter.ex
| 0.892995 | 0.507995 |
adapter.ex
|
starcoder
|
defmodule Vantagex.Cryptocurrencies do
@moduledoc """
Contains functions related to the Cryptocurrencies functions from Alpha Vantage
"""
import Vantagex.Helper
alias Vantagex.Forex
@module_id "DIGITAL_CURRENCY"
@doc """
Uses Alpha Vantage's CURRENCY_EXCHANGE_RATE function.
Added here for simplicity, but performs a call to `Forex.exchange_rate`
Args:
* `from_currency` - The currency to get the exchange rate for. e.g. "BTC"
* `to_currency` - The destination currency for the exchange rate. e.g. "USD"
* `opts` - A list of extra options to pass to the function.
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
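
  ## Examples (illustrative; live rates vary)

      iex> Vantagex.Cryptocurrencies.exchange_rate("BTC", "USD", [])
      %{"Realtime Currency Exchange Rate" => %{...}}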
"""
@spec exchange_rate(String.t(), String.t(), Keyword.t()) :: Map.t() | String.t()
def exchange_rate(from_currency, to_currency, opts) do
Forex.exchange_rate(from_currency, to_currency, opts)
end
@doc """
Uses Alpha Vantage's DIGITAL_CURRENCY_DAILY function.
Returns the daily time series for a digital currency traded on a specific market
Args:
* `symbol` - the digital/crypto currency of your choice. any of the currencies in the [digital currency list]("https://www.alphavantage.co/digital_currency_list/")
* `market` - the exchange market of your choice. any from the [market list]("https://www.alphavantage.co/physical_currency_list/")
* `opts` - A list of extra options to pass to the function.
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
## Examples:
  iex> Vantagex.Cryptocurrencies.daily("BTC", "USD")
%{
"Meta Data" => %{
"1. Information" => "Daily Prices and Volumes for Digital Currency",
"2. Digital Currency Code" => "BTC",
"3. Digital Currency Name" => "Bitcoin",
"4. Market Code" => "USD",
"5. Market Name" => "United States Dollar",
"6. Last Refreshed" => "2019-03-09 (end of day)",
"7. Time Zone" => "UTC"
},
"Time Series (Digital Currency Daily)" => %{
"2017-07-13" => %{
"1a. open (USD)" => "2397.70831714",
"1b. open (USD)" => "2397.70831714",
"2a. high (USD)" => "2429.55116636",
"2b. high (USD)" => "2429.55116636",
"3a. low (USD)" => "2329.24694466",
"3b. low (USD)" => "2329.24694466",
"4a. close (USD)" => "2353.72968273",
"4b. close (USD)" => "2353.72968273",
"5. volume" => "73837.90295505",
"6. market cap (USD)" => "173794463.89599040"
},
"2018-11-12" => %{
"1a. open (USD)" => "6404.47988049",
"1b. open (USD)" => "6404.47988049",
"2a. high (USD)" => "6435.95061677",
"2b. high (USD)" => "6435.95061677",
"3a. low (USD)" => "6359.81993277",
"3b. low (USD)" => "6359.81993277",
"4a. close (USD)" => "6375.86047086",
"4b. close (USD)" => "6375.86047086",
"5. volume" => "57756.07950395",
"6. market cap (USD)" => "368244704.26095134"
},
...
}
}
"""
@spec daily(String.t(), String.t(), Keyword.t()) :: String.t() | Map.t()
def daily(symbol, market, opts \\ []) do
params = %{
symbol: symbol,
market: market,
datatype: Keyword.get(opts, :datatype)
} |> clean_params()
resolve_request(:daily, params, @module_id)
end
@doc """
Uses Alpha Vantage's DIGITAL_CURRENCY_WEEKLY function.
  Returns the weekly time series for a digital currency traded on a specific market
Args:
* `symbol` - the digital/crypto currency of your choice. any of the currencies in the [digital currency list]("https://www.alphavantage.co/digital_currency_list/")
* `market` - the exchange market of your choice. any from the [market list]("https://www.alphavantage.co/physical_currency_list/")
* `opts` - A list of extra options to pass to the function.
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
## Examples
iex> Vantagex.Cryptocurrencies.weekly("BTC", "USD")
%{
"Meta Data" => %{
"1. Information" => "Weekly Prices and Volumes for Digital Currency",
"2. Digital Currency Code" => "BTC",
"3. Digital Currency Name" => "Bitcoin",
"4. Market Code" => "USD",
"5. Market Name" => "United States Dollar",
"6. Last Refreshed" => "2019-03-09 (end of day)",
"7. Time Zone" => "UTC"
},
"Time Series (Digital Currency Weekly)" => %{
"2018-09-02" => %{
"1a. open (USD)" => "6727.99365712",
"1b. open (USD)" => "6727.99365712",
"2a. high (USD)" => "7314.28946177",
"2b. high (USD)" => "7314.28946177",
"3a. low (USD)" => "6656.81489204",
"3b. low (USD)" => "6656.81489204",
"4a. close (USD)" => "7277.19919306",
"4b. close (USD)" => "7277.19919306",
"5. volume" => "522049.82045400",
"6. market cap (USD)" => "3694710857.50000000"
},
"2016-09-18" => %{
"1a. open (USD)" => "607.40733392",
"1b. open (USD)" => "607.40733392",
"2a. high (USD)" => "612.26786946",
"2b. high (USD)" => "612.26786946",
"3a. low (USD)" => "605.42161452",
"3b. low (USD)" => "605.42161452",
"4a. close (USD)" => "610.52319377",
"4b. close (USD)" => "610.52319377",
"5. volume" => "224699.68643400",
"6. market cap (USD)" => "136768377.19800001"
},
...
}
}
"""
@spec weekly(String.t(), String.t(), Keyword.t()) :: String.t() | Map.t()
def weekly(symbol, market, opts \\ []) do
params = %{
symbol: symbol,
market: market,
datatype: Keyword.get(opts, :datatype)
} |> clean_params()
resolve_request(:weekly, params, @module_id)
end
@doc """
Uses Alpha Vantage's DIGITAL_CURRENCY_MONTHLY function.
  Returns the monthly time series for a digital currency traded on a specific market
Args:
* `symbol` - the digital/crypto currency of your choice. any of the currencies in the [digital currency list]("https://www.alphavantage.co/digital_currency_list/")
* `market` - the exchange market of your choice. any from the [market list]("https://www.alphavantage.co/physical_currency_list/")
* `opts` - A list of extra options to pass to the function.
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
## Examples
iex> Vantagex.Cryptocurrencies.monthly("BTC", "USD")
%{
"Meta Data" => %{
"1. Information" => "Monthly Prices and Volumes for Digital Currency",
"2. Digital Currency Code" => "BTC",
"3. Digital Currency Name" => "Bitcoin",
"4. Market Code" => "USD",
"5. Market Name" => "United States Dollar",
"6. Last Refreshed" => "2019-03-09 (end of day)",
"7. Time Zone" => "UTC"
},
"Time Series (Digital Currency Monthly)" => %{
"2017-06-30" => %{
"1a. open (USD)" => "2281.68026573",
"1b. open (USD)" => "2281.68026573",
"2a. high (USD)" => "2975.72639337",
"2b. high (USD)" => "2975.72639337",
"3a. low (USD)" => "2134.66195666",
"3b. low (USD)" => "2134.66195666",
"4a. close (USD)" => "2468.29342943",
"4b. close (USD)" => "2468.29342943",
"5. volume" => "3101284.39484000",
"6. market cap (USD)" => "8096761491.46000004"
},
"2018-11-30" => %{
"1a. open (USD)" => "6341.38852605",
"1b. open (USD)" => "6341.38852605",
"2a. high (USD)" => "6555.60352543",
"2b. high (USD)" => "6555.60352543",
"3a. low (USD)" => "3559.57432974",
"3b. low (USD)" => "3559.57432974",
"4a. close (USD)" => "4012.09235790",
"4b. close (USD)" => "4012.09235790",
"5. volume" => "3739720.46008000",
"6. market cap (USD)" => "18164674672.59999847"
},
...
}
}
"""
@spec monthly(String.t(), String.t(), Keyword.t()) :: String.t() | Map.t()
def monthly(symbol, market, opts \\ []) do
params = %{
symbol: symbol,
market: market,
datatype: Keyword.get(opts, :datatype)
} |> clean_params()
resolve_request(:monthly, params, @module_id)
end
end
|
lib/vantagex/cryptocurrencies.ex
| 0.895893 | 0.673863 |
cryptocurrencies.ex
|
starcoder
|
defmodule AdventOfCode.Solutions.Day11 do
@moduledoc """
Solution for day 11 exercise.
### Exercise
https://adventofcode.com/2021/day/11
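
  Illustrative usage (the input path is an assumption):

      AdventOfCode.Solutions.Day11.count_flashes("priv/day11.txt", 100)
      AdventOfCode.Solutions.Day11.find_sync_step("priv/day11.txt")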
"""
require Logger
def count_flashes(filename, steps) do
input =
filename
|> File.read!()
|> parse_input()
{num_flashes, _final_status} = calculate_flashes(input, steps)
IO.puts("Number of flashes after #{steps}: #{num_flashes}")
end
def find_sync_step(filename) do
input =
filename
|> File.read!()
|> parse_input()
result = do_find_sync_step(input)
IO.puts("Flashes will be synchronized at step: #{result}")
end
defp parse_input(file_content) do
matrix =
file_content
|> String.replace("\r\n", "\n")
|> String.split("\n", trim: true)
|> Enum.map(fn row ->
row
|> String.graphemes()
|> Enum.map(&String.to_integer/1)
end)
num_rows = length(matrix)
Enum.reduce(0..(num_rows - 1), %{}, fn y, acc ->
row = Enum.at(matrix, y)
num_columns = length(row)
Enum.reduce(0..(num_columns - 1), acc, fn x, row_acc ->
energy = Enum.at(row, x)
Map.put(row_acc, {x, y}, energy)
end)
end)
end
defp calculate_flashes(input, steps) do
Enum.reduce(1..steps, {0, input}, fn _step, {num_flashes, acc_input} ->
updated_input = process_step(acc_input)
step_flashes = Enum.count(updated_input, fn {{_x, _y}, energy} -> energy == 0 end)
{num_flashes + step_flashes, updated_input}
end)
end
defp do_find_sync_step(input) do
updated_input = process_step(input)
if Enum.all?(updated_input, fn {{_x, _y}, energy} -> energy == 0 end) do
1
else
1 + do_find_sync_step(updated_input)
end
end
defp process_step(input) do
coords = Map.keys(input)
coords
|> Enum.reduce(input, fn {x, y}, acc ->
increase_energy(acc, {x, y})
end)
|> Enum.map(fn
{{x, y}, energy} when energy > 9 -> {{x, y}, 0}
{{x, y}, energy} -> {{x, y}, energy}
end)
|> Enum.into(%{})
end
defp increase_energy(input, {x, y}) do
value = Map.get(input, {x, y})
if is_integer(value) do
new_value = value + 1
updated_input = Map.put(input, {x, y}, new_value)
if new_value == 10 do
get_surroundings({x, y})
|> Enum.reduce(updated_input, &increase_energy(&2, &1))
else
updated_input
end
else
input
end
end
defp get_surroundings({x, y}) do
[
{x + 1, y},
{x - 1, y},
{x, y + 1},
{x, y - 1},
{x + 1, y + 1},
{x + 1, y - 1},
{x - 1, y + 1},
{x - 1, y - 1}
]
end
end
|
lib/advent_of_code/solutions/day11.ex
| 0.644337 | 0.510313 |
day11.ex
|
starcoder
|
alias Graphqexl.Schema
alias Graphqexl.Schema.{
Field,
Interface,
Ref,
TEnum,
Type,
Union,
}
alias Graphqexl.Tokens
defmodule Graphqexl.Schema.Ref do
@moduledoc """
  Struct holding a reference to a custom `t:Graphqexl.Schema.Ref.component/0`. Used while parsing
  a `t:Graphqexl.Schema.t/0` because it may refer to a component that has not yet been defined in the
  schema (but will be later). A validator (TODO: link to the appropriate validator module) will check
  that all `t:Graphqexl.Schema.Ref.t/0`s resolve during the validation step.
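
  Illustrative resolution of a built-in scalar (built-ins resolve without a
  schema lookup, so any `t:Graphqexl.Schema.t/0` may be passed):

      Graphqexl.Schema.Ref.resolve(%Graphqexl.Schema.Ref{type: :String}, schema)
      #=> :String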
"""
@moduledoc since: "0.1.0"
@enforce_keys [:type]
defstruct [:type]
  @type built_in :: :Boolean | :Float | :Id | :Integer | :String
  @type component :: built_in | Interface.t | TEnum.t | Type.t | Union.t
  @type t :: %Graphqexl.Schema.Ref{type: atom}
@doc """
Returns a list of the `t:Graphqexl.Schema.Field.t/0`s available on the type resolved to by the
given `t:Graphqexl.Schema.Ref.t/0`.
Returns: `[t:Graphqexl.Schema.Field.t/0]`
"""
@doc since: "0.1.0"
def fields(%Ref{type: :Boolean}, _), do: []
def fields(%Ref{type: :Float}, _), do: []
def fields(%Ref{type: :Id}, _), do: []
def fields(%Ref{type: :Integer}, _), do: []
def fields(%Ref{type: :String}, _), do: []
@spec fields(t, Schema.t) :: list(Field.t)
# TODO: fully implement
def fields(ref, schema) do
resolved = ref |> Ref.resolve(schema)
case resolved do
%Interface{} -> resolved |> Interface.fields(schema)
%Type{} -> resolved |> Type.fields(schema)
%Union{} -> resolved |> Union.fields(schema)
_ -> []
end
end
@doc """
Resolves the given `t:Graphqexl.Schema.Ref.t/0` into its corresponding
`t:Graphqexl.Schema.Ref.component/0` according to the given `t:Graphqexl.Schema.t/0`.
Returns: `t:Graphqexl.Schema.Ref.component/0`
"""
@doc since: "0.1.0"
@spec resolve(t, Schema.t) :: component
def resolve(%Ref{type: :Boolean}, _), do: :Boolean
def resolve(%Ref{type: :Float}, _), do: :Float
def resolve(%Ref{type: :Id}, _), do: :Id
def resolve(%Ref{type: :Integer}, _), do: :Integer
def resolve(%Ref{type: :String}, _), do: :String
def resolve(ref, schema) when is_list(ref), do: ref |> List.first |> resolve(schema)
def resolve(ref, schema) do
object_type = cond do
schema.enums |> Map.keys |> Enum.member?(ref.type) -> :enums
schema.interfaces |> Map.keys |> Enum.member?(ref.type) -> :interfaces
schema.types |> Map.keys |> Enum.member?(ref.type) -> :types
schema.unions |> Map.keys |> Enum.member?(ref.type) -> :unions
end
schema |> Map.get(object_type) |> Map.get(ref.type)
end
@doc """
Checks whether the given `t:Graphqexl.Schema.Ref.t/0` resolves to a scalar type.
Returns: `t:boolean/0`
"""
@doc since: "0.1.0"
  @spec scalar?(t, Schema.t) :: boolean
# TODO: (across the board in scalar checking) handle List types
# _possibly_ this is where using Elixir's built-in List breaks down, and instead
# we want a `TList` module, which should definitely implement the Enumerable protocol.
def scalar?(nil, _), do: false
def scalar?(ref, schema) do
ref
|> resolve(schema)
|> (&(
case &1 do
%Interface{} -> false
%TEnum{} -> true
%Type{} -> &1.implements |> scalar?(schema)
%Union{} ->
[&1.type1, &1.type2] |> Enum.any?(fn (type) -> type |> scalar?(schema) end)
_ -> :types |> Tokens.get |> Map.get(:scalar) |> Enum.member?(&1)
end
)).()
end
end
|
lib/graphqexl/schema/ref.ex
| 0.657318 | 0.513851 |
ref.ex
|
starcoder
|