# File: lib/forma.ex
defmodule Forma do
@moduledoc """
Applies typespecs to JSON-like data.

This module parses JSON-like data (such as maps with string keys) into a
more structured form by trying to make it conform to a module's typespec.

This is generally useful when interfacing with external data sources that
provide data as JSON or MessagePack, which you wish to transform into proper
structs or into richer data types without a native JSON representation
(such as dates or sets).
It is heavily inspired by Go's way of dealing with JSON data.
    defmodule User do
      defstruct [:id, :name, :age, :gender]

      @type t :: %__MODULE__{
        id: String.t,
        name: String.t,
        age: non_neg_integer(),
        gender: :male | :female | :other | :prefer_not_to_say
      }
    end

    Forma.parse(%{"id" => "1", "name" => "Fredrik", "age" => 30, "gender" => "male"}, User)
    # => %User{id: "1", name: "Fredrik", age: 30, gender: :male}
Forma tries to figure out how to translate its input to the typespec.
However, not all types have natural representations in JSON (dates, for
example), and some types don't want to expose their internals (opaque types).

If you're in control of the module defining the type, you can implement the
`__forma__/2` function to handle parsing the input into your desired type:
    defmodule App.Date do
      @opaque t :: Date

      # the first argument is the type to be parsed in this module
      def __forma__(:t, input) do
        case Date.from_iso8601(input) do
          {:ok, date} -> date
          {:error, reason} -> raise ArgumentError, "#{reason}"
        end
      end
    end
If you're not in control of the module, you can pass a parser along as an
optional argument:
    defmodule LogRow do
      defstruct [:log, :timestamp]

      @type t :: %__MODULE__{
        log: String.t,
        timestamp: NaiveDateTime.t
      }
    end

    Forma.parse(%{"log" => "An error occurred", "timestamp" => "2015-01-23 23:50:07"},
      LogRow,
      %{{NaiveDateTime, :t} => fn input ->
        case NaiveDateTime.from_iso8601(input) do
          {:ok, datetime} -> datetime
          {:error, reason} -> raise ArgumentError, "#{reason}"
        end
      end})
The arity of a parser function depends on whether the type is parameterized
(`MapSet.t` vs `MapSet.t(integer)`).
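For example, hypothetical parser entries for both shapes (a sketch; the
second argument carries the type parameters):

    # MapSet.t — unparameterized, so the parser takes one argument
    %{{MapSet, :t} => fn input -> MapSet.new(input) end}

    # MapSet.t(integer) — parameterized, so the parameters arrive as a second argument
    %{{MapSet, :t} => fn input, _params -> MapSet.new(input) end}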
"""
use Application
alias Forma.Types
def start(_type, _args) do
  children = [
    {Forma.Types, name: Forma.Types}
  ]

  Supervisor.start_link(children, strategy: :one_for_one)
end
@type input :: %{optional(String.t) => any} | [any] | String.t | number
@type typeref :: {atom, atom}
@type callback :: (input, [] -> any)
@type parsers :: %{optional(typeref) => callback}
def parse(input, module, parsers \\ %{})
@spec parse(input, typeref, parsers) :: {:ok, any} | {:error, reason :: any}
def parse(input, {module, type}, parsers) do
typ = Types.for(module, type)
try do
{:ok, Forma.Parser.parse!(input, parsers, typ)}
rescue
err -> {:error, err}
end
end
@spec parse(input, atom, parsers) :: {:ok, any} | {:error, reason :: any}
def parse(input, into, parsers) do
parse(input, {into, :t}, parsers)
end
end
# File: lib/aws/kinesis.ex
defmodule AWS.Kinesis do
@moduledoc """
Amazon Kinesis Data Streams Service API Reference
Amazon Kinesis Data Streams is a managed service that scales elastically
for real-time processing of streaming big data.
"""
@doc """
Adds or updates tags for the specified Kinesis data stream. Each time you
invoke this operation, you can specify up to 10 tags. If you want to add
more than 10 tags to your stream, you can invoke this operation multiple
times. In total, each stream can have up to 50 tags.
If tags have already been assigned to the stream, `AddTagsToStream`
overwrites any existing tags that correspond to the specified tag keys.
`AddTagsToStream` has a limit of five transactions per second per account.
"""
def add_tags_to_stream(client, input, options \\ []) do
request(client, "AddTagsToStream", input, options)
end
@doc """
Creates a Kinesis data stream. A stream captures and transports data
records that are continuously emitted from different data sources or
*producers*. Scale-out within a stream is explicitly supported by means of
shards, which are uniquely identified groups of data records in a stream.
You specify and control the number of shards that a stream is composed of.
Each shard can support reads up to five transactions per second, up to a
maximum data read total of 2 MB per second. Each shard can support writes
up to 1,000 records per second, up to a maximum data write total of 1 MB
per second. If the amount of data input increases or decreases, you can add
or remove shards.
The stream name identifies the stream. The name is scoped to the AWS
account used by the application. It is also scoped by AWS Region. That is,
two streams in two different accounts can have the same name, and two
streams in the same account, but in two different Regions, can have the
same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Kinesis Data Streams immediately returns and sets
the stream status to `CREATING`. After the stream is created, Kinesis Data
Streams sets the stream status to `ACTIVE`. You should perform read and
write operations only on an `ACTIVE` stream.
You receive a `LimitExceededException` when making a `CreateStream` request
if you try to do one of the following:

  * Have more than five streams in the `CREATING` state at any point in time.

  * Create more shards than are authorized for your account.

For the default shard limit for an AWS account, see [Amazon
Kinesis Data Streams
Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*. To increase this
limit, [contact AWS
Support](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
You can use `DescribeStream` to check the stream status, which is returned
in `StreamStatus`.
`CreateStream` has a limit of five transactions per second per account.
"""
def create_stream(client, input, options \\ []) do
request(client, "CreateStream", input, options)
end
@doc """
Decreases the Kinesis data stream's retention period, which is the length
of time data records are accessible after they are added to the stream. The
minimum value of a stream's retention period is 24 hours.
This operation may result in lost data. For example, if the stream's
retention period is 48 hours and is decreased to 24 hours, any data already
in the stream that is older than 24 hours is inaccessible.
"""
def decrease_stream_retention_period(client, input, options \\ []) do
request(client, "DecreaseStreamRetentionPeriod", input, options)
end
@doc """
Deletes a Kinesis data stream and all its shards and data. You must shut
down any applications that are operating on the stream before you delete
the stream. If an application attempts to operate on a deleted stream, it
receives the exception `ResourceNotFoundException`.
If the stream is in the `ACTIVE` state, you can delete it. After a
`DeleteStream` request, the specified stream is in the `DELETING` state
until Kinesis Data Streams completes the deletion.
**Note:** Kinesis Data Streams might continue to accept data read and write
operations, such as `PutRecord`, `PutRecords`, and `GetRecords`, on a
stream in the `DELETING` state until the stream deletion is complete.
When you delete a stream, any shards in that stream are also deleted, and
any tags are dissociated from the stream.
You can use the `DescribeStream` operation to check the state of the
stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of five transactions per second per account.
"""
def delete_stream(client, input, options \\ []) do
request(client, "DeleteStream", input, options)
end
@doc """
To deregister a consumer, provide its ARN. Alternatively, you can provide
the ARN of the data stream and the name you gave the consumer when you
registered it. You may also provide all three parameters, as long as they
don't conflict with each other. If you don't know the name or ARN of the
consumer that you want to deregister, you can use the `ListStreamConsumers`
operation to get a list of the descriptions of all the consumers that are
currently registered with a given data stream. The description of a
consumer contains its name and ARN.
This operation has a limit of five transactions per second per account.
"""
def deregister_stream_consumer(client, input, options \\ []) do
request(client, "DeregisterStreamConsumer", input, options)
end
@doc """
Describes the shard limits and usage for the account.
If you update your account limits, the old limits might be returned for a
few minutes.
This operation has a limit of one transaction per second per account.
"""
def describe_limits(client, input, options \\ []) do
request(client, "DescribeLimits", input, options)
end
@doc """
Describes the specified Kinesis data stream.
The information returned includes the stream name, Amazon Resource Name
(ARN), creation time, enhanced metric configuration, and shard map. The
shard map is an array of shard objects. For each shard object, there is the
hash key and sequence number ranges that the shard spans, and the IDs of
any earlier shards that played a role in creating the shard. Every
record ingested in the stream is identified by a sequence number, which is
assigned when the record is put into the stream.
You can limit the number of shards returned by each call. For more
information, see [Retrieving Shards from a
Stream](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
There are no guarantees about the chronological order of the shards returned. To
process shards in chronological order, use the ID of the parent shard to
track the lineage to the oldest shard.
This operation has a limit of 10 transactions per second per account.
"""
def describe_stream(client, input, options \\ []) do
request(client, "DescribeStream", input, options)
end
@doc """
To get the description of a registered consumer, provide the ARN of the
consumer. Alternatively, you can provide the ARN of the data stream and the
name you gave the consumer when you registered it. You may also provide all
three parameters, as long as they don't conflict with each other. If you
don't know the name or ARN of the consumer that you want to describe, you
can use the `ListStreamConsumers` operation to get a list of the
descriptions of all the consumers that are currently registered with a
given data stream.
This operation has a limit of 20 transactions per second per account.
"""
def describe_stream_consumer(client, input, options \\ []) do
request(client, "DescribeStreamConsumer", input, options)
end
@doc """
Provides a summarized description of the specified Kinesis data stream
without the shard list.
The information returned includes the stream name, Amazon Resource Name
(ARN), status, record retention period, approximate creation time,
monitoring, encryption details, and open shard count.
"""
def describe_stream_summary(client, input, options \\ []) do
request(client, "DescribeStreamSummary", input, options)
end
@doc """
Disables enhanced monitoring.
"""
def disable_enhanced_monitoring(client, input, options \\ []) do
request(client, "DisableEnhancedMonitoring", input, options)
end
@doc """
Enables enhanced Kinesis data stream monitoring for shard-level metrics.
"""
def enable_enhanced_monitoring(client, input, options \\ []) do
request(client, "EnableEnhancedMonitoring", input, options)
end
@doc """
Gets data records from a Kinesis data stream's shard.
Specify a shard iterator using the `ShardIterator` parameter. The shard
iterator specifies the position in the shard from which you want to start
reading data records sequentially. If there are no records available in the
portion of the shard that the iterator points to, `GetRecords` returns an
empty list. It might take multiple calls to get to a portion of the shard
that contains records.
You can scale by provisioning multiple shards per stream while considering
service limits (for more information, see [Amazon Kinesis Data Streams
Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*). Your application
should have one thread per shard, each reading continuously from its
stream. To read from a stream continually, call `GetRecords` in a loop. Use
`GetShardIterator` to get the shard iterator to specify in the first
`GetRecords` call. `GetRecords` returns a new shard iterator in
`NextShardIterator`. Specify the shard iterator returned in
`NextShardIterator` in subsequent calls to `GetRecords`. If the shard has
been closed, the shard iterator can't return more data and `GetRecords`
returns `null` in `NextShardIterator`. You can terminate the loop when the
shard is closed, or when the shard iterator reaches the record with the
sequence number or other attribute that marks it as the last record to
process.
Each data record can be up to 1 MiB in size, and each shard can read up to
2 MiB per second. You can ensure that your calls don't exceed the maximum
supported size or throughput by using the `Limit` parameter to specify the
maximum number of records that `GetRecords` can return. Consider your
average record size when determining this limit. The maximum number of
records that can be returned per call is 10,000.
The size of the data returned by `GetRecords` varies depending on the
utilization of the shard. The maximum size of data that `GetRecords` can
return is 10 MiB. If a call returns this amount of data, subsequent calls
made within the next 5 seconds throw
`ProvisionedThroughputExceededException`. If there is insufficient
provisioned throughput on the stream, subsequent calls made within the next
1 second throw `ProvisionedThroughputExceededException`. `GetRecords`
doesn't return any data when it throws an exception. For this reason, we
recommend that you wait 1 second between calls to `GetRecords`. However,
it's possible that the application will get exceptions for longer than 1
second.
To detect whether the application is falling behind in processing, you can
use the `MillisBehindLatest` response attribute. You can also monitor the
stream using CloudWatch metrics and other mechanisms (see
[Monitoring](http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html)
in the *Amazon Kinesis Data Streams Developer Guide*).
Each Amazon Kinesis record includes a value, `ApproximateArrivalTimestamp`,
that is set when a stream successfully receives and stores a record. This
is commonly referred to as a server-side time stamp, whereas a client-side
time stamp is set when a data producer creates or sends the record to a
stream (a data producer is any data source putting data records into a
stream, for example with `PutRecords`). The time stamp has millisecond
precision. There are no guarantees about the time stamp accuracy, or that
the time stamp is always increasing. For example, records in a shard or
across a stream might have time stamps that are out of order.
This operation has a limit of five transactions per second per account.
"""
def get_records(client, input, options \\ []) do
request(client, "GetRecords", input, options)
end
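# A sketch (not part of the generated API) of the polling loop described
# above: read a shard until it is closed, waiting one second between calls
# as the documentation recommends. The response keys follow the Kinesis
# wire protocol.
def read_shard(client, shard_iterator, acc \\ []) do
  case get_records(client, %{"ShardIterator" => shard_iterator}) do
    {:ok, %{"Records" => records, "NextShardIterator" => nil}, _response} ->
      # a null NextShardIterator means the shard has been closed
      acc ++ records
    {:ok, %{"Records" => records, "NextShardIterator" => next_iterator}, _response} ->
      Process.sleep(1_000)
      read_shard(client, next_iterator, acc ++ records)
  end
end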
@doc """
Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes
after it is returned to the requester.
A shard iterator specifies the shard position from which to start reading
data records sequentially. The position is specified using the sequence
number of a data record in a shard. A sequence number is the identifier
associated with every record ingested in the stream, and is assigned when a
record is put into the stream. Each stream has one or more shards.
You must specify the shard iterator type. For example, you can set the
`ShardIteratorType` parameter to read exactly from the position denoted by
a specific sequence number by using the `AT_SEQUENCE_NUMBER` shard iterator
type. Alternatively, the parameter can read right after the sequence number
by using the `AFTER_SEQUENCE_NUMBER` shard iterator type, using sequence
numbers returned by earlier calls to `PutRecord`, `PutRecords`,
`GetRecords`, or `DescribeStream`. In the request, you can specify the
shard iterator type `AT_TIMESTAMP` to read records from an arbitrary point
in time, `TRIM_HORIZON` to cause `ShardIterator` to point to the last
untrimmed record in the shard in the system (the oldest data record in the
shard), or `LATEST` so that you always read the most recent data in the
shard.
When you read repeatedly from a stream, use a `GetShardIterator` request to
get the first shard iterator for use in your first `GetRecords` request and
for subsequent reads use the shard iterator returned by the `GetRecords`
request in `NextShardIterator`. A new shard iterator is returned by every
`GetRecords` request in `NextShardIterator`, which you use in the
`ShardIterator` parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you receive a
`ProvisionedThroughputExceededException`. For more information about
throughput limits, see `GetRecords`, and [Streams
Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
If the shard is closed, `GetShardIterator` returns a valid iterator for the
last sequence number of the shard. A shard can be closed as a result of
using `SplitShard` or `MergeShards`.
`GetShardIterator` has a limit of five transactions per second per account
per open shard.
"""
def get_shard_iterator(client, input, options \\ []) do
request(client, "GetShardIterator", input, options)
end
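# Hypothetical usage, pairing with the read_shard/3 sketch above; the stream
# and shard identifiers are illustrative.
#
#     {:ok, %{"ShardIterator" => iterator}, _response} =
#       AWS.Kinesis.get_shard_iterator(client, %{
#         "StreamName" => "my-stream",
#         "ShardId" => "shardId-000000000000",
#         "ShardIteratorType" => "TRIM_HORIZON"
#       })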
@doc """
Increases the Kinesis data stream's retention period, which is the length
of time data records are accessible after they are added to the stream. The
maximum value of a stream's retention period is 168 hours (7 days).
If you choose a longer stream retention period, this operation increases
the time period during which records that have not yet expired are
accessible. However, it does not make previous, expired data (older than
the stream's previous retention period) accessible after the operation has
been called. For example, if a stream's retention period is set to 24 hours
and is increased to 168 hours, any data that is older than 24 hours remains
inaccessible to consumer applications.
"""
def increase_stream_retention_period(client, input, options \\ []) do
request(client, "IncreaseStreamRetentionPeriod", input, options)
end
@doc """
Lists the shards in a stream and provides information about each shard.
This operation has a limit of 100 transactions per second per data stream.
**Important:** This API is a new operation that is used by the Amazon Kinesis
Client Library (KCL). If you have a fine-grained IAM policy that only
allows specific operations, you must update your policy to allow calls to
this API. For more information, see [Controlling Access to Amazon Kinesis
Data Streams Resources Using
IAM](https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html).
"""
def list_shards(client, input, options \\ []) do
request(client, "ListShards", input, options)
end
@doc """
Lists the consumers registered to receive data from a stream using enhanced
fan-out, and provides information about each consumer.
This operation has a limit of 10 transactions per second per account.
"""
def list_stream_consumers(client, input, options \\ []) do
request(client, "ListStreamConsumers", input, options)
end
@doc """
Lists your Kinesis data streams.
The number of streams may be too large to return from a single call to
`ListStreams`. You can limit the number of returned streams using the
`Limit` parameter. If you do not specify a value for the `Limit` parameter,
Kinesis Data Streams uses the default limit, which is currently 10.
You can detect if there are more streams available to list by using the
`HasMoreStreams` flag from the returned output. If there are more streams
available, you can request more streams by using the name of the last
stream returned by the `ListStreams` request in the
`ExclusiveStartStreamName` parameter in a subsequent request to
`ListStreams`. The group of stream names returned by the subsequent request
is then added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of five transactions per second per account.
"""
def list_streams(client, input, options \\ []) do
request(client, "ListStreams", input, options)
end
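# A sketch (not part of the generated API) of the pagination described above:
# accumulate stream names until HasMoreStreams is false.
def list_all_stream_names(client, input \\ %{}, acc \\ []) do
  {:ok, %{"StreamNames" => names, "HasMoreStreams" => more?}, _response} =
    list_streams(client, input)

  acc = acc ++ names

  if more? do
    list_all_stream_names(client, %{"ExclusiveStartStreamName" => List.last(names)}, acc)
  else
    acc
  end
end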
@doc """
Lists the tags for the specified Kinesis data stream. This operation has a
limit of five transactions per second per account.
"""
def list_tags_for_stream(client, input, options \\ []) do
request(client, "ListTagsForStream", input, options)
end
@doc """
Merges two adjacent shards in a Kinesis data stream and combines them into
a single shard to reduce the stream's capacity to ingest and transport
data. Two shards are considered adjacent if the union of the hash key
ranges for the two shards forms a contiguous set with no gaps. For example,
if you have two shards, one with a hash key range of 276...381 and the
other with a hash key range of 382...454, then you could merge these two
shards into a single shard that would have a hash key range of 276...454.
After the merge, the single child shard receives data for all hash key
values covered by the two parent shards.
`MergeShards` is called when there is a need to reduce the overall capacity
of a stream because of excess capacity that is not being used. You must
specify the shard to be merged and the adjacent shard for a stream. For
more information about merging shards, see [Merge Two
Shards](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
If the stream is in the `ACTIVE` state, you can call `MergeShards`. If a
stream is in the `CREATING`, `UPDATING`, or `DELETING` state, `MergeShards`
returns a `ResourceInUseException`. If the specified stream does not exist,
`MergeShards` returns a `ResourceNotFoundException`.
You can use `DescribeStream` to check the state of the stream, which is
returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a `MergeShards`
request, Amazon Kinesis Data Streams immediately returns a response and
sets the `StreamStatus` to `UPDATING`. After the operation is completed,
Kinesis Data Streams sets the `StreamStatus` to `ACTIVE`. Read and write
operations continue to work while the stream is in the `UPDATING` state.
You use `DescribeStream` to determine the shard IDs that are specified in
the `MergeShards` request.
If you try to operate on too many streams in parallel using `CreateStream`,
`DeleteStream`, `MergeShards`, or `SplitShard`, you receive a
`LimitExceededException`.
`MergeShards` has a limit of five transactions per second per account.
"""
def merge_shards(client, input, options \\ []) do
request(client, "MergeShards", input, options)
end
@doc """
Writes a single data record into an Amazon Kinesis data stream. Call
`PutRecord` to send data into the stream for real-time ingestion and
subsequent processing, one record at a time. Each shard can support writes
up to 1,000 records per second, up to a maximum data write total of 1 MB
per second.
You must specify the name of the stream that captures, stores, and
transports the data; a partition key; and the data blob itself.
The data blob can be any type of data; for example, a segment from a log
file, geographic/location data, website clickstream data, and so on.
The partition key is used by Kinesis Data Streams to distribute data across
shards. Kinesis Data Streams segregates the data records that belong to a
stream into multiple shards, using the partition key associated with each
data record to determine the shard to which a given data record belongs.
Partition keys are Unicode strings, with a maximum length limit of 256
characters for each key. An MD5 hash function is used to map partition keys
to 128-bit integer values and to map associated data records to shards
using the hash key ranges of the shards. You can override hashing the
partition key to determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information, see [Adding
Data to a
Stream](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
in the *Amazon Kinesis Data Streams Developer Guide*.
`PutRecord` returns the shard ID of where the data record was placed and
the sequence number that was assigned to the data record.
Sequence numbers increase over time and are specific to a shard within a
stream, not across all shards within a stream. To guarantee strictly
increasing ordering, write serially to a shard and use the
`SequenceNumberForOrdering` parameter. For more information, see [Adding
Data to a
Stream](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
in the *Amazon Kinesis Data Streams Developer Guide*.
If a `PutRecord` request cannot be processed because of insufficient
provisioned throughput on the shard involved in the request, `PutRecord`
throws `ProvisionedThroughputExceededException`.
By default, data records are accessible for 24 hours from the time that
they are added to a stream. You can use `IncreaseStreamRetentionPeriod` or
`DecreaseStreamRetentionPeriod` to modify this retention period.
"""
def put_record(client, input, options \\ []) do
request(client, "PutRecord", input, options)
end
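# Hypothetical usage: the data blob is base64-encoded per the JSON wire
# format, and the stream name and partition key are illustrative.
#
#     AWS.Kinesis.put_record(client, %{
#       "StreamName" => "my-stream",
#       "PartitionKey" => "user-42",
#       "Data" => Base.encode64("an event payload")
#     })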
@doc """
Writes multiple data records into a Kinesis data stream in a single call
(also referred to as a `PutRecords` request). Use this operation to send
data into the stream for data ingestion and processing.
Each `PutRecords` request can support up to 500 records. Each record in the
request can be as large as 1 MB, up to a limit of 5 MB for the entire
request, including partition keys. Each shard can support writes up to
1,000 records per second, up to a maximum data write total of 1 MB per
second.
You must specify the name of the stream that captures, stores, and
transports the data; and an array of request `Records`, with each record in
the array requiring a partition key and data blob. The record size limit
applies to the total size of the partition key and data blob.
The data blob can be any type of data; for example, a segment from a log
file, geographic/location data, website clickstream data, and so on.
The partition key is used by Kinesis Data Streams as input to a hash
function that maps the partition key and associated data to a specific
shard. An MD5 hash function is used to map partition keys to 128-bit
integer values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition key map to
the same shard within the stream. For more information, see [Adding Data to
a
Stream](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
in the *Amazon Kinesis Data Streams Developer Guide*.
Each record in the `Records` array may include an optional parameter,
`ExplicitHashKey`, which overrides the partition key to shard mapping. This
parameter allows a data producer to determine explicitly the shard where
the record is stored. For more information, see [Adding Multiple Records
with
PutRecords](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
in the *Amazon Kinesis Data Streams Developer Guide*.
The `PutRecords` response includes an array of response `Records`. Each
record in the response array directly correlates with a record in the
request array using natural ordering, from the top to the bottom of the
request and response. The response `Records` array always includes the same
number of records as the request array.
The response `Records` array includes both successfully and unsuccessfully
processed records. Kinesis Data Streams attempts to process all records in
each `PutRecords` request. A single record failure does not stop the
processing of subsequent records.
A successfully processed record includes `ShardId` and `SequenceNumber`
values. The `ShardId` parameter identifies the shard in the stream where
the record is stored. The `SequenceNumber` parameter is an identifier
assigned to the put record, unique to all records in the stream.
An unsuccessfully processed record includes `ErrorCode` and `ErrorMessage`
values. `ErrorCode` reflects the type of error and can be one of the
following values: `ProvisionedThroughputExceededException` or
`InternalFailure`. `ErrorMessage` provides more detailed information about
the `ProvisionedThroughputExceededException` exception including the
account ID, stream name, and shard ID of the record that was throttled. For
more information about partially successful responses, see [Adding Multiple
Records with
PutRecords](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
in the *Amazon Kinesis Data Streams Developer Guide*.
By default, data records are accessible for 24 hours from the time that
they are added to a stream. You can use `IncreaseStreamRetentionPeriod` or
`DecreaseStreamRetentionPeriod` to modify this retention period.
"""
def put_records(client, input, options \\ []) do
request(client, "PutRecords", input, options)
end
@doc """
Registers a consumer with a Kinesis data stream. When you use this
operation, the consumer you register can read data from the stream at a
rate of up to 2 MiB per second. This rate is unaffected by the total number
of consumers that read from the same stream.
You can register up to 5 consumers per stream. A given consumer can only be
registered with one stream.
This operation has a limit of five transactions per second per account.
"""
def register_stream_consumer(client, input, options \\ []) do
request(client, "RegisterStreamConsumer", input, options)
end
@doc """
Removes tags from the specified Kinesis data stream. Removed tags are
deleted and cannot be recovered after this operation successfully
completes.
If you specify a tag that does not exist, it is ignored.
`RemoveTagsFromStream` has a limit of five transactions per second per
account.
"""
def remove_tags_from_stream(client, input, options \\ []) do
request(client, "RemoveTagsFromStream", input, options)
end
@doc """
Splits a shard into two new shards in the Kinesis data stream, to increase
the stream's capacity to ingest and transport data. `SplitShard` is called
when there is a need to increase the overall capacity of a stream because
of an expected increase in the volume of data records being ingested.
You can also use `SplitShard` when a shard appears to be approaching its
maximum utilization; for example, the producers sending data into the
specific shard are suddenly sending more than previously anticipated. You
can also call `SplitShard` to increase stream capacity, so that more
Kinesis Data Streams applications can simultaneously read data from the
stream for real-time processing.
You must specify the shard to be split and the new hash key, which is the
position in the shard where the shard gets split in two. In many cases, the
new hash key might be the average of the beginning and ending hash key, but
it can be any hash key value in the range being mapped into the shard. For
more information, see [Split a
Shard](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
in the *Amazon Kinesis Data Streams Developer Guide*.
You can use `DescribeStream` to determine the shard ID and hash key values
for the `ShardToSplit` and `NewStartingHashKey` parameters that are
specified in the `SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a `SplitShard`
request, Kinesis Data Streams immediately returns a response and sets the
stream status to `UPDATING`. After the operation is completed, Kinesis Data
Streams sets the stream status to `ACTIVE`. Read and write operations
continue to work while the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the stream, which is
returned in `StreamStatus`. If the stream is in the `ACTIVE` state, you can
call `SplitShard`. If a stream is in `CREATING` or `UPDATING` or `DELETING`
states, `DescribeStream` returns a `ResourceInUseException`.
If the specified stream does not exist, `DescribeStream` returns a
`ResourceNotFoundException`. If you try to create more shards than are
authorized for your account, you receive a `LimitExceededException`.
For the default shard limit for an AWS account, see [Kinesis Data Streams
Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*. To increase this
limit, [contact AWS
Support](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
If you try to operate on too many streams simultaneously using
`CreateStream`, `DeleteStream`, `MergeShards`, and/or `SplitShard`, you
receive a `LimitExceededException`.
`SplitShard` has a limit of five transactions per second per account.
"""
def split_shard(client, input, options \\ []) do
request(client, "SplitShard", input, options)
end
@doc """
Enables or updates server-side encryption using an AWS KMS key for a
specified stream.
Starting encryption is an asynchronous operation. Upon receiving the
request, Kinesis Data Streams returns immediately and sets the status of
the stream to `UPDATING`. After the update is complete, Kinesis Data
Streams sets the status of the stream back to `ACTIVE`. Updating or
applying encryption normally takes a few seconds to complete, but it can
take minutes. You can continue to read and write data to your stream while
its status is `UPDATING`. Once the status of the stream is `ACTIVE`,
encryption begins for records written to the stream.
API Limits: You can successfully apply a new AWS KMS key for server-side
encryption 25 times in a rolling 24-hour period.
Note: It can take up to 5 seconds after the stream is in an `ACTIVE` status
before all records written to the stream are encrypted. After you enable
encryption, you can verify that encryption is applied by inspecting the API
response from `PutRecord` or `PutRecords`.
"""
def start_stream_encryption(client, input, options \\ []) do
request(client, "StartStreamEncryption", input, options)
end
@doc """
Disables server-side encryption for a specified stream.
Stopping encryption is an asynchronous operation. Upon receiving the
request, Kinesis Data Streams returns immediately and sets the status of
the stream to `UPDATING`. After the update is complete, Kinesis Data
Streams sets the status of the stream back to `ACTIVE`. Stopping encryption
normally takes a few seconds to complete, but it can take minutes. You can
continue to read and write data to your stream while its status is
`UPDATING`. Once the status of the stream is `ACTIVE`, records written to
the stream are no longer encrypted by Kinesis Data Streams.
API Limits: You can successfully disable server-side encryption 25 times in
a rolling 24-hour period.
Note: It can take up to 5 seconds after the stream is in an `ACTIVE` status
before all records written to the stream are no longer subject to
encryption. After you disable encryption, you can verify that encryption
is not applied by inspecting the API response from `PutRecord` or
`PutRecords`.
"""
def stop_stream_encryption(client, input, options \\ []) do
request(client, "StopStreamEncryption", input, options)
end
@doc """
Call this operation from your consumer after you call
`RegisterStreamConsumer` to register the consumer with Kinesis Data
Streams. If the call succeeds, your consumer starts receiving events of
type `SubscribeToShardEvent` for up to 5 minutes, after which time you need
to call `SubscribeToShard` again to renew the subscription if you want to
continue to receive records.
You can make one call to `SubscribeToShard` per second per `ConsumerARN`.
If your call succeeds, and then you call the operation again less than 5
seconds later, the second call generates a `ResourceInUseException`. If you
call the operation a second time more than 5 seconds after the first call
succeeds, the second call succeeds and the first connection gets shut down.
"""
def subscribe_to_shard(client, input, options \\ []) do
request(client, "SubscribeToShard", input, options)
end
@doc """
Updates the shard count of the specified stream to the specified number of
shards.
Updating the shard count is an asynchronous operation. Upon receiving the
request, Kinesis Data Streams returns immediately and sets the status of
the stream to `UPDATING`. After the update is complete, Kinesis Data
Streams sets the status of the stream back to `ACTIVE`. Depending on the
size of the stream, the scaling action could take a few minutes to
complete. You can continue to read and write data to your stream while its
status is `UPDATING`.
To update the shard count, Kinesis Data Streams performs splits or merges
on individual shards. This can cause short-lived shards to be created, in
addition to the final shards. We recommend that you double or halve the
shard count, as this results in the fewest number of splits or merges.
This operation has the following default limits. By default, you cannot do
the following:
  * Scale more than twice per rolling 24-hour period per stream

  * Scale up to more than double your current shard count for a stream

  * Scale down below half your current shard count for a stream

  * Scale up to more than 500 shards in a stream

  * Scale a stream with more than 500 shards down unless the result is less
    than 500 shards

  * Scale up to more than the shard limit for your account

For the default limits for an AWS account, see [Streams
Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
in the *Amazon Kinesis Data Streams Developer Guide*. To request an
increase in the call rate limit, the shard limit for this API, or your
overall shard limit, use the [limits
form](https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis).
"""
def update_shard_count(client, input, options \\ []) do
request(client, "UpdateShardCount", input, options)
end
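# Hypothetical usage: scale an illustrative stream to four shards. The
# parameter names follow the UpdateShardCount API; UNIFORM_SCALING is
# currently the only scaling type.
#
#     AWS.Kinesis.update_shard_count(client, %{
#       "StreamName" => "my-stream",
#       "TargetShardCount" => 4,
#       "ScalingType" => "UNIFORM_SCALING"
#     })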
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "kinesis"}
host = get_host("kinesis", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "Kinesis_20131202.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# File: lib/stripe/subscription.ex
defmodule Stripe.Subscription do
@moduledoc """
Work with Stripe subscription objects.
You can:
- Create a subscription
- Retrieve a subscription
- Update a subscription
- Delete a subscription
Does not yet render lists or take options.
Stripe API reference: https://stripe.com/docs/api#subscription
"""
@type t :: %__MODULE__{}
defstruct [
:id, :object,
:application_fee_percent, :cancel_at_period_end, :canceled_at,
:created, :current_period_end, :current_period_start, :customer,
:ended_at, :livemode, :metadata, :plan, :prorate, :quantity, :source,
:start, :status, :tax_percent, :trial_end, :trial_start
]
@plural_endpoint "subscriptions"
@schema %{
application_fee_percent: [:create, :retrieve, :update],
cancel_at_period_end: [:retrieve],
canceled_at: [:retrieve],
coupon: [:create, :update],
created: [:retrieve],
current_period_end: [:retrieve],
current_period_start: [:retrieve],
customer: [:create, :retrieve],
discount: [:retrieve],
ended_at: [:retrieve],
id: [:retrieve],
livemode: [:retrieve],
metadata: [:create, :retrieve, :update],
object: [:retrieve],
plan: [:create, :retrieve, :update],
prorate: [:create, :update],
quantity: [:create, :retrieve, :update],
source: [:create, :update],
start: [:retrieve],
status: [:retrieve],
tax_percent: [:create, :retrieve, :update],
trial_end: [:create, :retrieve, :update],
trial_period_days: [:create],
trial_start: [:create, :retrieve]
}
@nullable_keys [
:metadata
]
@doc """
Create a subscription.
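
A hypothetical call, using fields the schema above allows for `:create`
(the customer and plan identifiers are illustrative):

    {:ok, subscription} =
      Stripe.Subscription.create(%{
        customer: "cus_123",
        plan: "plan_gold",
        trial_period_days: 14
      })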
"""
@spec create(map, Keyword.t) :: {:ok, t} | {:error, Stripe.api_error_struct}
def create(changes, opts \\ []) do
Stripe.Request.create(@plural_endpoint, changes, @schema, opts)
end
@doc """
Retrieve a subscription.
"""
@spec retrieve(binary, Keyword.t) :: {:ok, t} | {:error, Stripe.api_error_struct}
def retrieve(id, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.retrieve(endpoint, opts)
end
@doc """
Update a subscription.
Takes the `id` and a map of changes.
"""
@spec update(binary, map, Keyword.t) :: {:ok, t} | {:error, Stripe.api_error_struct}
def update(id, changes, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.update(endpoint, changes, @schema, @nullable_keys, opts)
end
@doc """
Delete a subscription.
Takes the `id` and an optional map of `params`.
"""
@spec delete(binary, map, Keyword.t) :: :ok | {:error, Stripe.api_error_struct}
def delete(id, params \\ %{}, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.delete(endpoint, params, opts)
end
@doc """
List all subscriptions.
"""
@spec list(map, Keyword.t) :: {:ok, Stripe.List.t} | {:error, Stripe.api_error_struct}
def list(params \\ %{}, opts \\ []) do
endpoint = @plural_endpoint
Stripe.Request.retrieve(params, endpoint, opts)
end
end
# File: lib/ex_dns/resource.ex
defmodule ExDns.Resource do
@moduledoc """
Manages resource records.
4.1.3. Resource record format
The answer, authority, and additional sections all share the same
format: a variable number of resource records, where the number of
records is specified in the corresponding count field in the header.
Each resource record has the following format:
1 1 1 1 1 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| |
/ /
/ NAME /
| |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| TYPE |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| CLASS |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| TTL |
| |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| RDLENGTH |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
/ RDATA /
/ /
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
where:
NAME a domain name to which this resource record pertains.
TYPE two octets containing one of the RR type codes. This
field specifies the meaning of the data in the RDATA
field.
CLASS two octets which specify the class of the data in the
RDATA field.
TTL a 32 bit unsigned integer that specifies the time
interval (in seconds) that the resource record may be
cached before it should be discarded. Zero values are
interpreted to mean that the RR can only be used for the
transaction in progress, and should not be cached.
RDLENGTH an unsigned 16 bit integer that specifies the length in
octets of the RDATA field.
RDATA a variable length string of octets that describes the
resource. The format of this information varies
according to the TYPE and CLASS of the resource record.
For example, if the TYPE is A and the CLASS is IN,
the RDATA field is a 4 octet ARPA Internet address.
3.2.3. QTYPE values
QTYPE fields appear in the question part of a query. QTYPES are a
superset of TYPEs, hence all TYPEs are valid QTYPEs. In addition, the
following QTYPEs are defined:
AXFR 252 A request for a transfer of an entire zone
MAILB 253 A request for mailbox-related records (MB, MG or MR)
MAILA 254 A request for mail agent RRs (Obsolete - see MX)
* 255 A request for all records
"""
@keys [:name, :type, :class, :ttl, :rdlength, :rdata]
defstruct @keys
@typedoc """
The TYPE fields used in resource records.
Note that these types are a subset of QTYPEs.
TYPE value and meaning
A 1 a host address
NS 2 an authoritative name server
MD 3 a mail destination (Obsolete - use MX)
MF 4 a mail forwarder (Obsolete - use MX)
CNAME 5 the canonical name for an alias
SOA 6 marks the start of a zone of authority
MB 7 a mailbox domain name (EXPERIMENTAL)
MG 8 a mail group member (EXPERIMENTAL)
MR 9 a mail rename domain name (EXPERIMENTAL)
NULL 10 a null RR (EXPERIMENTAL)
WKS 11 a well known service description
PTR 12 a domain name pointer
HINFO 13 host information
MINFO 14 mailbox or mail list information
MX 15 mail exchange
TXT 16 text strings
"""
@type type ::
:a
| :ns
| :md
| :mf
| :cname
| :soa
| :mb
| :mg
| :mr
| :null
| :wks
| :ptr
| :hinfo
| :minfo
| :mx
| :txt
| :rp
| :afsdb
| :rt
| :sig
| :key
| :loc
| :aaaa
| :srv
| :naptr
| :dname
| :opt
| :ds
| :rrsig
| :nsec
| :dnskey
| :spf
| :axfr
| :mailb
| :maila
| :any
| :uri
| :private_use
@typedoc """
The [CLASS](https://tools.ietf.org/html/rfc1035#section-3.2.4) fields used in resource records
CLASS fields appear in resource records. The following CLASS mnemonics
and values are defined:
IN 1 the Internet
CS 2 the CSNET class (Obsolete - used only for examples in
some obsolete RFCs)
CH 3 the CHAOS class
HS 4 Hesiod [Dyer 87]
### QCLASS values
QCLASS fields appear in the question section of a query. QCLASS values
are a superset of CLASS values; every CLASS is a valid QCLASS. In
addition to CLASS values, the following QCLASSes are defined:
254 none
* 255 any class
"""
@type class ::
:in
| :cs
| :ch
| :hs
| :none
| :all
| :private_use
@doc """
Returns the TYPE mnemonic from the wire
protocol integer format
"""
def decode_type(1), do: :a
def decode_type(2), do: :ns
def decode_type(3), do: :md
def decode_type(4), do: :mf
def decode_type(5), do: :cname
def decode_type(6), do: :soa
def decode_type(7), do: :mb
def decode_type(8), do: :mg
def decode_type(9), do: :mr
def decode_type(10), do: :null
def decode_type(11), do: :wks
def decode_type(12), do: :ptr
def decode_type(13), do: :hinfo
def decode_type(14), do: :minfo
def decode_type(15), do: :mx
def decode_type(16), do: :txt
def decode_type(17), do: :rp
def decode_type(18), do: :afsdb
def decode_type(21), do: :rt
def decode_type(24), do: :sig
def decode_type(25), do: :key
def decode_type(28), do: :aaaa
def decode_type(29), do: :loc
def decode_type(33), do: :srv
def decode_type(35), do: :naptr
def decode_type(39), do: :dname
def decode_type(41), do: :opt
def decode_type(43), do: :ds
def decode_type(46), do: :rrsig
def decode_type(47), do: :nsec
def decode_type(48), do: :dnskey
def decode_type(99), do: :spf
def decode_type(252), do: :axfr
def decode_type(253), do: :mailb
def decode_type(254), do: :maila
def decode_type(255), do: :any
def decode_type(256), do: :uri
def decode_type(type) when type in 65280..65534, do: :private_use
@doc """
Returns the class name from the integer in the
DNS wire protocol.
"""
def decode_class(1), do: :in
def decode_class(2), do: :cs
def decode_class(3), do: :ch
def decode_class(4), do: :hs
def decode_class(254), do: :none
def decode_class(255), do: :all
def decode_class(class) when class in 65280..65534, do: :private_use
# Translates the decoded class to the zone file representation
def decode_class(:in), do: "IN"
# Standard format string for name, ttl, class
@doc false
def preamble_format, do: '~-20s ~10w ~2s '
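# A sketch (not part of the original module) of decoding the fixed portion of
# a resource record as laid out in the moduledoc, using binary pattern
# matching and the decode_type/1 and decode_class/1 helpers above. It assumes
# the NAME field has already been consumed from the packet.
def decode_fixed_part(<<type::16, class::16, ttl::32, rdlength::16,
                        rdata::binary-size(rdlength), rest::binary>>) do
  resource = %__MODULE__{
    type: decode_type(type),
    class: decode_class(class),
    ttl: ttl,
    rdlength: rdlength,
    rdata: rdata
  }

  {resource, rest}
end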
end
# File: lib/coxir/struct/message.ex
defmodule Coxir.Struct.Message do
@moduledoc """
Defines methods used to interact with channel messages.
Refer to [this](https://discord.com/developers/docs/resources/channel#message-object)
for a list of fields and a broader documentation.
In addition, the following fields are also embedded.
- `guild` - a guild object
- `channel` - a channel object
"""
@type message :: map
use Coxir.Struct
alias Coxir.Struct.{User, Guild, Channel}
def pretty(struct) do
struct
|> replace(:author, &User.get/1)
|> replace(:guild_id, &Guild.get/1)
|> replace(:channel_id, &Channel.get/1)
end
@doc """
Replies to a given message.
Refer to `Coxir.Struct.Channel.send_message/2` for more information.
"""
@spec reply(message, String.t | Enum.t) :: map
def reply(%{channel_id: channel}, content),
do: Channel.send_message(channel, content)
@doc """
Modifies a given message.
Returns a message object upon success
or a map containing error information.
#### Content
Either a string or an enumerable with
the fields listed below.
- `content` - the message contents (up to 2000 characters)
- `embed` - embedded rich content, refer to
[this](https://discord.com/developers/docs/resources/channel#embed-object)
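
For example, hypothetical edits of a message previously fetched from the
cache or from an event:

    Message.edit(message, "fixed a typo")
    Message.edit(message, %{content: "fixed a typo"})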
"""
@spec edit(message, String.t | Enum.t) :: map
def edit(%{id: id, channel_id: channel}, content) do
  content = if is_binary(content), do: %{content: content}, else: content

  API.request(:patch, "channels/#{channel}/messages/#{id}", content)
  |> pretty
end
@doc """
Deletes a given message.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec delete(message) :: :ok | map
def delete(%{id: id, channel_id: channel}) do
API.request(:delete, "channels/#{channel}/messages/#{id}")
end
@doc """
Pins a given message.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec pin(message) :: :ok | map
def pin(%{id: id, channel_id: channel}) do
API.request(:put, "channels/#{channel}/pins/#{id}")
end
@doc """
Unpins a given message.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec unpin(message) :: :ok | map
def unpin(%{id: id, channel_id: channel}) do
API.request(:delete, "channels/#{channel}/pins/#{id}")
end
@doc """
Reacts to a given message.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec react(message, String.t) :: :ok | map
def react(%{id: id, channel_id: channel}, emoji) do
API.request(:put, "channels/#{channel}/messages/#{id}/reactions/#{emoji}/@me")
end
@doc """
Fetches a list of users specific to a reaction on a given message.
Returns a list of user objects upon success
or a map containing error information.
"""
@spec get_reactions(message, String.t) :: list | map
def get_reactions(%{id: id, channel_id: channel}, emoji) do
API.request(:get, "channels/#{channel}/messages/#{id}/reactions/#{emoji}")
end
@doc """
Deletes a specific reaction from a given message.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec delete_reaction(message, String.t, String.t) :: :ok | map
def delete_reaction(%{id: id, channel_id: channel}, emoji, user \\ "@me") do
API.request(:delete, "channels/#{channel}/messages/#{id}/reactions/#{emoji}/#{user}")
end
@doc """
Deletes all reactions from a given message.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec delete_all_reactions(message) :: :ok | map
def delete_all_reactions(%{id: id, channel_id: channel}) do
API.request(:delete, "channels/#{channel}/messages/#{id}/reactions")
end
@doc """
Checks whether a given message is an activity.
Returns a boolean.
"""
@spec is_activity?(message) :: boolean
def is_activity?(message) do
  message[:activity] != nil
end
@doc """
Checks whether a given message contains any images or videos.
Returns a boolean.
"""
@spec contains_media?(message) :: boolean
def contains_media?(%{attachments: attachments, embeds: embeds}) do
Enum.any?(attachments, & &1[:width]) or
Enum.any?(embeds, &(&1[:image] || &1[:video]))
end
end
defmodule Req do
require Logger
@external_resource "README.md"
@moduledoc "README.md"
|> File.read!()
|> String.split("<!-- MDOC !-->")
|> Enum.fetch!(1)
@doc """
Makes a GET request.
See `request/3` for a list of supported options.
"""
@doc api: :high_level
def get!(uri, options \\ []) do
request!(:get, uri, options)
end
@doc """
Makes a POST request.
See `request/3` for a list of supported options.
"""
@doc api: :high_level
def post!(uri, body, options \\ []) do
options = Keyword.put(options, :body, body)
request!(:post, uri, options)
end
@doc """
Makes an HTTP request.
## Options
* `:headers` - request headers, defaults to `[]`
* `:body` - request body, defaults to `""`
* `:finch` - Finch pool to use, defaults to `Req.Finch` which is automatically started
by the application. See `Finch` module documentation for more information on starting pools.
* `:finch_options` - Options passed down to Finch when making the request, defaults to `[]`.
See `Finch.request/3` for more information.
The `options` are passed down to `add_default_steps/2`; see its documentation
for more information on how they are used.
"""
@doc api: :high_level
def request(method, uri, options \\ []) do
method
|> build(uri, options)
|> add_default_steps(options)
|> run()
end
@doc """
Makes an HTTP request and returns a response or raises an error.
See `request/3` for more information.
"""
@doc api: :high_level
def request!(method, uri, options \\ []) do
method
|> build(uri, options)
|> add_default_steps(options)
|> run!()
end
## Low-level API
@doc """
Builds a request pipeline.
## Options
* `:headers` - request headers, defaults to `[]`
* `:body` - request body, defaults to `""`
* `:finch` - Finch pool to use, defaults to `Req.Finch` which is automatically started
by the application. See `Finch` module documentation for more information on starting pools.
* `:finch_options` - Options passed down to Finch when making the request, defaults to `[]`.
See `Finch.request/3` for more information.
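
A minimal low-level flow, mirroring what `request/3` does internally:

    :get
    |> Req.build("https://api.github.com/repos/elixir-lang/elixir")
    |> Req.add_default_steps()
    |> Req.run()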
"""
@doc api: :low_level
def build(method, uri, options \\ []) do
%Req.Request{
method: method,
uri: URI.parse(uri),
headers: Keyword.get(options, :headers, []),
body: Keyword.get(options, :body, ""),
finch: Keyword.get(options, :finch, Req.Finch),
finch_options: Keyword.get(options, :finch_options, [])
}
end
@doc """
Adds steps that should be reasonable defaults for most users.
## Request steps
* `normalize_headers/1`
* `default_headers/1`
* `encode/1`
* [`&netrc(&1, options[:netrc])`](`netrc/2`) (if `options[:netrc]` is set to
  `true` for the default path or a string for a custom path)
* [`&auth(&1, options[:auth])`](`auth/2`) (if `options[:auth]` is set)
* [`&params(&1, options[:params])`](`params/2`) (if `options[:params]` is set)
* [`&range(&1, options[:range])`](`range/2`) (if `options[:range]` is set)
## Response steps
* [`&retry(&1, &2, options[:retry])`](`retry/3`) (if `options[:retry]` is set to
  `true` or a keyword list of options)
* `follow_redirects/2`
* `decompress/2`
* `decode/2`
## Error steps
* [`&retry(&1, &2, options[:retry])`](`retry/3`) (if `options[:retry]` is set to
  `true` or a keyword list of options)
## Options
* `:netrc` - if set, adds the `netrc/2` step
* `:auth` - if set, adds the `auth/2` step
* `:params` - if set, adds the `params/2` step
* `:range` - if set, adds the `range/2` step
* `:cache` - if set to `true`, adds the `if_modified_since/1` step
* `:raw` - if set to `true`, skips the `decompress/2` and `decode/2` steps
* `:retry` - if set, adds the `retry/3` step to response and error steps
"""
@doc api: :low_level
def add_default_steps(request, options \\ []) do
request_steps =
[
&normalize_headers/1,
&default_headers/1,
&encode/1
] ++
maybe_steps(options[:netrc], [&netrc(&1, options[:netrc])]) ++
maybe_steps(options[:auth], [&auth(&1, options[:auth])]) ++
maybe_steps(options[:params], [&params(&1, options[:params])]) ++
maybe_steps(options[:range], [&range(&1, options[:range])]) ++
maybe_steps(options[:cache], [&if_modified_since/1])
retry = options[:retry]
retry = if retry == true, do: [], else: retry
raw? = options[:raw] == true
response_steps =
maybe_steps(retry, [&retry(&1, &2, retry)]) ++
[
&follow_redirects/2
] ++
maybe_steps(not raw?, [
&decompress/2,
&decode/2
])
error_steps = maybe_steps(retry, [&retry(&1, &2, retry)])
request
|> add_request_steps(request_steps)
|> add_response_steps(response_steps)
|> add_error_steps(error_steps)
end
defp maybe_steps(nil, _step), do: []
defp maybe_steps(false, _step), do: []
defp maybe_steps(_, steps), do: steps
@doc """
Adds request steps.
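## Examples

A request step is a function that receives a request and returns a (possibly
updated) request. A sketch using a hypothetical tracing header:

    Req.build(:get, "https://httpbin.org/get")
    |> Req.add_request_steps([
      fn request -> update_in(request.headers, &[{"x-trace-id", "abc123"} | &1]) end
    ])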
"""
@doc api: :low_level
def add_request_steps(request, steps) do
update_in(request.request_steps, &(&1 ++ steps))
end
@doc """
Adds response steps.
"""
@doc api: :low_level
def add_response_steps(request, steps) do
update_in(request.response_steps, &(&1 ++ steps))
end
defp prepend_response_steps(request, steps) do
update_in(request.response_steps, &(steps ++ &1))
end
@doc """
Adds error steps.
"""
@doc api: :low_level
def add_error_steps(request, steps) do
update_in(request.error_steps, &(&1 ++ steps))
end
@doc """
Runs a request pipeline.
Returns `{:ok, response}` or `{:error, exception}`.
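## Examples

A sketch tying the low-level functions together (network access assumed):

    {:ok, response} =
      Req.build(:get, "https://httpbin.org/get")
      |> Req.add_default_steps()
      |> Req.run()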
"""
@doc api: :low_level
def run(request) do
case run_request(request) do
%Req.Request{} = request ->
finch_request = Finch.build(request.method, request.uri, request.headers, request.body)
case Finch.request(finch_request, request.finch, request.finch_options) do
{:ok, response} ->
run_response(request, response)
{:error, exception} ->
run_error(request, exception)
end
result ->
result
end
end
@doc """
Runs a request pipeline and returns a response or raises an error.
See `run/1` for more information.
"""
@doc api: :low_level
def run!(request) do
case run(request) do
{:ok, response} -> response
{:error, exception} -> raise exception
end
end
defp run_request(request) do
steps = request.request_steps
Enum.reduce_while(steps, request, fn step, acc ->
case step.(acc) do
%Req.Request{} = request ->
{:cont, request}
{%Req.Request{halted: true}, response_or_exception} ->
{:halt, result(response_or_exception)}
{request, %{status: _, headers: _, body: _} = response} ->
{:halt, run_response(request, response)}
{request, %{__exception__: true} = exception} ->
{:halt, run_error(request, exception)}
end
end)
end
defp run_response(request, response) do
steps = request.response_steps
{_request, response_or_exception} =
Enum.reduce_while(steps, {request, response}, fn step, {request, response} ->
case step.(request, response) do
{%Req.Request{halted: true} = request, response_or_exception} ->
{:halt, {request, response_or_exception}}
{request, %{status: _, headers: _, body: _} = response} ->
{:cont, {request, response}}
{request, %{__exception__: true} = exception} ->
{:halt, run_error(request, exception)}
end
end)
result(response_or_exception)
end
defp run_error(request, exception) do
steps = request.error_steps
{_request, response_or_exception} =
Enum.reduce_while(steps, {request, exception}, fn step, {request, exception} ->
case step.(request, exception) do
{%Req.Request{halted: true} = request, response_or_exception} ->
{:halt, {request, response_or_exception}}
{request, %{__exception__: true} = exception} ->
{:cont, {request, exception}}
{request, %{status: _, headers: _, body: _} = response} ->
{:halt, run_response(request, response)}
end
end)
result(response_or_exception)
end
defp result(%{status: _, headers: _, body: _} = response) do
{:ok, response}
end
defp result(%{__exception__: true} = exception) do
{:error, exception}
end
## Request steps
@doc """
Sets request authentication.
`auth` can be one of:
* `{username, password}` - uses Basic HTTP authentication
## Examples
iex> Req.get!("https://httpbin.org/basic-auth/foo/bar", auth: {"bad", "bad"}).status
401
iex> Req.get!("https://httpbin.org/basic-auth/foo/bar", auth: {"foo", "bar"}).status
200
"""
@doc api: :request
def auth(request, auth)
def auth(request, {username, password}) when is_binary(username) and is_binary(password) do
value = Base.encode64("#{username}:#{password}")
put_new_header(request, "authorization", "Basic #{value}")
end
@doc """
Sets request authentication for a matching host from a netrc file.
## Examples
iex> Req.get!("https://httpbin.org/basic-auth/foo/bar").status
401
iex> Req.get!("https://httpbin.org/basic-auth/foo/bar", netrc: true).status
200
iex> Req.get!("https://httpbin.org/basic-auth/foo/bar", netrc: "/path/to/custom_netrc").status
200
"""
@doc api: :request
def netrc(request, path)
def netrc(request, path) when is_binary(path) do
case Map.fetch(load_netrc(path), request.uri.host) do
{:ok, {username, password}} ->
auth(request, {username, password})
:error ->
request
end
end
def netrc(request, true) do
netrc(request, Path.join(System.user_home!(), ".netrc"))
end
defp load_netrc(path) do
path
|> File.read!()
|> String.split("\n")
|> parse_netrc(nil, %{})
end
@user_agent "req/#{Mix.Project.config()[:version]}"
@doc """
Adds common request headers.
Currently the following headers are added:
* `"user-agent"` - `#{inspect(@user_agent)}`
* `"accept-encoding"` - `"gzip"`
"""
@doc api: :request
def default_headers(request) do
request
|> put_new_header("user-agent", @user_agent)
|> put_new_header("accept-encoding", "gzip")
end
@doc """
Normalizes request headers.
Turns atom header names into strings, e.g.: `:user_agent` becomes `"user-agent"`. Non-atom names
are returned as is.
## Examples
iex> Req.get!("https://httpbin.org/user-agent", headers: [user_agent: "my_agent"]).body
%{"user-agent" => "my_agent"}
"""
@doc api: :request
def normalize_headers(request) do
headers =
for {name, value} <- request.headers do
if is_atom(name) do
{name |> Atom.to_string() |> String.replace("_", "-"), value}
else
{name, value}
end
end
%{request | headers: headers}
end
@doc """
Encodes the request body based on its shape.
If body is of the following shape, it's encoded and its `content-type` set
accordingly. Otherwise it's unchanged.
| Shape | Encoder | Content-Type |
| --------------- | --------------------------- | ------------------------------------- |
| `{:form, data}` | `URI.encode_query/1` | `"application/x-www-form-urlencoded"` |
| `{:json, data}` | `Jason.encode_to_iodata!/1` | `"application/json"` |
## Examples
iex> Req.post!("https://httpbin.org/post", {:form, comments: "hello!"}).body["form"]
%{"comments" => "hello!"}
"""
@doc api: :request
def encode(request) do
case request.body do
{:form, data} ->
request
|> Map.put(:body, URI.encode_query(data))
|> put_new_header("content-type", "application/x-www-form-urlencoded")
{:json, data} ->
request
|> Map.put(:body, Jason.encode_to_iodata!(data))
|> put_new_header("content-type", "application/json")
_other ->
request
end
end
@doc """
Adds params to request query string.
## Examples
iex> Req.get!("https://httpbin.org/anything/query", params: [x: "1", y: "2"]).body["args"]
%{"x" => "1", "y" => "2"}
"""
@doc api: :request
def params(request, params) do
encoded = URI.encode_query(params)
update_in(request.uri.query, fn
nil -> encoded
query -> query <> "&" <> encoded
end)
end
@doc """
Sets the "Range" request header.
`range` can be one of the following:
* a string - returned as is
* a `first..last` range - converted to `"bytes=<first>-<last>"`
## Examples
iex> Req.get!("https://repo.hex.pm/builds/elixir/builds.txt", range: 0..67)
%{
status: 206,
headers: [
{"content-range", "bytes 0-67/45400"},
...
],
body: "master df65074a8143cebec810dfb91cafa43f19dcdbaf 2021-04-23T15:36:18Z"
}
"""
@doc api: :request
def range(request, range)
def range(request, binary) when is_binary(binary) do
put_header(request, "range", binary)
end
def range(request, first..last) do
put_header(request, "range", "bytes=#{first}-#{last}")
end
@doc """
Handles HTTP cache using `if-modified-since` header.
Only successful (200 OK) responses are cached.
This step also _prepends_ a response step that loads and writes the cache. Be careful when
_prepending_ other response steps; make sure the cache is loaded/written as soon as possible.
## Options
* `:dir` - the directory to store the cache, defaults to `<user_cache_dir>/req`
(see: `:filename.basedir/3`)
## Examples
iex> url = "https://hexdocs.pm/elixir/Kernel.html"
iex> response = Req.get!(url, cache: true)
%{
status: 200,
headers: [
{"date", "Fri, 16 Apr 2021 10:09:56 GMT"},
...
],
...
}
iex> Req.get!(url, cache: true) == response
true
"""
@doc api: :request
def if_modified_since(request, options \\ []) do
dir = options[:dir] || :filename.basedir(:user_cache, 'req')
request
|> put_if_modified_since(dir)
|> prepend_response_steps([&handle_cache(&1, &2, dir)])
end
defp put_if_modified_since(request, dir) do
case File.stat(cache_path(dir, request)) do
{:ok, stat} ->
datetime = stat.mtime |> NaiveDateTime.from_erl!() |> format_http_datetime()
put_new_header(request, "if-modified-since", datetime)
_ ->
request
end
end
defp handle_cache(request, response, dir) do
cond do
response.status == 200 ->
write_cache(dir, request, response)
{request, response}
response.status == 304 ->
response = load_cache(dir, request)
{request, response}
true ->
{request, response}
end
end
## Response steps
@doc """
Decompresses the response body based on the `content-encoding` header.
## Examples
iex> response = Req.get!("https://httpbin.org/gzip")
iex> response.headers
[
{"content-encoding", "gzip"},
{"content-type", "application/json"},
...
]
iex> response.body
%{
"gzipped" => true,
...
}
"""
@doc api: :response
def decompress(request, %{body: ""} = response) do
{request, response}
end
def decompress(request, response) do
compression_algorithms = get_content_encoding_header(response.headers)
{request, update_in(response.body, &decompress_body(&1, compression_algorithms))}
end
defp decompress_body(body, algorithms) do
Enum.reduce(algorithms, body, &decompress_with_algorithm(&1, &2))
end
defp decompress_with_algorithm(gzip, body) when gzip in ["gzip", "x-gzip"] do
:zlib.gunzip(body)
end
defp decompress_with_algorithm("deflate", body) do
:zlib.unzip(body)
end
defp decompress_with_algorithm("identity", body) do
body
end
defp decompress_with_algorithm(algorithm, _body) do
raise("unsupported decompression algorithm: #{inspect(algorithm)}")
end
@doc """
Decodes response body based on the detected format.
Supported formats:
| Format | Decoder |
| ------ | ---------------------------------------------------------------- |
| json | `Jason.decode!/1` |
| gzip | `:zlib.gunzip/1` |
| tar | `:erl_tar.extract/2` |
| zip | `:zip.unzip/2` |
| csv | `NimbleCSV.RFC4180.parse_string/2` (if `NimbleCSV` is installed) |
## Examples
iex> Req.get!("https://hex.pm/api/packages/finch").body["meta"]
%{
"description" => "An HTTP client focused on performance.",
"licenses" => ["MIT"],
"links" => %{"GitHub" => "https://github.com/keathley/finch"},
...
}
"""
@doc api: :response
def decode(request, %{body: ""} = response) do
{request, response}
end
def decode(request, response) do
case format(request, response) do
"json" ->
{request, update_in(response.body, &Jason.decode!/1)}
"gz" ->
{request, update_in(response.body, &:zlib.gunzip/1)}
"tar" ->
{:ok, files} = :erl_tar.extract({:binary, response.body}, [:memory])
{request, put_in(response.body, files)}
"tgz" ->
{:ok, files} = :erl_tar.extract({:binary, response.body}, [:memory, :compressed])
{request, put_in(response.body, files)}
"zip" ->
{:ok, files} = :zip.extract(response.body, [:memory])
{request, put_in(response.body, files)}
"csv" ->
if Code.ensure_loaded?(NimbleCSV) do
options = [skip_headers: false]
{request, update_in(response.body, &NimbleCSV.RFC4180.parse_string(&1, options))}
else
{request, response}
end
_ ->
{request, response}
end
end
defp format(request, response) do
with {_, content_type} <- List.keyfind(response.headers, "content-type", 0) do
case extensions(content_type, request) do
[ext | _] -> ext
[] -> nil
end
end
end
defp extensions("application/octet-stream", request) do
path = request.uri.path
if tgz?(path) do
["tgz"]
else
path |> MIME.from_path() |> MIME.extensions()
end
end
defp extensions("application/" <> subtype, request) when subtype in ~w(gzip x-gzip) do
path = request.uri.path
if tgz?(path) do
["tgz"]
else
["gz"]
end
end
defp extensions(content_type, _request) do
MIME.extensions(content_type)
end
defp tgz?(path) do
case Path.extname(path) do
".tgz" -> true
".gz" -> String.ends_with?(path, ".tar.gz")
_ -> false
end
end
@doc """
Follows redirects.
## Examples
iex> Req.get!("http://api.github.com").status
# 23:24:11.670 [debug] Req.follow_redirects/2: Redirecting to https://api.github.com/
200
"""
@doc api: :response
def follow_redirects(request, response)
def follow_redirects(request, %{status: status} = response) when status in 301..302 do
{_, location} = List.keyfind(response.headers, "location", 0)
Logger.debug(["Req.follow_redirects/2: Redirecting to ", location])
request =
if String.starts_with?(location, "/") do
uri = URI.parse(location)
update_in(request.uri, &%{&1 | path: uri.path, query: uri.query})
else
uri = URI.parse(location)
put_in(request.uri, uri)
end
{_, result} = run(request)
{Req.Request.halt(request), result}
end
def follow_redirects(request, response) do
{request, response}
end
## Error steps
@doc """
Retries a request in face of errors.
This function can be used as either or both response and error step. It retries a request that
resulted in:
* a response with status 5xx
* an exception
## Options
* `:delay` - sleep this number of milliseconds before making another attempt, defaults
to `2000`
* `:max_attempts` - maximum number of attempts, defaults to `2`
## Examples
With default options:
iex> Req.get!("https://httpbin.org/status/500,200", retry: true).status
# 19:02:08.463 [error] Req.retry/3: Got response with status 500. Will retry in 2000ms, 2 attempts left
# 19:02:10.710 [error] Req.retry/3: Got response with status 500. Will retry in 2000ms, 1 attempt left
200
With custom options:
iex> Req.get!("http://localhost:9999", retry: [delay: 100, max_attempts: 3])
# 17:00:38.371 [error] Req.retry/3: Got exception. Will retry in 100ms, 3 attempts left
# 17:00:38.371 [error] ** (Mint.TransportError) connection refused
# 17:00:38.473 [error] Req.retry/3: Got exception. Will retry in 100ms, 2 attempts left
# 17:00:38.473 [error] ** (Mint.TransportError) connection refused
# 17:00:38.575 [error] Req.retry/3: Got exception. Will retry in 100ms, 1 attempt left
# 17:00:38.575 [error] ** (Mint.TransportError) connection refused
** (Mint.TransportError) connection refused
"""
@doc api: :error
def retry(request, response_or_exception, options \\ [])
def retry(request, %{status: status} = response, _options) when status < 500 do
{request, response}
end
def retry(request, response_or_exception, options) when is_list(options) do
delay = Keyword.get(options, :delay, 2000)
max_attempts = Keyword.get(options, :max_attempts, 2)
attempt = Req.Request.get_private(request, :retry_attempt, 0)
if attempt < max_attempts do
log_retry(response_or_exception, attempt, max_attempts, delay)
Process.sleep(delay)
request = Req.Request.put_private(request, :retry_attempt, attempt + 1)
{_, result} = run(request)
{Req.Request.halt(request), result}
else
{request, response_or_exception}
end
end
defp log_retry(response_or_exception, attempt, max_attempts, delay) do
attempts_left =
case max_attempts - attempt do
1 -> "1 attempt"
n -> "#{n} attempts"
end
message = ["Will retry in #{delay}ms, ", attempts_left, " left"]
case response_or_exception do
%{__exception__: true} = exception ->
Logger.error([
"Req.retry/3: Got exception. ",
message
])
Logger.error([
"** (#{inspect(exception.__struct__)}) ",
Exception.message(exception)
])
response ->
Logger.error(["Req.retry/3: Got response with status #{response.status}. ", message])
end
end
## Utilities
defp put_new_header(struct, name, value) do
if Enum.any?(struct.headers, fn {key, _} -> String.downcase(key) == name end) do
struct
else
put_header(struct, name, value)
end
end
defp put_header(struct, name, value) do
update_in(struct.headers, &[{name, value} | &1])
end
defp get_content_encoding_header(headers) do
if value = get_header(headers, "content-encoding") do
value
|> String.downcase()
|> String.split(",", trim: true)
|> Stream.map(&String.trim/1)
|> Enum.reverse()
else
[]
end
end
defp get_header(headers, name) do
Enum.find_value(headers, nil, fn {key, value} ->
if String.downcase(key) == name do
value
else
nil
end
end)
end
defp cache_path(cache_dir, request) do
Path.join(cache_dir, cache_key(request))
end
defp write_cache(cache_dir, request, response) do
path = cache_path(cache_dir, request)
File.mkdir_p!(Path.dirname(path))
File.write!(path, :erlang.term_to_binary(response))
end
defp load_cache(cache_dir, request) do
path = cache_path(cache_dir, request)
path |> File.read!() |> :erlang.binary_to_term()
end
defp cache_key(request) do
hash =
:crypto.hash(:sha256, :erlang.term_to_binary(request.uri))
|> Base.encode16(case: :lower)
request.uri.host <> "-" <> hash
end
defp format_http_datetime(datetime) do
Calendar.strftime(datetime, "%a, %d %b %Y %H:%M:%S GMT")
end
defp parse_netrc(["#" <> _ | rest], current_acc, acc) do
parse_netrc(rest, current_acc, acc)
end
defp parse_netrc(["machine " <> machine | rest], _, acc) do
parse_netrc(rest, {machine, nil, nil}, acc)
end
defp parse_netrc(["username " <> username | rest], {machine, nil, nil}, acc) do
parse_netrc(rest, {machine, username, nil}, acc)
end
defp parse_netrc(["password " <> password | rest], {machine, username, nil}, acc) do
parse_netrc(rest, nil, Map.put(acc, machine, {username, password}))
end
defp parse_netrc([other | rest], current_acc, acc) do
if String.trim(other) == "" do
parse_netrc(rest, current_acc, acc)
else
raise "parse error: #{inspect(other)}"
end
end
defp parse_netrc([], nil, acc) do
acc
end
end
|
lib/req.ex
| 0.867471 | 0.488527 |
req.ex
|
starcoder
|
defmodule Versioning do
@moduledoc """
Versionings allow data to be changed to different versions of itself.
At the heart of our versioning is the `Versioning` struct. A `Versioning` struct
contains the following fields:
- `:current` - The current version that our data represents.
- `:target` - The version that we want our data to be changed into.
- `:type` - The type of data we are working with. If we are working with structs,
this will typically be the struct name in string format, eg: `"Post"`
- `:data` - The underlying data that we want to change. For structs, like our
`Post`, be aware that we typically have our data as a bare map since it
is easier to transform.
- `:changes` - A list of change modules that have been applied against the versioning.
The first change module would be the most recent module run.
- `:changed` - A boolean representing if change modules have been applied.
- `:assigns` - A map of arbitrary data we can use to store additional information in.
## Example
Versioning.new(%Post{}, "2.0.0", "1.0.0")
With the above, we have created a versioning of a `Post` struct. This represents
us wanting to transform our post from a version "2.0.0" to an older "1.0.0"
version.
## Schemas
The versioning struct is used in combination with a `Versioning.Schema`, which
allows us to map out the changes that should occur through versions. Please see
the `Versioning.Schema` documentation for more details.
"""
@derive {Inspect, only: [:type, :current, :target, :data, :changed]}
defstruct [
:current,
:target,
:parsed_current,
:parsed_target,
:type,
:schema,
data: %{},
assigns: %{},
changed: false,
changes: []
]
@type version :: binary() | nil
@type type :: binary() | nil
@type data :: %{optional(binary()) => any()}
@type assigns :: %{optional(atom()) => any()}
@type t :: %__MODULE__{
current: version(),
target: version(),
type: type(),
data: map(),
schema: Versioning.Schema.t(),
assigns: assigns(),
changed: boolean(),
changes: [Versioning.Change.t()]
}
@doc """
Creates a new versioning using the data provided.
If a struct is the data, and no type is provided, the struct module is set as
the versioning `:type` (as described in `put_type/2`), and the struct is turned
into a string-key map that is used for the `:data`.
## Examples
# These are equivalent
Versioning.new(%{"foo" => "bar"}, "2.0.0", "1.0.0", SomeData)
Versioning.new(%{foo: "bar"}, "2.0.0", "1.0.0", "SomeData")
Versioning.new(%SomeData{foo: "bar"}, "2.0.0", "1.0.0")
"""
@spec new(map(), version(), version(), type()) :: Versioning.t()
def new(data \\ %{}, current \\ nil, target \\ nil, type \\ nil)
def new(%{__struct__: struct_type} = data, current, target, type) do
data = Map.from_struct(data)
new(data, current, target, type || struct_type)
end
def new(data, current, target, type) when is_map(data) do
%Versioning{}
|> put_data(data)
|> put_current(current)
|> put_target(target)
|> put_type(type)
end
@doc """
Puts the current version that the data represents.
The version should be represented somewhere within your `Versioning.Schema`.
This will become the "starting" point from which change modules will be run.
## Examples
Versioning.put_current(versioning, "0.1.0")
"""
@spec put_current(Versioning.t(), version()) :: Versioning.t()
def put_current(%Versioning{} = versioning, current) do
%{versioning | current: current}
end
@doc """
Puts the target version that the data will be transformed to.
The version should be represented somewhere within your `Versioning.Schema`.
Once the change modules in the target version are run, no more changes will
be made.
## Examples
Versioning.put_target(versioning, "0.1.0")
"""
@spec put_target(Versioning.t(), version()) :: Versioning.t()
def put_target(%Versioning{} = versioning, target) do
%{versioning | target: target}
end
@doc """
Puts the type of the versioning data.
Typically, if working with data that is associated with a struct, this will
be the struct trailing module name in binary format. For example,
`MyApp.Foo` will be represented as `"Foo"`.
When running a versioning through a schema, only the changes that match the
type set on the versioning will be run.
## Examples
# These are equivalent
Versioning.put_type(versioning, "Post")
Versioning.put_type(versioning, MyApp.Post)
"""
@spec put_type(Versioning.t(), type() | atom()) :: Versioning.t()
def put_type(%Versioning{} = versioning, nil) do
%{versioning | type: nil}
end
def put_type(%Versioning{} = versioning, type) when is_atom(type) do
type =
type
|> to_string()
|> String.split(".")
|> List.last()
put_type(versioning, type)
end
def put_type(%Versioning{} = versioning, type) when is_binary(type) do
%{versioning | type: type}
end
@doc """
Assigns a value to a key in the versioning.
The “assigns” storage is meant to be used to store values in the versioning so
that change modules in your schema can access them. The assigns storage is a map.
## Examples
iex> versioning.assigns[:hello]
nil
iex> versioning = Versioning.assign(versioning, :hello, :world)
iex> versioning.assigns[:hello]
:world
"""
@spec assign(Versioning.t(), atom(), any()) :: Versioning.t()
def assign(%Versioning{assigns: assigns} = versioning, key, value) do
%{versioning | assigns: Map.put(assigns, key, value)}
end
@doc """
Returns and removes the value associated with `key` within the `data` of `versioning`.
If `key` is present in `data` with value `value`, `{value, new_versioning}` is
returned where `new_versioning` is the result of removing `key` from `data`. If `key`
is not present in `data`, `{default, new_versioning}` is returned.
## Examples
iex> Versioning.pop_data(versioning, "foo")
{"bar", versioning}
iex> Versioning.pop_data(versioning, "foo")
{nil, versioning}
"""
@spec pop_data(Versioning.t(), any()) :: {any(), Versioning.t()}
def pop_data(%Versioning{data: data} = versioning, key, default \\ nil) do
{result, data} = Map.pop(data, key, default)
{result, %{versioning | data: data}}
end
@doc """
Gets the value for a specific `key` in the `data` of `versioning`.
If `key` is present in `data` with value `value`, then `value` is returned.
Otherwise, `default` is returned (which is `nil` unless specified otherwise).
## Examples
iex> Versioning.get_data(versioning, "foo")
"bar"
iex> Versioning.get_data(versioning, "bar")
nil
iex> Versioning.get_data(versioning, "bar", "baz")
"baz"
"""
@spec get_data(Versioning.t(), binary(), term()) :: any()
def get_data(%Versioning{data: data}, key, default \\ nil) do
Map.get(data, key, default)
end
@doc """
Fetches the value for a specific `key` in the `data` of `versioning`.
If `data` contains the given `key` with value `value`, then `{:ok, value}` is
returned. If `data` doesn't contain `key`, `:error` is returned.
## Examples
iex> Versioning.fetch_data(versioning, "foo")
{:ok, "bar"}
iex> Versioning.fetch_data(versioning, "bar")
:error
"""
@spec fetch_data(Versioning.t(), binary()) :: {:ok, any()} | :error
def fetch_data(%Versioning{data: data}, key) do
Map.fetch(data, key)
end
@doc """
Puts the full data in the versioning.
The data represents the base of what will be modified when a versioning is
run through a schema.
Data must be a map. If a struct is provided, the struct will be turned into
a basic map - though its type information will not be inferred.
The keys of data will always be strings. If passed a map with atom keys, the keys
will be converted to strings.
## Examples
iex> versioning = Versioning.put_data(versioning, %{"foo" => "bar"})
iex> versioning.data
%{"foo" => "bar"}
"""
@spec put_data(Versioning.t(), map()) :: Versioning.t()
def put_data(%Versioning{} = versioning, data) when is_map(data) do
data = deep_stringify(data)
%{versioning | data: data}
end
@doc """
Puts the given `value` under `key` within the `data` of `versioning`.
## Examples
iex> Versioning.put_data(versioning, "foo", "bar")
iex> versioning.data["foo"]
"bar"
"""
@spec put_data(Versioning.t(), binary(), any()) :: Versioning.t()
def put_data(%Versioning{data: data} = versioning, key, value)
when is_map(data) and is_binary(key) do
value = if is_map(value), do: deep_stringify(value), else: value
%{versioning | data: Map.put(data, key, value)}
end
@doc """
Updates the `key` within the `data` of `versioning` using the given function.
If the `data` does not contain `key` - nothing occurs. If it does, the `fun`
is invoked with argument `value` and its result is used as the new value of
`key`.
## Examples
iex> Versioning.update_data(versioning, "foo", fn _val -> "bar" end)
iex> versioning.data["foo"]
"bar"
"""
@spec update_data(Versioning.t(), binary(), (any() -> any())) :: Versioning.t()
def update_data(%Versioning{} = versioning, key, fun)
when is_binary(key) and is_function(fun, 1) do
if Map.has_key?(versioning.data, key) do
val = fun.(versioning.data[key])
put_data(versioning, key, val)
else
versioning
end
end
defp deep_stringify(%{__struct__: _} = struct) do
struct |> Map.from_struct() |> deep_stringify()
end
defp deep_stringify(map) when is_map(map) do
Enum.reduce(map, %{}, fn
{key, val}, acc when is_map(val) ->
val = deep_stringify(val)
Map.put(acc, to_string(key), val)
{key, val}, acc ->
Map.put(acc, to_string(key), val)
end)
end
end
|
lib/versioning.ex
| 0.934058 | 0.743098 |
versioning.ex
|
starcoder
|
defmodule Ptolemy.Provider do
@moduledoc """
`Ptolemy.Provider` defines behaviours for implementing a configuration provider.
The source that `Ptolemy.Loader` pulls values from are defined
by providers. Each provider is responsible for retrieving a value, given a query.
Once implemented, a provider can supply dynamic run time environment variables and
keep them up to date.
# Example
In its simplest form, a provider should be a light API wrapper for retrieving a
value given some kind of query. A good example would be `Ptolemy.Providers.SystemEnv`:
```elixir
defmodule Ptolemy.Providers.SystemEnv do
use Ptolemy.Provider
def init(_loader_pid), do: :ok
def load(_loader_pid, var_name) do
System.get_env(var_name)
end
end
```
When `Ptolemy.Loader` first starts up, it will iterate over each item from the environment config.
The first time the loader comes across a provider, it will be initialized by calling
the `init/1` callback. For every other occurrence in the startup sequence, `init/1` will **not**
be called. In the example above, there is no setup action required to use `System.get_env/1`,
so it will simply return `:ok`.
During the loader's startup iteration over the configuration, it will potentially query the same
provider many times. Each time it queries, it will invoke the `load/2` callback to perform the lookup.
For the example above, the lookup is a call to `System.get_env/1`.
# Managing Expiring or Changing Configurations
Sometimes, secrets are needed that change over time. When this is the case, the loader can be sent
a message signaling that a configuration has likely changed. The most common form of such dynamic
secrets is with a TTL. Providers can support values with a TTL by utilizing the `register_ttl/3`
utility method. A simple example would look like:
```elixir
defmodule TtlExample do
use Ptolemy.Provider
def init(_loader_pid), do: :ok
def load(loader_pid, query) do
{value, ttl} = MyApp.get_value(query)
register_ttl(loader_pid, query, ttl)
value
end
end
```
This implementation will notify the loader to re-load the value after a given TTL has expired. When
the loader is notified, it will call the `load/2` callback again, returning the updated value and
giving the opportunity to set another TTL.
# Loading as a Single Process
The loader and providers all execute on the same process. The primary reason for this is the role
a provider must perform. Foremost, providers should yield values in the order they are defined in the configuration.
This requirement allows earlier providers to supply configuration to later executing providers that may
require additional configuration before they could yield values. After the initialization of the loader,
there is little reason for providers to remain on the same process, however for now it simplifies the loader.
# Depending on External Processes
The intention of a provider is to be a simple external API wrapper. Sometimes, external APIs require
a process to be started to manage interactions. The loader process should usually be started first
in the application's supervision tree, as it will need to populate the configuration of the application.
Any required process should then be started in a provider's `init/1` definition. This will ensure that
process dependencies are started as late as possible in the startup of the application. This ensures that
dependent process will also be able to be configured by providers that appear earlier in the loader configuration.
"""
@doc """
Invoked to setup a provider. This callback is only called once per provider, and is called lazily.
"""
@callback init(pid) :: :ok | {:error, String.t()}
@doc """
Invoked when querying the provider for a value.
"""
@callback load(pid, any) :: any
@doc false
defmacro __using__(_args) do
implement_helpers()
end
defp implement_helpers do
quote do
@behaviour Ptolemy.Provider
@doc false
def register_ttl(loader_pid, query, ttl, ttl_unit \\ :milliseconds) do
Process.send_after(
loader_pid,
{:expired, {__MODULE__, query}},
Ptolemy.Provider.to_millis(ttl, ttl_unit)
)
end
end
end
@typedoc """
Accepted units of time for scheduling a ttl.
"""
@type time_unit :: :milliseconds | :seconds | :minutes | :hours
@doc """
Used to convert accepted time units to milliseconds.
This is used internally for scheduling.
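# Examples

    iex> Ptolemy.Provider.to_millis(2, :minutes)
    120000

    iex> Ptolemy.Provider.to_millis(30, :seconds)
    30000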
"""
@spec to_millis(non_neg_integer, time_unit) :: non_neg_integer
def to_millis(time, time_unit)
def to_millis(time, :hours), do: time * 3_600_000
def to_millis(time, :minutes), do: time * 60000
def to_millis(time, :seconds), do: time * 1000
def to_millis(time, :milliseconds), do: time
end
|
lib/provider.ex
| 0.912709 | 0.914673 |
provider.ex
|
starcoder
|
defmodule Timex.Utils do
@moduledoc false
@doc """
Determines the current version of OTP running this node. The result is
cached for fast lookups in performance-sensitive functions.
## Example
iex> rel = Timex.Utils.get_otp_release
...> '\#{rel}' == :erlang.system_info(:otp_release)
true
"""
def get_otp_release do
case Process.get(:current_otp_release) do
nil ->
case ("#{:erlang.system_info(:otp_release)}" |> Integer.parse) do
{ver, _} when is_integer(ver) ->
Process.put(:current_otp_release, ver)
ver
_ ->
raise RuntimeError, message: "Unable to determine Erlang version"
end
ver -> ver
end
end
@doc """
Loads all modules that extend a given module in the current code path.
The convention is that it will fetch modules with the same root namespace,
and that are suffixed with the name of the module they are extending.
## Example
iex> Timex.Utils.get_plugins(Timex.Parse.DateTime.Tokenizer) |> Enum.sort
[Timex.Parse.DateTime.Tokenizers.Default, Timex.Parse.DateTime.Tokenizers.Strftime]
"""
@spec get_plugins(atom) :: list(atom)
def get_plugins(plugin_type) when is_atom(plugin_type) do
case Process.get(:timex_plugins) do
plugins when is_list(plugins) ->
plugins
_ ->
plugins = available_modules(plugin_type) |> Enum.reduce([], &load_plugin/2)
Process.put(:timex_plugins, plugins)
plugins
end
end
@spec load_plugin(atom, list(atom)) :: list(atom) | no_return
defp load_plugin(module, modules) do
if Code.ensure_loaded?(module), do: [module | modules], else: modules
end
@spec available_modules(atom) :: Enumerable.t
defp available_modules(plugin_type) do
apps_path = Mix.Project.build_path |> Path.join("lib")
case apps_path |> File.ls do
{:ok, apps} ->
apps
|> Stream.map(&(Path.join([apps_path, &1, "ebin"])))
|> Stream.map(fn app_path ->
case app_path |> File.ls do
{:ok, files} -> files |> Enum.map(&(Path.join(app_path, &1)))
_ -> []
end
end)
|> Stream.flat_map(&(&1))
|> Stream.filter(&(String.ends_with?(&1, ".beam")))
|> Stream.map(fn path ->
{:ok, {module, chunks}} = :beam_lib.chunks('#{path}', [:attributes])
{module, get_in(chunks, [:attributes, :behaviour])}
end)
|> Stream.filter(fn {_module, behaviours} ->
is_list(behaviours) && plugin_type in behaviours
end)
|> Enum.map(fn {module, _} -> module end)
_ ->
[]
end
end
end
|
lib/timex/utils.ex
| 0.837985 | 0.451689 |
utils.ex
|
starcoder
|
defmodule RecurringEvents do
@moduledoc """
*RecurringEvents* is an Elixir library providing recurring events support
(duh!).
It loosely follows the
[iCal Recurrence rule specification](http://www.kanzaki.com/docs/ical/rrule.html)
from [RFC 2445](https://tools.ietf.org/html/rfc2445).
iex> RecurringEvents.take(%{date_start: ~D[2016-12-07],freq: :daily}, 3)
[~D[2016-12-07], ~D[2016-12-08], ~D[2016-12-09]]
iex> RecurringEvents.take(~N[2016-01-17 12:21:06], %{freq: :weekly}, 2)
[~N[2016-01-17 12:21:06], ~N[2016-01-24 12:21:06]]
Supported rules
- `:date_start` - start date can be provided directly in rules map
- `:count` - how many occurrences should be returned
- `:interval` - how often recurrence rule repeats
- `:freq` - this is the only required rule, possible values: `:yearly`,
`:monthly`, `:weekly`, `:daily`, `:hourly`, `:minutely`, `:secondly`
- `:week_start` - start day of the week, see `:by_day` for possible values
- `:by_month` - month number or list of month numbers
- `:by_day` - day or list of days, possible values: `:monday`, `:tuesday`,
`:wednesday`, `:thursday`, `:friday`, `:saturday`, `:sunday`.
This rule can also accept tuples with occurrence number when used with
`:monthly` or `:yearly` frequency (e.g. `{3, :monday}` for 3rd Monday or
`{-2, :tuesday}` for 2nd to last Tuesday)
- `:by_month_day` - month day number or list of month day numbers
- `:by_week_number` - number of the week in a year, first week should have at
least 4 days, `:week_start` may affect result of this rule
- `:by_year_day` - number of the day in a year `1` is the first `-1` is the last
- `:by_hour` - hour from 0 to 23
- `:by_minute` - minute from 0 to 59
- `:by_second` - second from 0 to 59
- `:by_set_position` - if present, this indicates the nth occurrence of the
date within the frequency period
- `:exclude_date` - dates to be excluded from the result
- `:until` - limit result up to provided date
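A rules map combining several of these options might look like the following
(values are illustrative):

    %{
      freq: :monthly,
      interval: 2,
      by_day: {1, :monday},
      until: ~D[2020-12-31]
    }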
For more usage examples, please refer to the
[tests](https://github.com/pbogut/recurring_events/blob/master/test/ical_rrul_test.exs)
"""
alias RecurringEvents.{
Date,
Guards,
Yearly,
Monthly,
Frequency,
Weekly,
ByPump,
ByChecker
}
use Guards
@rules [
:by_month_day,
:by_year_day,
:by_day,
:by_week_number,
:by_month,
:by_hour,
:by_minute,
:by_second,
:by_set_position,
:exclude_date
]
@doc """
Returns stream of recurring events based on date and rules
# Example
iex> RecurringEvents.unfold(~D[2014-06-07], %{freq: :yearly})
...> |> Enum.take(3)
[~D[2014-06-07], ~D[2015-06-07], ~D[2016-06-07]]
"""
def unfold(date, rules) do
validate(date, rules)
do_unfold(date, listify(rules))
end
@doc """
Returns stream of recurring events based on rules
# Example
iex> RecurringEvents.unfold(%{date_start: ~D[2014-06-07], freq: :yearly})
...> |> Enum.take(3)
[~D[2014-06-07], ~D[2015-06-07], ~D[2016-06-07]]
"""
def unfold(%{date_start: date} = rules) do
unfold(date, rules)
end
@doc """
Returns list of recurring events based on date and rules
# Example
iex> RecurringEvents.take(~D[2015-09-13], %{freq: :monthly}, 4)
[~D[2015-09-13], ~D[2015-10-13], ~D[2015-11-13], ~D[2015-12-13]]
"""
def take(date, rules, count) when count >= 0 do
date |> unfold(rules) |> Enum.take(count)
end
def take(_date, _rules, _count) do
raise ArgumentError, message: "Count must be a non-negative integer."
end
@doc """
Returns list of recurring events based on rules
# Example
iex> RecurringEvents.take(%{date_start: ~D[2015-09-13], freq: :monthly}, 4)
[~D[2015-09-13], ~D[2015-10-13], ~D[2015-11-13], ~D[2015-12-13]]
"""
def take(%{date_start: date} = rules, count) do
take(date, rules, count)
end
defp do_unfold(date, %{freq: freq} = rules) do
date
|> get_freq_module(freq).unfold(rules)
|> by_rules(rules)
|> by_set_position(rules)
|> drop_before(date)
|> prepend(date)
|> drop_exclude(rules)
|> drop_after(rules)
end
defp drop_exclude(dates, %{exclude_date: excludes}) do
dates |> Stream.filter(&(&1 not in excludes))
end
defp drop_exclude(dates, _) do
dates
end
defp by_rules(dates, rules) do
dates
|> Stream.flat_map(&ByPump.inflate(&1, rules))
|> Stream.filter(&ByChecker.check(&1, rules))
end
defp drop_before(list, date) do
Stream.drop_while(list, &(Date.compare(date, &1) != :lt))
end
defp drop_after(list, %{until: date}) do
Stream.take_while(list, &(Date.compare(date, &1) != :lt))
end
defp drop_after(list, %{count: count}), do: Stream.take(list, count)
defp drop_after(list, %{}), do: list
defp prepend(list, element), do: Stream.concat([element], list)
defp get_freq_module(:yearly), do: Yearly
defp get_freq_module(:monthly), do: Monthly
defp get_freq_module(:weekly), do: Weekly
defp get_freq_module(:daily), do: Frequency
defp get_freq_module(:hourly), do: Frequency
defp get_freq_module(:minutely), do: Frequency
defp get_freq_module(:secondly), do: Frequency
defp listify(%{freq: _} = rules) do
Enum.reduce(@rules, rules, fn key, rules ->
case Map.get(rules, key, nil) do
nil -> rules
value -> %{rules | key => listify(value)}
end
end)
end
defp listify(list) when is_list(list), do: list
defp listify(item) when not is_list(item), do: [item]
defp by_set_position(dates, %{by_set_position: positions} = rules) do
dates
|> Stream.chunk_by(chunk_func(rules))
|> Stream.flat_map(&get_at_positions(&1, positions))
end
defp by_set_position(dates, _rules), do: dates
defp get_at_positions(date, positions) do
positions
|> Enum.map(fn position -> get_position(date, position) end)
|> Enum.filter(fn date -> date != nil end)
end
defp get_position(dates, position) do
cond do
position > 0 -> Enum.at(dates, position - 1)
position < 0 -> dates |> Enum.reverse() |> Enum.at(-position - 1)
end
end
defp chunk_func(%{freq: :yearly}), do: fn date -> date.year end
defp chunk_func(%{freq: :monthly}), do: fn date -> date.month end
defp chunk_func(%{freq: :daily}), do: fn date -> date.day end
defp chunk_func(%{freq: :weekly} = rules) do
&Date.week_number(&1, week_start: week_start(rules))
end
defp week_start(%{week_start: week_start}), do: week_start
defp week_start(_), do: :monday
defp validate(date, rules) do
freq = Map.get(rules, :freq)
cond do
!freq ->
raise ArgumentError, message: "Frequency is required"
!is_freq_valid(freq) ->
raise ArgumentError, message: "Frequency is invalid"
Map.has_key?(rules, :count) and Map.has_key?(rules, :until) ->
raise ArgumentError, message: "Can have either, count or until"
!is_count_valid(rules) ->
raise ArgumentError, message: "Count must be a non-negative integer."
!is_date(date) ->
raise ArgumentError, message: "You have to use date or datetime structure"
(is_time_freq(freq) or has_time_rule(rules)) and not has_time(date) ->
raise ArgumentError, message: "To use time rules you have to provide date with time"
true ->
nil
end
end
defp is_count_valid(%{count: nil}), do: true
defp is_count_valid(%{count: count}) when count >= 0, do: true
defp is_count_valid(%{count: count}) when count < 0, do: false
defp is_count_valid(%{}), do: true
end
|
lib/recurring_events.ex
| 0.876132 | 0.612657 |
recurring_events.ex
|
starcoder
|
defmodule SnowplowTracker.Events.Helper do
@moduledoc """
This module contains the implementations of
function used to set the default values in the payload.
"""
@uuid_version 4
@rfc_4122_variant10 2
@doc """
Generate a v4 UUID string to uniquely identify an event
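The value is random, but it is always a 36-character string in the canonical
8-4-4-4-12 format:

    iex> uuid = SnowplowTracker.Events.Helper.generate_uuid()
    iex> String.length(uuid)
    36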
"""
@spec generate_uuid() :: String.t()
def generate_uuid() do
uuid_hex()
|> uuid_to_string()
end
defp uuid_hex() do
<<u0::48, _::4, u1::12, _::2, u2::62>> = :crypto.strong_rand_bytes(16)
<<u0::48, @uuid_version::4, u1::12, @rfc_4122_variant10::2, u2::62>>
end
defp uuid_to_string(<<
       a1::4,
       a2::4,
       a3::4,
       a4::4,
       a5::4,
       a6::4,
       a7::4,
       a8::4,
       b1::4,
       b2::4,
       b3::4,
       b4::4,
       c1::4,
       c2::4,
       c3::4,
       c4::4,
       d1::4,
       d2::4,
       d3::4,
       d4::4,
       e1::4,
       e2::4,
       e3::4,
       e4::4,
       e5::4,
       e6::4,
       e7::4,
       e8::4,
       e9::4,
       e10::4,
       e11::4,
       e12::4
     >>) do
<<e(a1), e(a2), e(a3), e(a4), e(a5), e(a6), e(a7), e(a8), ?-, e(b1), e(b2), e(b3), e(b4), ?-,
e(c1), e(c2), e(c3), e(c4), ?-, e(d1), e(d2), e(d3), e(d4), ?-, e(e1), e(e2), e(e3), e(e4),
e(e5), e(e6), e(e7), e(e8), e(e9), e(e10), e(e11), e(e12)>>
end
@compile {:inline, e: 1}
defp e(0), do: ?0
defp e(1), do: ?1
defp e(2), do: ?2
defp e(3), do: ?3
defp e(4), do: ?4
defp e(5), do: ?5
defp e(6), do: ?6
defp e(7), do: ?7
defp e(8), do: ?8
defp e(9), do: ?9
defp e(10), do: ?a
defp e(11), do: ?b
defp e(12), do: ?c
defp e(13), do: ?d
defp e(14), do: ?e
defp e(15), do: ?f
@doc """
Generate a unix timestamp in milliseconds to identify the time of each event.
"""
@spec generate_timestamp(module()) :: integer()
def generate_timestamp(module \\ :os) do
module.system_time(:milli_seconds)
end
@doc """
This function is used to convert a given number to a string. If the number is of type float,
it is rounded off to 2 places and converted to a string.
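For example:

    iex> SnowplowTracker.Events.Helper.to_string(42)
    "42"
    iex> SnowplowTracker.Events.Helper.to_string(3.14159)
    "3.14"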
"""
@spec to_string(any()) :: nil | String.t()
def to_string(nil), do: nil
def to_string(number) when is_integer(number) do
Integer.to_string(number)
end
def to_string(number) when is_float(number) do
Float.round(number, 2) |> Float.to_string()
end
def to_string(number), do: number
end
|
lib/snowplow_tracker/events/helper.ex
| 0.775987 | 0.42057 |
helper.ex
|
starcoder
|
defmodule StarkInfra.IssuingHolder do
alias __MODULE__, as: IssuingHolder
alias StarkInfra.IssuingRule
alias StarkInfra.Utils.Rest
alias StarkInfra.Utils.API
alias StarkInfra.Utils.Check
alias StarkInfra.User.Project
alias StarkInfra.User.Organization
alias StarkInfra.Error
@moduledoc """
Groups IssuingHolder related functions
"""
@doc """
The IssuingHolder describes a card holder that may group several cards.
## Parameters (required):
- `:name` [binary]: card holder's name.
- `:tax_id` [binary]: card holder's tax ID
- `:external_id` [binary]: card holder's external ID
## Parameters (optional):
- `:rules` [list of IssuingRule structs, default []]: [EXPANDABLE] list of holder spending rules
- `:tags` [list of binarys, default []]: list of binarys for tagging. ex: ["travel", "food"]
## Attributes (return-only):
- `:id` [binary]: unique id returned when IssuingHolder is created. ex: "5656565656565656"
- `:status` [binary]: current IssuingHolder status. ex: "active", "blocked" or "canceled"
- `:updated` [DateTime]: latest update DateTime for the IssuingHolder. ex: ~U[2020-03-10 10:30:00Z]
- `:created` [DateTime]: creation DateTime for the IssuingHolder. ex: ~U[2020-03-10 10:30:00Z]
"""
@enforce_keys [
:name,
:tax_id,
:external_id
]
defstruct [
:id,
:status,
:updated,
:created,
:name,
:tax_id,
:external_id,
:rules,
:tags
]
@type t() :: %__MODULE__{}
@doc """
Send a list of IssuingHolder structs for creation in the Stark Infra API.
## Parameters (required):
- `:holders` [list of IssuingHolder structs]: list of IssuingHolder structs to be created in the API
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- list of IssuingHolder structs with updated attributes
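## Example:

A minimal sketch (field values are illustrative):

    {:ok, holders} = StarkInfra.IssuingHolder.create([
      %StarkInfra.IssuingHolder{
        name: "Tony Stark",
        tax_id: "012.345.678-90",
        external_id: "my-external-id-1"
      }
    ])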
"""
@spec create(
holders: [IssuingHolder.t()],
user: Project.t() | Organization.t() | nil
) ::
{ :ok, [IssuingHolder.t()] } |
{ :error, [error: Error.t()] }
def create(holders, options \\ []) do
Rest.post(resource(), holders, options)
end
@doc """
Same as create(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec create!(
holders: [IssuingHolder.t()],
user: Project.t() | Organization.t() | nil
) :: any
def create!(holders, options \\ []) do
Rest.post!(resource(), holders, options)
end
@doc """
Receive a single IssuingHolder struct previously created in the Stark Infra API by its id.
## Parameters (required):
- `:id` [binary]: struct unique id. ex: "5656565656565656"
## Options:
- `:expand` [list of binarys, default nil]: fields to expand information. ex: ["rules"]
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- IssuingHolder struct with updated attributes
"""
@spec get(
id: binary,
expand: [binary] | nil,
user: Project.t() | Organization.t() | nil
) ::
{ :ok, [IssuingHolder.t()] } |
{ :error, [error: Error.t()] }
def get(id, options \\ []) do
Rest.get_id(resource(), id, options)
end
@doc """
Same as get(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec get!(
id: binary,
expand: [binary] | nil,
user: Project.t() | Organization.t() | nil
) :: any
def get!(id, options \\ []) do
Rest.get_id!(resource(), id, options)
end
@doc """
Receive a stream of IssuingHolder structs previously created in the Stark Infra API
## Options:
- `:limit` [integer, default nil]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created only after specified date. ex: ~D[2020-03-25]
- `:before` [Date or string, default nil]: date filter for structs created only before specified date. ex: ~D[2020-03-25]
- `:status` [binary, default nil]: filter for status of retrieved structs. ex: "paid" or "registered"
- `:tags` [list of binarys, default nil]: tags to filter retrieved structs. ex: ["tony", "stark"]
- `:ids` [list of binarys, default nil]: list of ids to filter retrieved structs. ex: ["5656565656565656", "4545454545454545"]
- `:expand` [list of binarys, default nil]: fields to expand information. ex: ["rules"]
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- stream of IssuingHolder structs with updated attributes
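## Example:

A sketch of consuming the stream (a configured project or organization is assumed):

    StarkInfra.IssuingHolder.query(limit: 10, status: "active")
    |> Enum.each(&IO.inspect/1)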
"""
@spec query(
limit: integer | nil,
after: Date.t() | binary | nil,
before: Date.t() | binary | nil,
status: binary | nil,
tags: [binary] | nil,
ids: [binary] | nil,
expand: [binary] | nil,
user: Project.t() | Organization.t() | nil
) ::
{ :cont, [IssuingHolder.t()] } |
{ :error, [error: Error.t()] }
def query(options \\ []) do
Rest.get_list(resource(), options)
end
@doc """
Same as query(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec query!(
limit: integer | nil,
after: Date.t() | binary | nil,
before: Date.t() | binary | nil,
status: binary | nil,
tags: [binary] | nil,
ids: [binary] | nil,
expand: [binary] | nil,
user: Project.t() | Organization.t() | nil
) :: any
def query!(options \\ []) do
Rest.get_list!(resource(), options)
end
@doc """
Receive a list of IssuingHolder structs previously created in the Stark Infra API and the cursor to the next page.
## Options:
- `:limit` [integer, default 100]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created only after specified date. ex: ~D[2020-03-25]
- `:before` [Date or string, default nil]: date filter for structs created only before specified date. ex: ~D[2020-03-25]
- `:status` [string, default nil]: filter for status of retrieved structs. ex: "paid" or "registered"
- `:tags` [list of strings, default nil]: tags to filter retrieved structs. ex: ["tony", "stark"]
- `:ids` [list of strings, default nil]: list of ids to filter retrieved structs. ex: ["5656565656565656", "4545454545454545"]
- `:expand` [string, default nil]: fields to expand information. ex: "rules, securityCode, number, expiration"
- `:cursor` [string, default nil]: cursor returned on the previous page function call
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- list of IssuingHolder structs with updated attributes
- cursor to retrieve the next page of IssuingHolder structs
"""
@spec page(
limit: integer | nil,
after: Date.t() | binary | nil,
before: Date.t() | binary | nil,
status: binary | nil,
tags: [binary] | nil,
ids: [binary] | nil,
expand: [binary] | nil,
cursor: binary | nil,
user: Project.t() | Organization.t() | nil
) ::
{ :cont, {binary, [IssuingHolder.t()] }} |
{ :error, [error: Error.t()] }
def page(options \\ []) do
Rest.get_page(resource(), options)
end
@doc """
Same as page(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec page!(
limit: integer | nil,
after: Date.t() | binary | nil,
before: Date.t() | binary | nil,
status: binary | nil,
tags: [binary] | nil,
ids: [binary] | nil,
expand: [binary] | nil,
cursor: binary | nil,
user: Project.t() | Organization.t() | nil
) :: any
def page!(options \\ []) do
Rest.get_page!(resource(), options)
end
@doc """
Update an IssuingHolder by passing its id.
## Parameters (required):
- `:id` [string]: IssuingHolder id. ex: '5656565656565656'
## Parameters (optional):
- `:status` [string, default nil]: You may block the IssuingHolder by passing 'blocked' in the status.
- `:name` [string, default nil]: card holder name.
- `:tags` [list of strings, default nil]: list of strings for tagging.
- `:rules` [list of dictionaries, default nil]: list of dictionaries with "amount": int, "currencyCode": string, "id": string, "interval": string, "name": string pairs
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- target IssuingHolder with updated attributes
"""
@spec update(
id: binary,
status: binary | nil,
name: binary | nil,
tags: [binary] | nil,
rules: [IssuingRule.t()] | nil,
user: Project.t() | Organization.t() | nil
) ::
{ :ok, IssuingHolder.t() } |
{ :error, [error: Error.t()] }
def update(id, parameters \\ []) do
Rest.patch_id(resource(), id, parameters)
end
@doc """
Same as update(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec update!(
id: binary,
status: binary | nil,
name: binary | nil,
tags: [binary] | nil,
rules: [IssuingRule.t()] | nil,
user: Project.t() | Organization.t() | nil
) :: any
def update!(id, parameters \\ []) do
Rest.patch_id!(resource(), id, parameters)
end
@doc """
Cancel an IssuingHolder entity previously created in the Stark Infra API.
## Parameters (required):
- `:id` [string]: IssuingHolder unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- canceled IssuingHolder struct
"""
@spec cancel(
id: binary,
user: Project.t() | Organization.t() | nil
) ::
{ :ok, IssuingHolder.t() } |
{ :error, [error: Error.t()] }
def cancel(id, options \\ []) do
Rest.delete_id(resource(), id, options)
end
@doc """
Same as cancel(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec cancel!(
id: binary,
user: Project.t() | Organization.t() | nil
) :: any
def cancel!(id, options \\ []) do
Rest.delete_id!(resource(), id, options)
end
@doc false
def resource() do
{
"IssuingHolder",
&resource_maker/1
}
end
@doc false
def resource_maker(json) do
%IssuingHolder{
id: json[:id],
status: json[:status],
updated: json[:updated] |> Check.datetime(),
created: json[:created] |> Check.datetime(),
name: json[:name],
tax_id: json[:tax_id],
external_id: json[:external_id],
rules: json[:rules] |> Enum.map(fn rule -> API.from_api_json(rule, &IssuingRule.resource_maker/1) end),
tags: json[:tags]
}
end
end
|
lib/issuing_holder/issuing_holder.ex
| 0.897622 | 0.551091 |
issuing_holder.ex
|
starcoder
|
defmodule Skooma do
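@moduledoc """
Validates data against schemas built from plain Elixir terms.

A short sketch of usage (the basic type validators return `:ok` on success):

    iex> Skooma.valid?("hello", :string)
    :ok
    iex> Skooma.valid?(42, [:int])
    :ok
"""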
require Logger
alias Skooma.Basic
def valid?(data, schema, path \\ []) do
results =
cond do
is_atom(schema) ->
valid?(data, [schema], path)
is_tuple(schema) ->
validate_tuple(data, schema, path)
Keyword.keyword?(schema) ->
validate_keyword(data, schema, path)
is_map(schema) ->
Skooma.Map.validate_map(data, schema, path)
Enum.member?(schema, :list) ->
validate_list(data, schema, path)
Enum.member?(schema, :not_required) ->
handle_not_required(data, schema, path)
Enum.member?(schema, :map) ->
Skooma.Map.nested_map(data, schema, path)
Enum.member?(schema, :union) ->
union_handler(data, schema, path)
Enum.member?(schema, :string) ->
Basic.validator(&is_binary/1, "STRING", data, schema, path)
Enum.member?(schema, :int) ->
Basic.validator(&is_integer/1, "INTEGER", data, schema, path)
Enum.member?(schema, :float) ->
Basic.validator(&is_float/1, "FLOAT", data, schema, path)
Enum.member?(schema, :number) ->
Basic.validator(&is_number/1, "NUMBER", data, schema, path)
Enum.member?(schema, :bool) ->
Basic.validator(&is_boolean/1, "BOOLEAN", data, schema, path)
Enum.member?(schema, :atom) ->
atom_handler(data, schema, path)
Enum.member?(schema, :any) ->
:ok
true ->
{:error, {path, "Your data is all jacked up"}}
end
handle_results(results)
end
defp handle_results(:ok), do: :ok
defp handle_results({:error, error}), do: {:error, [error]}
defp handle_results(results) do
case results |> Enum.reject(&(&1 == :ok)) do
[] ->
:ok
errors ->
errors
|> List.flatten()
|> Enum.map(fn {:error, error} -> {:error, List.flatten([error])} end)
|> Enum.map(fn {:error, error} -> error end)
|> List.flatten()
|> (fn n -> {:error, n} end).()
end
end
defp atom_handler(data, schema, path) do
Basic.validator(
fn value -> is_atom(value) and not is_nil(value) end,
"ATOM",
data,
schema,
path
)
end
defp union_handler(data, schema, path) do
schemas = Enum.find(schema, &is_list/1)
results = Enum.map(schemas, &valid?(data, &1, path))
if Enum.any?(results, &(&1 == :ok)) do
:ok
else
results
end
end
defp handle_not_required(data, schema, path) do
if data == nil do
:ok
else
valid?(data, Enum.reject(schema, &(&1 == :not_required)), path)
end
end
defp validate_keyword(data, schema, path) do
if Keyword.keys(data) |> length == Keyword.keys(schema) |> length do
Enum.map(data, fn {k, v} -> valid?(v, schema[k], path ++ [k]) end)
|> Enum.reject(&(&1 == :ok))
else
{:error, {path, "missing some keys"}}
end
end
defp validate_list(data, schema, path) do
cond do
is_list(data) ->
list_schema =
case Enum.reject(schema, &(&1 == :list)) do
[[:list | _] = nested_list_schema | _] -> nested_list_schema
[[:union | _] = union_list_schema | _] -> union_list_schema
list_schema -> list_schema
end
data
|> Enum.with_index()
|> Enum.map(fn {v, k} -> valid?(v, list_schema, path ++ [k]) end)
is_list(schema) and :not_required in schema and data == nil ->
:ok
true ->
{:error, {path, "expected list"}}
end
end
defp validate_tuple(data, schema, path) do
data_list = Tuple.to_list(data)
schema_list = Tuple.to_list(schema)
if Enum.count(data_list) == Enum.count(schema_list) do
Enum.zip(data_list, schema_list)
|> Enum.with_index()
|> Enum.map(fn {v, k} -> valid?(elem(v, 0), elem(v, 1), path ++ [k]) end)
# |> Enum.map(&(valid?(elem(&1, 0), elem(&1, 1))))
|> Enum.reject(&(&1 == :ok))
else
{:error, {path, "tuple schema doesn't match tuple length"}}
end
end
end
|
lib/skooma.ex
| 0.507812 | 0.465934 |
skooma.ex
|
starcoder
|
defmodule XDR.IntError do
@moduledoc """
This module contains the definition of `XDR.IntError` exception that may be raised by the `XDR.Int` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.IntError` exception with the message of the `error_type` passed.
"""
def exception(:not_integer) do
new("The value which you try to encode is not an integer")
end
def exception(:not_binary) do
new("The value which you try to decode must be a binary value, for example: <<0, 0, 0, 2>>")
end
def exception(:exceed_upper_limit) do
new(
"The integer which you try to encode exceed the upper limit of an integer, the value must be less than 2_147_483_647"
)
end
def exception(:exceed_lower_limit) do
new(
"The integer which you try to encode exceed the lower limit of an integer, the value must be more than -2_147_483_648"
)
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.IntError{message: msg}
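# Raising follows the standard exception contract, e.g.:
#
#   raise XDR.IntError, :not_integer
#   ** (XDR.IntError) The value which you try to encode is not an integer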
end
defmodule XDR.UIntError do
@moduledoc """
This module contains the definition of `XDR.UIntError` exception that may be raised by the `XDR.UInt` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.UIntError` exception with the message of the `error_type` passed.
"""
def exception(:not_integer) do
new("The value which you try to encode is not an integer")
end
def exception(:not_binary) do
new("The value which you try to decode must be a binary value, for example: <<0, 0, 0, 2>>")
end
def exception(:exceed_upper_limit) do
new(
"The integer which you try to encode exceed the upper limit of an unsigned integer, the value must be less than 4_294_967_295"
)
end
def exception(:exceed_lower_limit) do
new(
"The integer which you try to encode exceed the lower limit of an unsigned integer, the value must be more than 0"
)
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.UIntError{message: msg}
end
defmodule XDR.EnumError do
@moduledoc """
This module contains the definition of `XDR.EnumError` exception that may be raised by the `XDR.Enum` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.EnumError` exception with the message of the `error_type` passed.
"""
def exception(:not_list) do
new("The declaration inside the Enum structure isn't a list")
end
def exception(:not_an_atom) do
new("The name of the key which you try to encode isn't an atom")
end
def exception(:not_binary) do
new("The value which you try to decode must be a binary value, for example: <<0, 0, 0, 2>>")
end
def exception(:invalid_key) do
new("The key which you try to encode doesn't belong to the current declarations")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.EnumError{message: msg}
end
defmodule XDR.BoolError do
@moduledoc """
This module contains the definition of `XDR.BoolError` exception that may be raised by the `XDR.Bool` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.BoolError` exception with the message of the `error_type` passed.
"""
def exception(:not_boolean) do
new("The value which you try to encode is not a boolean")
end
def exception(:invalid_value) do
new("The value which you try to decode must be <<0, 0, 0, 0>> or <<0, 0, 0, 1>>")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.BoolError{message: msg}
end
defmodule XDR.HyperIntError do
@moduledoc """
This module contains the definition of `XDR.HyperIntError` exception that may be raised by the `XDR.HyperInt` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.HyperIntError` exception with the message of the `error_type` passed.
"""
def exception(:not_integer) do
new("The value which you try to encode is not an integer")
end
def exception(:not_binary) do
new(
"The value which you try to decode must be a binary value, for example: <<0, 0, 0, 0, 0, 0, 0, 5>>"
)
end
def exception(:exceed_upper_limit) do
new(
"The integer which you try to encode exceed the upper limit of an Hyper Integer, the value must be less than 9_223_372_036_854_775_807"
)
end
def exception(:exceed_lower_limit) do
new(
"The integer which you try to encode exceed the lower limit of an Hyper Integer, the value must be more than -9_223_372_036_854_775_808"
)
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.HyperIntError{message: msg}
end
defmodule XDR.HyperUIntError do
@moduledoc """
This module contains the definition of `XDR.HyperUIntError` exception that may be raised by the `XDR.HyperUInt` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.HyperUIntError` exception with the message of the `error_type` passed.
"""
def exception(:not_integer) do
new("The value which you try to encode is not an integer")
end
def exception(:not_binary) do
new(
"The value which you try to decode must be a binary value, for example: <<0, 0, 0, 0, 0, 0, 0, 5>>"
)
end
def exception(:exceed_upper_limit) do
new(
"The integer which you try to encode exceed the upper limit of an Hyper Unsigned Integer, the value must be less than 18_446_744_073_709_551_615"
)
end
def exception(:exceed_lower_limit) do
new(
"The integer which you try to encode exceed the lower limit of an Hyper Unsigned Integer, the value must be more than 0"
)
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.HyperUIntError{message: msg}
end
defmodule XDR.FloatError do
@moduledoc """
This module contains the definition of `XDR.FloatError` exception that may be raised by the `XDR.Float` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.FloatError` exception with the message of the `error_type` passed.
"""
def exception(:not_number) do
new("The value which you try to encode is not an integer or float value")
end
def exception(:not_binary) do
new("The value which you try to decode must be a binary value, for example: <<0, 0, 0, 2>>")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.FloatError{message: msg}
end
defmodule XDR.DoubleFloatError do
@moduledoc """
This module contains the definition of `XDR.DoubleFloatError` exception that may be raised by the `XDR.DoubleFloat` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.DoubleFloatError` exception with the message of the `error_type` passed.
"""
def exception(:not_number) do
new("The value which you try to encode is not an integer or float value")
end
def exception(:not_binary) do
new(
"The value which you try to decode must be a binary value, for example: <<0, 0, 0, 0, 0, 0, 0, 5>>"
)
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.DoubleFloatError{message: msg}
end
defmodule XDR.FixedOpaqueError do
@moduledoc """
This module contains the definition of `XDR.FixedOpaqueError` exception that may be raised by the `XDR.FixedOpaque` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.FixedOpaqueError` exception with the message of the `error_type` passed.
"""
def exception(:not_number) do
new("The value which you pass through parameters is not an integer")
end
def exception(:not_binary) do
new(
"The value which you pass through parameters must be a binary value, for example: <<0, 0, 0, 5>>"
)
end
def exception(:invalid_length) do
new(
"The length that is passed through parameters must be equal or less to the byte size of the XDR to complete"
)
end
def exception(:exceed_length) do
new("The length is bigger than the byte size of the XDR")
end
def exception(:not_valid_binary) do
new("The binary size of the binary which you try to decode must be a multiple of 4")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.FixedOpaqueError{message: msg}
end
defmodule XDR.VariableOpaqueError do
@moduledoc """
This module contains the definition of `XDR.VariableOpaqueError` exception that may be raised by the `XDR.VariableOpaque` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.VariableOpaqueError` exception with the message of the `error_type` passed.
"""
def exception(:not_number) do
new("The value which you pass through parameters is not an integer")
end
def exception(:not_binary) do
new(
"The value which you pass through parameters must be a binary value, for example: <<0, 0, 0, 5>>"
)
end
def exception(:invalid_length) do
new(
"The max length that is passed through parameters must be biger to the byte size of the XDR"
)
end
def exception(:exceed_lower_bound) do
new("The minimum value of the length of the variable is 0")
end
def exception(:exceed_upper_bound) do
new("The maximum value of the length of the variable is 4_294_967_295")
end
def exception(:length_over_max) do
new(
"The number which represents the length from decode the opaque as UInt is bigger than the defined max (max by default is 4_294_967_295)"
)
end
def exception(:length_over_rest) do
new("The XDR has an invalid length, it must be less than byte-size of the rest")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.VariableOpaqueError{message: msg}
end
defmodule XDR.FixedArrayError do
@moduledoc """
This module contains the definition of `XDR.FixedArrayError` exception that may be raised by the `XDR.FixedArray` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.FixedArrayError` exception with the message of the `error_type` passed.
"""
def exception(:invalid_length) do
new("the length of the array and the length must be the same")
end
def exception(:not_list) do
new("the value which you try to encode must be a list")
end
def exception(:not_number) do
new("the length received by parameter must be an integer")
end
def exception(:not_binary) do
new("the value which you try to decode must be a binary value")
end
def exception(:not_valid_binary) do
new("the value which you try to decode must have a multiple of 4 byte-size")
end
def exception(:invalid_type) do
new("the type must be a module")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.FixedArrayError{message: msg}
end
defmodule XDR.VariableArrayError do
@moduledoc """
This module contains the definition of `XDR.VariableArrayError` exception that may be raised by the `XDR.VariableArray` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.VariableArrayError` exception with the message of the `error_type` passed.
"""
def exception(:not_list) do
new("the value which you try to encode must be a list")
end
def exception(:not_number) do
new("the max length must be an integer value")
end
def exception(:not_binary) do
new(
"The value which you pass through parameters must be a binary value, for example: <<0, 0, 0, 5>>"
)
end
def exception(:exceed_lower_bound) do
new("The minimum value of the length of the variable is 1")
end
def exception(:exceed_upper_bound) do
new("The maximum value of the length of the variable is 4_294_967_295")
end
def exception(:length_over_max) do
new(
"The number which represents the length from decode the opaque as UInt is bigger than the defined max"
)
end
def exception(:invalid_length) do
new("The length of the binary exceeds the max_length of the type")
end
def exception(:invalid_binary) do
new(
"The data which you try to decode has an invalid number of bytes, it must be equal to or greater than the size of the array multiplied by 4"
)
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.VariableArrayError{message: msg}
end
defmodule XDR.StructError do
@moduledoc """
This module contains the definition of `XDR.StructError` exception that may be raised by the `XDR.Struct` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.StructError` exception with the message of the `error_type` passed.
"""
def exception(:not_list) do
new("The :components received by parameter must be a keyword list")
end
def exception(:empty_list) do
new("The :components must not be empty, it must be a keyword list")
end
def exception(:not_binary) do
new("The :struct received by parameter must be a binary value, for example: <<0, 0, 0, 5>>")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.StructError{message: msg}
end
defmodule XDR.UnionError do
@moduledoc """
This module contains the definition of `XDR.UnionError` exception that may be raised by the `XDR.Union` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.UnionError` exception with the message of the `error_type` passed.
"""
def exception(:not_list) do
new(
"The :declarations received by parameter must be a keyword list which belongs to an XDR.Enum"
)
end
def exception(:not_binary) do
new(
"The :identifier received by parameter must be a binary value, for example: <<0, 0, 0, 5>>"
)
end
def exception(:not_number) do
new("The value which you try to decode is not an integer value")
end
def exception(:not_atom) do
new("The :identifier which you try to decode from the Enum Union is not an atom")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.UnionError{message: msg}
end
defmodule XDR.VoidError do
@moduledoc """
This module contains the definition of `XDR.VoidError` exception that may be raised by the `XDR.Void` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.VoidError` exception with the message of the `error_type` passed.
"""
def exception(:not_binary) do
new("The value which you try to decode must be a binary value, for example: <<0, 0, 0, 5>>")
end
def exception(:not_void) do
new("The value which you try to encode is not void")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.VoidError{message: msg}
end
defmodule XDR.OptionalError do
@moduledoc """
This module contains the definition of `XDR.OptionalError` exception that may be raised by the `XDR.Optional` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.OptionalError` exception with the message of the `error_type` passed.
"""
def exception(:not_valid) do
new("The value which you try to encode must be Int, UInt or Enum")
end
def exception(:not_binary) do
new("The value which you try to decode must be a binary value")
end
def exception(:not_module) do
new("The type of the optional value must be the module which it belongs")
end
@spec new(msg :: String.t()) :: struct()
defp new(msg), do: %XDR.OptionalError{message: msg}
end
defmodule XDR.StringError do
@moduledoc """
This module contains the definition of `XDR.StringError` exception that may be raised by the `XDR.String` module.
"""
defexception [:message]
@impl true
@doc """
Create a `XDR.StringError` exception with the message of the `error_type` passed.
"""
def exception(:not_bitstring) do
new("The value you are trying to encode must be a bitstring value")
end
def exception(:invalid_length) do
new("The length of the string exceeds the max length allowed")
end
def exception(:not_binary) do
new("The value you are trying to decode must be a binary value")
end
@spec new(msg :: binary()) :: struct()
defp new(msg), do: %XDR.StringError{message: msg}
end
|
lib/xdr/error.ex
| 0.927937 | 0.591251 |
error.ex
|
starcoder
|
defmodule Pixie.Extension do
@moduledoc """
Used to implement Bayeux extensions, which can be used to filter or change
incoming messages and the responses sent back to the client.
For example:
```elixir
defmodule AuthenticationExtension do
use Pixie.Extension
def incoming(%Event{message: %{ext: %{username: u, password: p}} = message} = event) do
case User.authenticate(u, p) do
:ok ->
%{event | message: %{message | ext: nil}}
:error ->
%{event | response: %{event.response | error: "Authentication Failed"}}
end
end
def incoming(%Event{} = event) do
%{event | response: %{event.response | error: "Authentication Failed"}}
end
end
```
Note that you *must* always provide a "catch all" function head that matches
all other events and returns them - otherwise unmatched events will raise
exceptions at runtime.
You can dynamically add your extension to the extension stack at runtime using
`YourExtension.register` and `YourExtension.unregister`
"""
defmacro __using__(_opts) do
quote do
@behaviour Pixie.Extension
alias Pixie.Event
alias Pixie.Protocol.Error
def register do
Pixie.ExtensionRegistry.register __MODULE__
end
def unregister do
Pixie.ExtensionRegistry.unregister __MODULE__
end
defoverridable [register: 0, unregister: 0]
end
end
@doc """
Can be used to modify the `Pixie.Event` struct that is passed in.
The incoming message is provided in the `message` property, whereas the
response being sent back to the sending client is stored in the `response`
property.
Note that if you want to stop the message from being delivered then set it
to `nil`, likewise if you want to stop a response being sent to the client.
You must *always* return a Pixie.Event struct from `incoming/1`.
"""
@callback incoming(event :: Pixie.Event.t) :: Pixie.Event.t
@doc """
Can be used to modify an outgoing message before it is passed to the channel
for final delivery to clients.
If you wish to stop this message being delivered then return `nil` otherwise
you must always return a message back to the caller.
"""
@callback outgoing(message :: Pixie.Message.Publish.t) :: Pixie.Message.Publish.t
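# An outgoing filter might, for instance, drop messages published to an
# internal channel (illustrative sketch; assumes the publish message exposes
# a `channel` field):
#
#   def outgoing(%Pixie.Message.Publish{channel: "/internal" <> _}), do: nil
#   def outgoing(message), do: message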
end
|
lib/pixie/extension.ex
| 0.824603 | 0.636621 |
extension.ex
|
starcoder
|
defmodule Absinthe.Phase.Document.Validation.FieldsOnCorrectType do
@moduledoc false
# Validates document to ensure that all fields are provided on the correct type.
alias Absinthe.{Blueprint, Phase, Phase.Document.Validation.Utils, Schema, Type}
use Absinthe.Phase
@doc """
Run the validation.
"""
@spec run(Blueprint.t(), Keyword.t()) :: Phase.result_t()
def run(input, _options \\ []) do
result = Blueprint.prewalk(input, &handle_node(&1, input))
{:ok, result}
end
@spec handle_node(Blueprint.node_t(), Schema.t()) :: Blueprint.node_t()
defp handle_node(%Blueprint.Document.Operation{schema_node: nil} = node, _) do
error = %Phase.Error{
phase: __MODULE__,
message: "Operation \"#{node.type}\" not supported",
locations: [node.source_location]
}
node
|> flag_invalid(:unknown_operation)
|> put_error(error)
end
defp handle_node(
%{selections: selections, schema_node: parent_schema_node} = node,
%{schema: schema} = input
)
when not is_nil(parent_schema_node) do
possible_parent_types = possible_types(parent_schema_node, schema)
selections =
Enum.map(selections, fn
%Blueprint.Document.Field{schema_node: nil} = field ->
type = named_type(parent_schema_node, schema)
field
|> flag_invalid(:unknown_field)
|> put_error(
error(
field,
type.name,
suggested_type_names(field.name, type, input),
suggested_field_names(field.name, type, input)
)
)
%Blueprint.Document.Fragment.Spread{errors: []} = spread ->
fragment = Enum.find(input.fragments, &(&1.name == spread.name))
possible_child_types = possible_types(fragment.schema_node, schema)
if Enum.any?(possible_child_types, &(&1 in possible_parent_types)) do
spread
else
spread_error(spread, possible_parent_types, possible_child_types, schema)
end
%Blueprint.Document.Fragment.Inline{} = fragment ->
possible_child_types = possible_types(fragment.schema_node, schema)
if Enum.any?(possible_child_types, &(&1 in possible_parent_types)) do
fragment
else
spread_error(fragment, possible_parent_types, possible_child_types, schema)
end
other ->
other
end)
%{node | selections: selections}
end
defp handle_node(node, _) do
node
end
defp idents_to_names(idents, schema) do
for ident <- idents do
Absinthe.Schema.lookup_type(schema, ident).name
end
end
defp spread_error(spread, parent_types_idents, child_types_idents, schema) do
parent_types = idents_to_names(parent_types_idents, schema)
child_types = idents_to_names(child_types_idents, schema)
msg = """
Fragment spread has no type overlap with parent.
Parent possible types: #{inspect(parent_types)}
Spread possible types: #{inspect(child_types)}
"""
error = %Phase.Error{
phase: __MODULE__,
message: msg,
locations: [spread.source_location]
}
spread
|> flag_invalid(:invalid_spread)
|> put_error(error)
end
defp possible_types(%{type: type}, schema) do
possible_types(type, schema)
end
defp possible_types(type, schema) do
schema
|> Absinthe.Schema.lookup_type(type)
|> case do
%Type.Object{identifier: identifier} ->
[identifier]
%Type.Interface{identifier: identifier} ->
schema.__absinthe_interface_implementors__
|> Map.fetch!(identifier)
%Type.Union{types: types} ->
types
_ ->
[]
end
end
@spec named_type(Type.t(), Schema.t()) :: Type.named_t()
defp named_type(%Type.Field{} = node, schema) do
Schema.lookup_type(schema, node.type)
end
defp named_type(%{name: _} = node, _) do
node
end
# Generate the error for a field
@spec error(Blueprint.node_t(), String.t(), [String.t()], [String.t()]) :: Phase.Error.t()
defp error(field_node, parent_type_name, type_suggestions, field_suggestions) do
%Phase.Error{
phase: __MODULE__,
message:
error_message(field_node.name, parent_type_name, type_suggestions, field_suggestions),
locations: [field_node.source_location]
}
end
@doc """
Generate an error for a field
"""
@spec error_message(String.t(), String.t(), [String.t()], [String.t()]) :: String.t()
def error_message(field_name, type_name, type_suggestions \\ [], field_suggestions \\ [])
def error_message(field_name, type_name, [], []) do
~s(Cannot query field "#{field_name}" on type "#{type_name}".)
end
def error_message(field_name, type_name, [], field_suggestions) do
error_message(field_name, type_name) <>
Utils.MessageSuggestions.suggest_message(field_suggestions)
end
def error_message(field_name, type_name, type_suggestions, []) do
error_message(field_name, type_name) <>
Utils.MessageSuggestions.suggest_fragment_message(type_suggestions)
end
def error_message(field_name, type_name, type_suggestions, _) do
error_message(field_name, type_name, type_suggestions)
end
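# For example, with no suggestions available:
#
#   error_message("handle", "User")
#   #=> ~s(Cannot query field "handle" on type "User".)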
defp suggested_type_names(external_field_name, type, blueprint) do
internal_field_name =
case blueprint.adapter.to_internal_name(external_field_name, :field) do
nil -> external_field_name
internal_field_name -> internal_field_name
end
possible_types = find_possible_types(internal_field_name, type, blueprint.schema)
possible_interfaces =
find_possible_interfaces(internal_field_name, possible_types, blueprint.schema)
possible_interfaces
|> Enum.map(& &1.name)
|> Enum.concat(Enum.map(possible_types, & &1.name))
|> Enum.sort()
end
defp suggested_field_names(external_field_name, %{fields: _} = type, blueprint) do
internal_field_name =
case blueprint.adapter.to_internal_name(external_field_name, :field) do
nil -> external_field_name
internal_field_name -> internal_field_name
end
Map.values(type.fields)
|> Enum.map(& &1.name)
|> Absinthe.Utils.Suggestion.sort_list(internal_field_name)
|> Enum.map(&blueprint.adapter.to_external_name(&1, :field))
|> Enum.sort()
end
defp suggested_field_names(_, _, _) do
[]
end
defp find_possible_interfaces(field_name, possible_types, schema) do
possible_types
|> types_to_interface_idents
|> Enum.uniq()
|> sort_by_implementation_count(possible_types)
|> Enum.map(&Schema.lookup_type(schema, &1))
|> types_with_field(field_name)
end
defp sort_by_implementation_count(iface_idents, types) do
iface_idents
|> Enum.sort_by(fn iface ->
Enum.count(types, fn
%{interfaces: ifaces} -> Enum.member?(ifaces, iface)
_ -> false
end)
end)
|> Enum.reverse()
end
defp types_to_interface_idents(types) do
Enum.flat_map(types, fn
%{interfaces: ifaces} ->
ifaces
_ ->
[]
end)
end
defp find_possible_types(field_name, type, schema) do
schema
|> Schema.concrete_types(Type.unwrap(type))
|> types_with_field(field_name)
end
defp types_with_field(types, field_name) do
Enum.filter(types, &type_with_field?(&1, field_name))
end
defp type_with_field?(%{fields: fields}, field_name) do
fields
|> Map.values()
|> Enum.any?(&(&1.name == field_name))
end
defp type_with_field?(_, _) do
false
end
end
|
lib/absinthe/phase/document/validation/fields_on_correct_type.ex
| 0.760562 | 0.417093 |
fields_on_correct_type.ex
|
starcoder
|
defmodule Holidefs.Holiday do
@moduledoc """
A holiday itself.
"""
alias Holidefs.DateCalculator
alias Holidefs.Definition.Rule
alias Holidefs.Holiday
alias Holidefs.Options
defstruct [:name, :raw_date, :observed_date, :date, informal?: false]
@type t :: %Holidefs.Holiday{
name: String.t(),
raw_date: Date.t(),
observed_date: Date.t(),
date: Date.t(),
informal?: boolean
}
@doc """
Returns a list of holidays for the definition rule in the given year
"""
@spec from_rule(atom, Holidefs.Definition.Rule.t(), integer, Holidefs.Options.t()) :: [t]
def from_rule(code, %Rule{year_ranges: year_ranges} = rule, year, opts \\ %Options{}) do
if in_year_ranges?(year_ranges, year) do
build_from_rule(code, rule, year, opts)
else
[]
end
end
defp in_year_ranges?(nil, _) do
true
end
defp in_year_ranges?(list, year) when is_list(list) do
Enum.all?(list, &in_year_range?(&1, year))
end
defp in_year_range?(%{"before" => before_year}, year), do: year <= before_year
defp in_year_range?(%{"after" => after_year}, year), do: year >= after_year
defp in_year_range?(%{"limited" => years}, year), do: year in years
defp in_year_range?(%{"between" => between}, year) do
[start_year, end_year] = String.split(between, "..")
{start_year, ""} = Integer.parse(start_year)
{end_year, ""} = Integer.parse(end_year)
year in start_year..end_year
end
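# For example, year_ranges of [%{"after" => 2000}, %{"before" => 2010}] limit
# a rule to the years 2000..2010, since every range in the list must match.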
defp build_from_rule(
code,
%Rule{name: name, function: fun, informal?: informal?} = rule,
year,
opts
)
when is_function(fun) do
name = translate_name(code, name)
case fun.(year, rule) do
list when is_list(list) ->
for date <- list do
%Holiday{
name: name,
raw_date: date,
observed_date: load_observed(rule, date),
date: load_date(rule, date, opts),
informal?: informal?
}
end
%Date{} = date ->
[
%Holiday{
name: name,
raw_date: date,
observed_date: load_observed(rule, date),
date: load_date(rule, date, opts),
informal?: informal?
}
]
nil ->
[]
end
end
defp build_from_rule(
code,
%Rule{name: name, month: month, day: day, informal?: informal?} = rule,
year,
opts
)
when nil not in [month, day] do
{:ok, date} = Date.new(year, month, day)
[
%Holiday{
name: translate_name(code, name),
raw_date: date,
observed_date: load_observed(rule, date),
date: load_date(rule, date, opts),
informal?: informal?
}
]
end
defp build_from_rule(
code,
%Rule{
name: name,
month: month,
week: week,
weekday: weekday,
informal?: informal?
} = rule,
year,
opts
) do
date = DateCalculator.nth_day_of_week(year, month, week, weekday)
[
%Holiday{
name: translate_name(code, name),
raw_date: date,
observed_date: load_observed(rule, date),
date: load_date(rule, date, opts),
informal?: informal?
}
]
end
defp load_date(rule, date, %Options{observed?: observed?}) do
load_date(rule, date, observed?)
end
defp load_date(rule, date, true) do
load_observed(rule, date)
end
defp load_date(_rule, date, false) do
date
end
defp load_observed(%Rule{observed: nil}, date) do
date
end
defp load_observed(%Rule{observed: fun} = rule, date) when is_function(fun) do
fun.(date, rule)
end
@doc """
Returns the translated name of the given holiday
"""
@spec translate_name(atom, String.t()) :: String.t()
def translate_name(code, name) do
Gettext.dgettext(Holidefs.Gettext, Atom.to_string(code), name)
end
end
|
lib/holidefs/holiday.ex
| 0.846101 | 0.42937 |
holiday.ex
|
starcoder
|
defmodule JPMarc.Leader do
@moduledoc """
Tools for working with JPMARC Leader
"""
@typedoc """
Type that represents `JPMarc.Leader` struct
It is constructed with `:length` as an integer, `:status`, `:type` and `:level` as Strings, `:base` as an integer, and `:encoding` and `:format` as Strings
"""
@type t :: %__MODULE__{length: integer, status: String.t, type: String.t, level: String.t, base: integer, encoding: String.t, format: String.t}
@derive [Poison.Encoder]
defstruct length: 0, status: "n", type: "a", level: "m", base: 0, encoding: "z", format: "i"
@doc """
Decode the string representation to JPMarc.Leader struct
"""
@spec decode(String.t)::t
def decode(leader) do
<<length::bytes-size(5), status::bytes-size(1), type::bytes-size(1),
level::bytes-size(1), _::bytes-size(4), base::bytes-size(5), encoding::bytes-size(1), format::bytes-size(1), _::binary>> = leader
base =
case Integer.parse(base) do
{_int, ""} -> base
_ -> "00000"
end
%__MODULE__{length: String.to_integer(length), status: status, type: type, level: level, base: String.to_integer(base), encoding: encoding, format: format}
end
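# Round-trip sketch with a standard 24-byte MARC leader (illustrative):
#
#   JPMarc.Leader.decode("00714cam a2200205zi 4500")
#   #=> %JPMarc.Leader{length: 714, status: "c", type: "a", level: "m",
#   #   base: 205, encoding: "z", format: "i"}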
@doc """
Return the MARC Format of the leader
"""
@spec to_marc(t)::String.t
def to_marc(l) do
length = l.length |> Integer.to_string |> String.pad_leading(5, "0")
base = l.base |> Integer.to_string |> String.pad_leading(5, "0")
"#{length}#{l.status}#{l.type}#{l.level} a22#{base}#{l.encoding}#{l.format} 4500"
end
@doc """
Return a tuple representing its xml element
"""
@spec to_xml(JPMarc.Leader.t)::tuple
def to_xml(leader) do
{:leader, nil, JPMarc.Leader.to_marc(leader)}
end
@doc"""
Return a text representing the leader
"""
@spec to_text(t)::String.t
def to_text(leader) do
to_marc(leader)
end
defimpl Poison.Encoder, for: JPMarc.Leader do
def encode(leader, _options), do: "\"#{JPMarc.Leader.to_marc(leader)}\""
end
defimpl Inspect, for: JPMarc.Leader do
def inspect(leader, _opts) do
JPMarc.Leader.to_marc(leader)
end
end
defimpl String.Chars, for: JPMarc.Leader do
def to_string(leader) do
JPMarc.Leader.to_marc(leader)
end
end
end
|
lib/jpmarc/leader.ex
| 0.897666 | 0.44734 |
leader.ex
|
starcoder
|
defmodule Ecto.Type do
@moduledoc """
Defines functions and the `Ecto.Type` behaviour for implementing
custom types.
A custom type expects 4 functions to be implemented, all documented
and described below. We also provide two examples of how custom
types can be used in Ecto to augment existing types or to provide
your own types.
## Augmenting types
Imagine you want your id field to be looked up by permalink.
For example, you want the following query to work:
permalink = "10-how-to-be-productive-with-elixir"
from p in Post, where: p.id == ^permalink
If `id` is an integer field, Ecto will fail in the query above
because it cannot cast the string to an integer. By using a
custom type, we can provide special casting behaviour while
still keeping the underlying Ecto type the same:
defmodule Permalink do
def type, do: :integer
# Provide our own casting rules.
def cast(string) when is_binary(string) do
case Integer.parse(string) do
{int, _} -> {:ok, int}
:error -> :error
end
end
# We should still accept integers
def cast(integer) when is_integer(integer), do: {:ok, integer}
# Everything else is a failure though
def cast(_), do: :error
# When loading data from the database, we are guaranteed to
# receive an integer (as databases are strict) and we will
# just return it to be stored in the model struct.
def load(integer) when is_integer(integer), do: {:ok, integer}
# When dumping data to the database, we *expect* an integer
# but any value could be inserted into the struct, so we need
# to guard against them.
def dump(integer) when is_integer(integer), do: {:ok, integer}
def dump(_), do: :error
end
Now, we can use our new field above as our primary key type in models:
defmodule Post do
use Ecto.Model
@primary_key {:id, Permalink, autogenerate: true}
schema "posts" do
...
end
end
## New types
In the previous example, we said we were augmenting an existing type
because we were keeping the underlying representation the same, the
value stored in the struct and the database was always an integer.
However, sometimes, we want to completely replace Ecto data types
stored in the models. This is for example how Ecto provides the
`Ecto.DateTime` struct as a replacement for the `:datetime` type.
Check the `Ecto.DateTime` implementation for an example on how
to implement such types.
"""
import Kernel, except: [match?: 2]
use Behaviour
@type t :: primitive | custom
@type primitive :: base | composite
@type custom :: atom
@typep base :: :integer | :float | :boolean | :string |
:binary | :decimal | :datetime | :time |
:date | :id | :binary_id | :map | :any
@typep composite :: {:array, base}
@base ~w(integer float boolean string binary decimal datetime time date id binary_id map any)a
@composite ~w(array)a
@doc """
Returns the underlying schema type for the custom type.
For example, if you want to provide your own datetime
structures, the type function should return `:datetime`.
"""
defcallback type :: base | custom
@doc """
Casts the given input to the custom type.
This callback is called on external input and can return any type,
as long as the `dump/1` function is able to convert the returned
value back into an Ecto native type. There are two situations where
this callback is called:
1. When casting values by `Ecto.Changeset`
2. When passing arguments to `Ecto.Query`
"""
defcallback cast(term) :: {:ok, term} | :error
@doc """
Loads the given term into a custom type.
This callback is called when loading data from the database and
receive an Ecto native type. It can return any type, as long as
the `dump/1` function is able to convert the returned value back
into an Ecto native type.
"""
defcallback load(term) :: {:ok, term} | :error
@doc """
Dumps the given term into an Ecto native type.
This callback is called with any term that was stored in the struct
and it needs to validate them and convert it to an Ecto native type.
"""
defcallback dump(term) :: {:ok, term} | :error
## Functions
@doc """
Checks if we have a primitive type.
iex> primitive?(:string)
true
iex> primitive?(Another)
false
iex> primitive?({:array, :string})
true
iex> primitive?({:array, Another})
true
"""
@spec primitive?(t) :: boolean
def primitive?({composite, _}) when composite in @composite, do: true
def primitive?(base) when base in @base, do: true
def primitive?(_), do: false
@doc """
Checks if the given atom can be used as composite type.
iex> composite?(:array)
true
iex> composite?(:string)
false
"""
@spec composite?(atom) :: boolean
def composite?(atom), do: atom in @composite
@doc """
Checks if the given atom can be used as base type.
iex> base?(:string)
true
iex> base?(:array)
false
iex> base?(Custom)
false
"""
@spec base?(atom) :: boolean
def base?(atom), do: atom in @base
@doc """
Retrieves the underlying type of a given type.
iex> type(:string)
:string
iex> type(Ecto.DateTime)
:datetime
iex> type({:array, :string})
{:array, :string}
iex> type({:array, Ecto.DateTime})
{:array, :datetime}
"""
@spec type(t) :: t
def type(type)
def type({:array, type}), do: {:array, type(type)}
def type(type) do
if primitive?(type) do
type
else
type.type
end
end
@doc """
Normalizes a type.
The only type normalizable is binary_id which comes
from the adapter.
"""
def normalize({comp, :binary_id}, %{binary_id: binary_id}), do: {comp, binary_id}
def normalize(:binary_id, %{binary_id: binary_id}), do: binary_id
def normalize(:binary_id, %{}), do: raise "adapter did not provide a type for :binary_id"
def normalize(type, _id_types), do: type
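# For example, with an adapter that maps binary ids to UUID strings:
#
#   normalize(:binary_id, %{binary_id: :string})            #=> :string
#   normalize({:array, :binary_id}, %{binary_id: :string})  #=> {:array, :string}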
@doc """
Checks if a given type matches with a primitive type
that can be found in queries.
iex> match?(:whatever, :any)
true
iex> match?(:any, :whatever)
true
iex> match?(:string, :string)
true
iex> match?({:array, :string}, {:array, :any})
true
iex> match?(Ecto.DateTime, :datetime)
true
iex> match?(Ecto.DateTime, :string)
false
"""
@spec match?(t, primitive) :: boolean
def match?(schema_type, query_type)
def match?(_left, :any), do: true
def match?(:any, _right), do: true
def match?(type, primitive) do
if primitive?(type) do
do_match?(type, primitive)
else
do_match?(type.type, primitive)
end
end
defp do_match?({outer, left}, {outer, right}), do: match?(left, right)
defp do_match?(:decimal, type) when type in [:float, :integer], do: true
defp do_match?(:binary_id, :binary), do: true
defp do_match?(:id, :integer), do: true
defp do_match?(type, type), do: true
defp do_match?(_, _), do: false
@doc """
Dumps a value to the given type.
Opposite to casting, dumping requires the returned value
to be a valid Ecto type, as it will be sent to the
underlying data store.
iex> dump(:string, nil)
{:ok, %Ecto.Query.Tagged{value: nil, type: :string}}
iex> dump(:string, "foo")
{:ok, "foo"}
iex> dump(:integer, 1)
{:ok, 1}
iex> dump(:integer, "10")
:error
iex> dump(:binary, "foo")
{:ok, %Ecto.Query.Tagged{value: "foo", type: :binary}}
iex> dump(:binary, 1)
:error
iex> dump({:array, :integer}, [1, 2, 3])
{:ok, [1, 2, 3]}
iex> dump({:array, :integer}, [1, "2", 3])
:error
iex> dump({:array, :binary}, ["1", "2", "3"])
{:ok, %Ecto.Query.Tagged{value: ["1", "2", "3"], type: {:array, :binary}}}
"""
@spec dump(t, term) :: {:ok, term} | :error
def dump(type, nil) do
{:ok, %Ecto.Query.Tagged{value: nil, type: type(type)}}
end
def dump({:array, type}, value) do
dump_array(type, value, [], false)
end
def dump(type, value) do
cond do
not primitive?(type) ->
type.dump(value)
of_base_type?(type, value) ->
{:ok, tag(type, value)}
true ->
:error
end
end
defp tag(:binary, value),
do: %Ecto.Query.Tagged{type: :binary, value: value}
defp tag(_type, value),
do: value
defp dump_array(type, [h|t], acc, tagged) do
case dump(type, h) do
{:ok, %Ecto.Query.Tagged{value: h}} ->
dump_array(type, t, [h|acc], true)
{:ok, h} ->
dump_array(type, t, [h|acc], tagged)
:error ->
:error
end
end
defp dump_array(type, [], acc, true) do
{:ok, %Ecto.Query.Tagged{value: Enum.reverse(acc), type: {:array, type(type)}}}
end
defp dump_array(_type, [], acc, false) do
{:ok, Enum.reverse(acc)}
end
@doc """
Same as `dump/2` but raises if value can't be dumped.
"""
@spec dump!(t, term) :: term | no_return
def dump!(type, term) do
case dump(type, term) do
{:ok, value} -> value
:error -> raise ArgumentError, "cannot dump `#{inspect term}` to type #{inspect type}"
end
end
@doc """
Loads a value with the given type.
Load is invoked when loading database native types
into a struct.
iex> load(:string, nil)
{:ok, nil}
iex> load(:string, "foo")
{:ok, "foo"}
iex> load(:integer, 1)
{:ok, 1}
iex> load(:integer, "10")
:error
"""
@spec load(t, term) :: {:ok, term} | :error
def load(_type, nil), do: {:ok, nil}
def load(:boolean, 0), do: {:ok, false}
def load(:boolean, 1), do: {:ok, true}
def load(:map, value) when is_binary(value) do
{:ok, Application.get_env(:ecto, :json_library).decode!(value)}
end
def load({:array, type}, value) do
array(type, value, &load/2, [])
end
def load(type, value) do
cond do
not primitive?(type) ->
type.load(value)
of_base_type?(type, value) ->
{:ok, value}
true ->
:error
end
end
@doc """
Same as `load/2` but raises if value can't be loaded.
"""
@spec load!(t, term) :: term | no_return
def load!(type, term) do
case load(type, term) do
{:ok, value} -> value
:error -> raise ArgumentError, "cannot load `#{inspect term}` as type #{inspect type}"
end
end
@doc """
Casts a value to the given type.
`cast/2` is used by the finder queries and changesets
to cast outside values to specific types.
Note that nil can be cast to all primitive types as data
stores allow nil to be set on any column. Custom data types
may want to handle nil specially though.
iex> cast(:any, "whatever")
{:ok, "whatever"}
iex> cast(:any, nil)
{:ok, nil}
iex> cast(:string, nil)
{:ok, nil}
iex> cast(:integer, 1)
{:ok, 1}
iex> cast(:integer, "1")
{:ok, 1}
iex> cast(:integer, "1.0")
:error
iex> cast(:id, 1)
{:ok, 1}
iex> cast(:id, "1")
{:ok, 1}
iex> cast(:id, "1.0")
:error
iex> cast(:float, 1.0)
{:ok, 1.0}
iex> cast(:float, 1)
{:ok, 1.0}
iex> cast(:float, "1")
{:ok, 1.0}
iex> cast(:float, "1.0")
{:ok, 1.0}
iex> cast(:float, "1-foo")
:error
iex> cast(:boolean, true)
{:ok, true}
iex> cast(:boolean, false)
{:ok, false}
iex> cast(:boolean, "1")
{:ok, true}
iex> cast(:boolean, "0")
{:ok, false}
iex> cast(:boolean, "whatever")
:error
iex> cast(:string, "beef")
{:ok, "beef"}
iex> cast(:binary, "beef")
{:ok, "beef"}
iex> cast(:decimal, Decimal.new(1.0))
{:ok, Decimal.new(1.0)}
iex> cast(:decimal, Decimal.new("1.0"))
{:ok, Decimal.new(1.0)}
iex> cast({:array, :integer}, [1, 2, 3])
{:ok, [1, 2, 3]}
iex> cast({:array, :integer}, ["1", "2", "3"])
{:ok, [1, 2, 3]}
iex> cast({:array, :string}, [1, 2, 3])
:error
iex> cast(:string, [1, 2, 3])
:error
"""
@spec cast(t, term) :: {:ok, term} | :error
def cast(_type, nil), do: {:ok, nil}
def cast({:array, type}, term) when is_list(term) do
array(type, term, &cast/2, [])
end
def cast(:float, term) when is_binary(term) do
case Float.parse(term) do
{float, ""} -> {:ok, float}
_ -> :error
end
end
def cast(:float, term) when is_integer(term), do: {:ok, term + 0.0}
def cast(:boolean, term) when term in ~w(true 1), do: {:ok, true}
def cast(:boolean, term) when term in ~w(false 0), do: {:ok, false}
def cast(:decimal, term) when is_binary(term) or is_number(term) do
{:ok, Decimal.new(term)} # TODO: Add Decimal.parse/1
rescue
Decimal.Error -> :error
end
def cast(type, term) when type in [:id, :integer] and is_binary(term) do
case Integer.parse(term) do
{int, ""} -> {:ok, int}
_ -> :error
end
end
def cast(type, value) do
cond do
not primitive?(type) ->
type.cast(value)
of_base_type?(type, value) ->
{:ok, value}
true ->
:error
end
end
@doc """
Same as `cast/2` but raises if value can't be cast.
"""
@spec cast!(t, term) :: term | no_return
def cast!(type, term) do
case cast(type, term) do
{:ok, value} -> value
:error -> raise ArgumentError, "cannot cast `#{inspect term}` to type #{inspect type}"
end
end
## Helpers
# Checks if a value is of the given primitive type.
defp of_base_type?(:any, _), do: true
defp of_base_type?(:id, term), do: is_integer(term)
defp of_base_type?(:float, term), do: is_float(term)
defp of_base_type?(:integer, term), do: is_integer(term)
defp of_base_type?(:boolean, term), do: is_boolean(term)
defp of_base_type?(:binary, term), do: is_binary(term)
defp of_base_type?(:string, term), do: is_binary(term)
defp of_base_type?(:map, term), do: is_map(term) and not Map.has_key?(term, :__struct__)
defp of_base_type?(:decimal, %Decimal{}), do: true
defp of_base_type?(:date, {_, _, _}), do: true
defp of_base_type?(:time, {_, _, _}), do: true
defp of_base_type?(:time, {_, _, _, _}), do: true
defp of_base_type?(:datetime, {{_, _, _}, {_, _, _}}), do: true
defp of_base_type?(:datetime, {{_, _, _}, {_, _, _, _}}), do: true
defp of_base_type?(:binary_id, value) do
raise "cannot dump/cast/load :binary_id type, attempted value: #{inspect value}"
end
defp of_base_type?(:date, %{__struct__: Ecto.Date} = d) do
raise "trying to dump/cast Ecto.Date as a :date type: #{inspect d}. " <>
"Maybe you wanted to declare Ecto.Date in your schema?"
end
defp of_base_type?(:time, %{__struct__: Ecto.Time} = t) do
raise "trying to dump/cast Ecto.Time as a :time type: #{inspect t}. " <>
"Maybe you wanted to declare Ecto.Time in your schema?"
end
defp of_base_type?(:datetime, %{__struct__: Ecto.DateTime} = dt) do
raise "trying to dump/cast Ecto.DateTime as a :datetime type: #{inspect dt}. " <>
"Maybe you wanted to declare Ecto.DateTime in your schema?"
end
defp of_base_type?(struct, _) when struct in ~w(decimal date time datetime)a, do: false
defp array(type, [h|t], fun, acc) do
case fun.(type, h) do
{:ok, h} -> array(type, t, fun, [h|acc])
:error -> :error
end
end
defp array(_type, [], _fun, acc) do
{:ok, Enum.reverse(acc)}
end
end
|
lib/ecto/type.ex
| 0.93035 | 0.683989 |
type.ex
|
starcoder
|
defmodule Quill.Encoder do
@moduledoc """
Documentation for Quill.Encoder.
"""
# Encode output as Logfmt
def encode(map, config = %{log_format: :logfmt}) do
map
|> force_map()
|> ordered_keywords(config)
|> Logfmt.encode()
end
# Encode output as JSON
def encode(map, %{log_format: :json}) do
map
|> force_map()
|> Jason.encode!()
end
# Encode output as default (JSON)
def encode(map, _) do
map
|> force_map()
|> Jason.encode!()
end
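# Usage sketch (illustrative; key order in the JSON output is up to Jason):
#
#   Quill.Encoder.encode(%{msg: "ready", level: :info}, %{log_format: :json})
#   #=> ~s({"level":"info","msg":"ready"})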
# Structs must be handled before the generic map clause; otherwise they would
# match is_map/1 first and Enum.into/3 would fail on them.
defp force_map(%{__struct__: _} = value) do
value
|> Map.from_struct()
|> force_map()
end
defp force_map(value) when is_map(value) do
Enum.into(value, %{},
fn {k, v} -> {force_map(k), force_map(v)} end)
end
defp force_map(value) when is_list(value) do
Enum.map(value, &force_map/1)
end
defp force_map(value) when is_pid(value)
when is_port(value)
when is_reference(value)
when is_tuple(value)
when is_function(value), do: inspect(value)
defp force_map(value), do: value
defp force_string(value) when is_map(value)
when is_list(value), do: Jason.encode!(force_map(value))
defp force_string(value) when is_pid(value)
when is_port(value)
when is_reference(value)
when is_tuple(value)
when is_function(value), do: inspect(value)
defp force_string(value), do: value
defp ordered_keywords(value, %{priority_fields: fields}) do
[]
|> Keyword.merge(Enum.into(fields, [],
fn f -> {f, force_string(value[f])} end))
|> Keyword.merge(Enum.into(Map.drop(value, fields), [],
fn {k, v} -> {k, force_string(v)} end))
end
defp ordered_keywords(value, _) do
Enum.into(value, [],
fn {k, v} -> {k, force_string(v)} end)
end
end
|
lib/encoder/encoder.ex
| 0.67854 | 0.408159 |
encoder.ex
|
starcoder
|
defmodule Distro do
@moduledoc """
This module handles the information that is passed between the different nodes. The GenServer keeps
a map of all the orders for all the elevators. The map is keyed by node name; the keys are the names
of the different nodes that this node can reach.
Communication is handled by calls and casts to the Distro modules on the other elevators.
"""
use GenServer
@server_name :process_distro
@doc """
Start the GenServer with a map containing a single key, this node, mapped to an empty order list.
"""
def start_link([]) do
GenServer.start_link(__MODULE__, %{Node.self() => []}, [{:name, @server_name}])
end
@doc """
Inits the GenServer by spawning the two functions that check for new nodes and remove nodes that
are gone.
"""
@impl true
def init(orders) do
Process.spawn(fn -> check_for_new_nodes() end, [])
Process.spawn(fn -> poll_for_dead_nodes() end, [])
{:ok, orders}
end
# ------------- API
@doc """
Adds an order to a node and sets the light for this order.
"""
def add_order({pid, node}, order, node_for_order) do
GenServer.cast({pid, node}, {:add_order, order, node_for_order})
end
@doc """
Removes an order from a node and turns of the light for this order.
"""
def remove_order({pid, node}, order, node_for_order) do
GenServer.cast({pid, node}, {:remove_order, order, node_for_order})
end
@doc """
Returns all the orders for this node.
"""
def get_all_orders(pid, node) do
GenServer.call({pid, node}, {:get_all_orders})
end
@doc """
Distributes a new order. Finds the best node through the get_score function, then adds the order to
this node and tells the other nodes which elevator is handling the order.
"""
def new_order(pid, order) do
GenServer.cast(pid, {:new_order, order})
end
@doc """
Adds a new node and all its orders to this genserver.
"""
def add_node(pid, node) do
GenServer.cast(pid, {:add_node, node})
end
@doc """
Removes the given nodes from this genserver.
"""
def remove_nodes(pid, nodes) do
GenServer.cast(pid, {:remove_nodes, nodes})
end
@doc """
Returns the map stored in this Genserver.
"""
def get_map(pid) do
GenServer.call(pid, {:get_map})
end
@doc """
Returns true. This function is used to check if a node is responsive.
"""
def is_alive?(pid, node) do
GenServer.call({pid, node}, {:is_alive?})
end
@doc """
Checks if there are any handleable orders at a floor. Returns true or false.
"""
def check_for_orders_at_floor(pid, floor, direction) do
GenServer.call(pid, {:check_for_orders_at_floor, floor, direction})
end
@doc """
Gets the optimal travel direction according to the current orders on the elevator.
"""
def get_direction(pid, elevator_state) do
GenServer.call(pid, {:get_direction, elevator_state})
end
# ------------- Casts and calls
@impl true
def handle_cast({:add_order, order, node_for_order}, orders) do
if order.direction != :cab || node_for_order == Node.self() do
ElevatorDriver.set_order_button_light(:process_driver, order.direction, order.floor, :on)
end
{:noreply,
Map.put(orders, node_for_order, Enum.uniq([order | Map.fetch!(orders, node_for_order)]))}
end
@impl true
def handle_cast({:remove_order, order, node_for_order}, orders) do
if Node.self() == node_for_order && Enum.any?(orders[Node.self()], fn x -> x == order end) do
Node.list()
|> Enum.each(fn x -> Distro.remove_order({:process_distro, x}, order, node_for_order) end)
end
if order.direction != :cab || node_for_order == Node.self() do
ElevatorDriver.set_order_button_light(:process_driver, order.direction, order.floor, :off)
end
{:noreply,
Map.put(
orders,
node_for_order,
Map.fetch!(orders, node_for_order) |> Enum.reject(fn x -> x == order end)
)}
end
def handle_cast({:new_order, order}, orders) do
# get_score returns a %NodeScore struct; extract the chosen node with .node
node_for_order =
(orders
|> Map.keys()
|> Enum.map(fn x -> get_score(Map.fetch!(orders, x), x, order) end)
|> Enum.min_by(fn x -> x.score end)
).node
Node.list() |> Enum.each(fn x ->
Distro.add_order({:process_distro, x}, order, node_for_order)
end)
ElevatorDriver.set_order_button_light(:process_driver, order.direction, order.floor, :on)
{:noreply,
Map.put(orders, node_for_order, Enum.uniq([order | Map.fetch!(orders, node_for_order)]))}
end
def handle_cast({:remove_nodes, nodes}, orders) do
{:noreply, orders |> Map.drop(nodes)}
end
@impl true
def handle_call({:get_all_orders}, _from, orders) do
{:reply, Map.fetch!(orders, Node.self()), orders}
end
def handle_call({:get_map}, _from, orders) do
{:reply, orders, orders}
end
def handle_call({:is_alive?}, _from, orders) do
{:reply, true, orders}
end
@impl true
def handle_cast({:add_node, node}, orders) do
{:noreply, Map.put(orders, node, Distro.get_all_orders(:process_distro, node))}
end
# The tuple shape matches the API call above: {:check_for_orders_at_floor, floor, direction}.
def handle_call({:check_for_orders_at_floor, current_floor, direction}, _from, orders) do
cond do
current_floor == 0 ->
{:reply, true, orders}
current_floor == 3 ->
{:reply, true, orders}
Map.fetch!(orders, Node.self())
|> Enum.any?(fn x -> stop_for_order?(x, direction, current_floor) end) ->
{:reply, true, orders}
!orders_beyond(direction, current_floor, Map.fetch!(orders, Node.self())) ->
{:reply, true, orders}
true ->
{:reply, false, orders}
end
end
def handle_call({:get_direction, elevator_state}, _from, orders) do
own_orders = Map.fetch!(orders, Node.self())
above = own_orders |> Enum.count(fn x -> x.floor > elevator_state.floor end)
below = own_orders |> Enum.count(fn x -> x.floor < elevator_state.floor end)
cond do
Enum.empty?(Enum.filter(own_orders, fn x -> x.floor != elevator_state.floor end)) ->
{:reply, :none, orders}
own_orders |> Enum.any?(fn x -> order_in_traveling_direction?(x, elevator_state) end) ->
{:reply, elevator_state.direction, orders}
above >= below ->
{:reply, :up, orders}
below > above ->
{:reply, :down, orders}
end
end
# ------------- Help functions
@doc """
Checks if a given order is in the traveldirection of the elevator.
"""
defp order_in_traveling_direction?(order, elev_state) do
cond do
elev_state.direction == :up ->
order.floor > elev_state.floor
elev_state.direction == :down ->
order.floor < elev_state.floor
true ->
false
end
end
@doc """
Checks if there are any orders beyond a given floor in the traveling direction of the elevator.
"""
defp orders_beyond(direction, floor, orders) do
case direction do
:up -> orders |> Enum.filter(fn x -> x.floor > floor end) |> Enum.any?()
:down -> orders |> Enum.filter(fn x -> x.floor < floor end) |> Enum.any?()
:idle -> false
:motor_dead -> false
end
end
@doc """
Checks if the elevator should stop for a given order.
"""
defp stop_for_order?(order, direction, current_floor) do
cond do
{order.direction, order.floor} == {:cab, current_floor} -> true
{order.direction, order.floor} == {direction, current_floor} -> true
true -> false
end
end
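# e.g. an elevator moving :up at floor 2 stops for %{direction: :up, floor: 2}
# and for any cab order at floor 2, but not for %{direction: :down, floor: 2}.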
@doc """
Distributes all orders on a node to the other nodes. This is done if the elevator stops working.
"""
def flush_orders do
IO.puts("Flushing orders:")
Distro.get_all_orders(:process_distro, Node.self())
|> Enum.each(fn x -> Distro.new_order(:process_distro, x) end)
end
@doc """
Checks if any new nodes has joined the network every 500ms. If a new node is found this node is
added to the GenServer through the add_node function. This function spawns a new version of itself before
running.
"""
defp check_for_new_nodes do
Process.sleep(500)
Process.spawn(fn -> check_for_new_nodes() end, [])
NetworkUtils.all_nodes()
|> Enum.each(fn x ->
case Distro.get_map(:process_distro) |> Map.has_key?(x) do
false ->
# This makes the process crash if the GenServer is not alive on the other node; this is intended.
if Distro.is_alive?(:process_distro, x) do
Distro.add_node(:process_distro, x)
end
true ->
:nothing
end
end)
end
@doc """
Checks if any of the nodes that were on the network has died. If any nodes are found their orders are
redistributed to the other nodes on the network. This function spawn a new instance of itself.
"""
defp poll_for_dead_nodes do
Process.sleep(20)
Process.spawn(fn -> poll_for_dead_nodes() end, [])
lost_nodes =
Distro.get_map(:process_distro)
|> Map.keys()
|> Enum.filter(fn x -> not (NetworkUtils.all_nodes() |> Enum.any?(fn y -> x == y end)) end)
case Enum.empty?(lost_nodes) do
false ->
IO.puts("Removing dead nodes:")
IO.puts(Kernel.inspect(lost_nodes))
lost_nodes
|> Enum.each(fn x ->
Distro.get_map(:process_distro)
|> Map.fetch!(x)
|> Enum.each(fn y ->
if not (y.direction == :cab) do
Distro.new_order(:process_distro, y)
end
end)
end)
Distro.remove_nodes(:process_distro, lost_nodes)
true ->
:nothing
end
end
@doc """
Returns a NodeScore struct for a given node and order. The node with the lowest score is most fitting.
"""
defp get_score(orders_on_node, node, order) do
cond do
not (node in NetworkUtils.all_nodes()) ->
%NodeScore{node: node, score: 1000}
order.direction == :cab && node == Node.self() ->
%NodeScore{node: node, score: 0}
order.direction == :cab && node != Node.self() ->
%NodeScore{node: node, score: 100}
ElevatorState.get_state(:process_elevator, node).direction == :motor_dead ->
%NodeScore{node: node, score: 100}
order_in_traveling_direction?(order, ElevatorState.get_state(:process_elevator, node)) ->
%NodeScore{
node: node,
score: 1 + abs(order.floor - ElevatorState.get_state(:process_elevator, node).floor)
}
true ->
score_node =
orders_on_node |> Enum.map(fn x -> Kernel.abs(x.floor - order.floor) end) |> Enum.sum()
%NodeScore{
node: node,
score:
score_node +
Kernel.abs(ElevatorState.get_state(:process_elevator, node).floor - order.floor)
}
end
end
end
|
lib/distro.ex
| 0.840717 | 0.597021 |
distro.ex
|
starcoder
|
defmodule MCTS.Zipper do
@moduledoc """
A zipper tree.
"""
alias MCTS.{Breadcrumb, Node}
@enforce_keys [:focus]
defstruct(focus: nil, breadcrumbs: [])
@type t :: %__MODULE__{focus: Node.t(), breadcrumbs: [Breadcrumb.t()]}
@spec root?(__MODULE__.t()) :: boolean()
def root?(zipper = %__MODULE__{}) do
Enum.empty?(zipper.breadcrumbs)
end
@doc """
Moves the focus up to the parent node. Returns the same zipper, untouched, if the current focus is the root node.
"""
@spec up(__MODULE__.t()) :: __MODULE__.t()
def up(zipper = %__MODULE__{}) do
if root?(zipper) do
zipper
else
[last_breadcrumb | remaining_breadcrumbs] = zipper.breadcrumbs
%{
zipper
| focus: %Node{
payload: last_breadcrumb.payload,
children: last_breadcrumb.left_nodes ++ [zipper.focus] ++ last_breadcrumb.right_nodes
},
breadcrumbs: remaining_breadcrumbs
}
end
end
@doc """
Moves the zipper's focus down to the child at the given index.
Raises a `RuntimeError` if the zipper's focus has no children, and an `ArgumentError` if no
child exists at the given index.
"""
@spec down(__MODULE__.t(), non_neg_integer()) :: __MODULE__.t()
def down(zipper = %__MODULE__{}, index) when is_integer(index) do
cond do
Node.leaf?(zipper.focus) ->
# Raise a custom exception here?
raise "focus node has no children"
index >= length(zipper.focus.children) ->
raise ArgumentError,
message: "no child node at index: #{index} (index may not be negative)"
true ->
{left_nodes, new_focus, right_nodes} = break(zipper.focus.children, index)
%{
zipper
| focus: new_focus,
breadcrumbs: [
%Breadcrumb{
payload: zipper.focus.payload,
left_nodes: left_nodes,
right_nodes: right_nodes
}
| zipper.breadcrumbs
]
}
end
end
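# Navigation sketch: down/2 stores the focus's siblings in a breadcrumb so a
# later up/1 can rebuild the parent with its children in the original order.
# Assuming a Node holds only :payload and :children, the round trip holds:
#
#   zipper |> Zipper.down(1) |> Zipper.up() == zipper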
@spec break([Node.t()], non_neg_integer()) :: {[Node.t()], Node.t(), [Node.t()]}
defp break(nodes, index) do
left_items =
if index == 0 do
[]
else
Enum.slice(nodes, 0, index)
end
{
left_items,
Enum.at(nodes, index),
Enum.slice(nodes, (index + 1)..-1)
}
end
end
|
apps/mcts/lib/mcts/zipper.ex
| 0.838101 | 0.453322 |
zipper.ex
|
starcoder
|
defmodule Sanbase.Signal.ResultBuilder.Transformer do
defmodule Data do
@derive Jason.Encoder
defstruct [
:identifier,
:current,
:previous,
:previous_average,
:absolute_change,
:percent_change
]
defimpl String.Chars, for: __MODULE__ do
def to_string(data), do: data |> Map.from_struct() |> inspect()
end
end
import Sanbase.Math, only: [percent_change: 2]
@doc ~s"""
## Examples
iex> data = [{"eos", [%{value: 1}, %{value: 2}, %{value: 5}]}]
...> Sanbase.Signal.ResultBuilder.Transformer.transform(data, :value)
[%Sanbase.Signal.ResultBuilder.Transformer.Data{
identifier: "eos", absolute_change: 3, current: 5, previous: 2, percent_change: 233.33, previous_average: 1.5
}]
iex> data = [{"eos", [%{value: 2}, %{value: 2}, %{value: 3}, %{value: 4}]}]
...> Sanbase.Signal.ResultBuilder.Transformer.transform(data, :value)
[%Sanbase.Signal.ResultBuilder.Transformer.Data{
absolute_change: 1, current: 4, previous: 3, identifier: "eos", percent_change: 71.67, previous_average: 2.33
}]
iex> data = []
...> Sanbase.Signal.ResultBuilder.Transformer.transform(data, :value)
[]
"""
def transform(data, value_key) do
Enum.map(data, fn
{identifier, [_, _ | _] = values} ->
[previous, current] = Enum.take(values, -2) |> Enum.map(&Map.get(&1, value_key))
previous_list = Enum.drop(values, -1) |> Enum.map(&Map.get(&1, value_key))
previous_average =
previous_list
|> Sanbase.Math.average(precision: 2)
%Data{
identifier: identifier,
current: current,
previous: previous,
previous_average: previous_average,
absolute_change: current - previous,
percent_change: percent_change(previous_average, current)
}
{identifier, [value]} ->
%Data{
identifier: identifier,
current: value,
previous: nil,
previous_average: nil,
absolute_change: nil,
percent_change: nil
}
end)
end
end
|
lib/sanbase/signals/trigger/settings/result_builder/transformer.ex
| 0.785432 | 0.431944 |
transformer.ex
|
starcoder
|
defmodule Cldr.Calendar.Conversion do
@moduledoc false
# This is a private module used only
# by ex_cldr during the consolidation phase
# of the CLDR base data
def convert_eras_to_iso_days(calendar_data) do
Enum.map(calendar_data, fn {calendar, content} ->
{calendar, adjust_eras(content)}
end)
|> Enum.into(%{})
end
defp adjust_eras(%{"eras" => eras} = content) do
eras =
eras
|> Enum.map(fn {era, dates} -> {era, adjust_era(dates)} end)
|> Enum.into(%{})
Map.put(content, "eras", eras)
end
defp adjust_eras(%{} = content) do
content
end
defp adjust_era(dates) do
Enum.map(dates, fn
{"start", date} -> {"start", to_iso_days(date)}
{"end", date} -> {"end", to_iso_days(date)}
{k, v} -> {k, v}
end)
|> Enum.into(%{})
end
def parse_time_periods(period_data) do
Enum.map(period_data, fn {language, periods} ->
{language, adjust_periods(periods)}
end)
|> Enum.into(%{})
end
defp adjust_periods(periods) do
Enum.map(periods, fn {period, times} ->
{period, adjust_times(times)}
end)
|> Enum.into(%{})
end
defp adjust_times(times) do
Enum.map(times, fn {key, time} ->
{key, Enum.map(String.split(time, ":"), &String.to_integer/1)}
end)
|> Enum.into(%{})
end
def to_iso_days(%{year: year, month: month, day: day}) do
gregorian_date_to_iso_days(year, month, day)
end
def to_iso_days(date) when is_binary(date) do
{year, month, day} =
case String.split(date, "-") do
[year, month, day] ->
{String.to_integer(year), String.to_integer(month), String.to_integer(day)}
["", year, month, day] ->
{String.to_integer("-#{year}"), String.to_integer(month), String.to_integer(day)}
end
gregorian_date_to_iso_days(year, month, day)
end
@doc """
Converts a `year`, `month` and `day` into a number of days
for the gregorian calendar
This should be done in the Calendar.ISO module, but today that
module doesn't handle the negative years which are needed here.
"""
def gregorian_date_to_iso_days(year, month, day) do
correction =
cond do
month <= 2 -> 0
leap_year?(year) -> -1
true -> -2
end
(gregorian_epoch_days() - 1 + 365 * (year - 1) + Float.floor((year - 1) / 4) -
Float.floor((year - 1) / 100) + Float.floor((year - 1) / 400) +
Float.floor((367 * month - 362) / 12) + correction + day)
|> trunc
end
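# For example, `gregorian_date_to_iso_days(1, 1, 1)` returns `1`: day one of
# the proleptic Gregorian calendar.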
@doc """
Returns true if the given year is a leap year.
"""
def leap_year?(year) when is_integer(year) do
mod(year, 4) === 0 and (mod(year, 100) > 0 or mod(year, 400) === 0)
end
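# For example, `leap_year?(2000)` is true, but `leap_year?(1900)` is false,
# since 1900 is divisible by 100 and not by 400.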
defp gregorian_epoch_days do
1
end
def mod(x, y) when is_integer(x) and is_integer(y) do
mod(x * 1.0, y) |> round
end
def mod(x, y) do
x - y * Float.floor(x / y)
end
end
|
lib/cldr/utils/calendar_conversion.ex
| 0.778733 | 0.532851 |
calendar_conversion.ex
|
starcoder
|
defmodule Day10 do
defmodule Pos do
defstruct x: 0, y: 0
@type t :: %__MODULE__{x: integer, y: integer}
end
@spec angle(Pos.t(), Pos.t()) :: float
def angle(base, pos) do
# Y goes down and angles are measured clockwise from UP. Hence the odd deltas.
angle = ElixirMath.atan2((pos.x - base.x) / 1, (base.y - pos.y) / 1)
if angle < 0, do: angle + 2 * :math.pi(), else: angle
end
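# For example, with the base at {0, 0}, an asteroid straight "up" on screen
# (%Pos{x: 0, y: -1}) gives atan2(0, 1) == 0.0, and angles grow clockwise
# from there: %Pos{x: 1, y: 0} (due right) maps to pi/2.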
defp count_visible(positions, base) do
Enum.reduce(positions, MapSet.new(), fn pos, angles ->
if pos == base do
angles
else
MapSet.put(angles, angle(base, pos))
end
end)
|> MapSet.size()
end
@spec search([Pos.t()]) :: {integer, Pos.t()}
defp search(positions) do
Enum.reduce(positions, {0, nil}, fn base, {best_count, best_base} ->
visible = count_visible(positions, base)
if visible > best_count do
{visible, base}
else
{best_count, best_base}
end
end)
end
@spec line_to_positions({String.t(), integer}) :: [Pos.t()]
defp line_to_positions({line, y_coord}) do
to_charlist(line)
|> Enum.with_index()
|> Enum.filter(fn {chr, _} -> chr == ?# end)
|> Enum.map(fn {_, x_coord} -> %Pos{x: x_coord, y: y_coord} end)
end
@spec reduce_to_map([String.t()]) :: [Pos.t()]
def reduce_to_map(space_map) do
Enum.with_index(space_map)
|> Enum.map(&line_to_positions/1)
|> List.flatten()
end
@spec search_map([String.t()]) :: {integer, Pos.t()}
def search_map(space_map) do
search(reduce_to_map(space_map))
end
@spec part1(String.t()) :: String.t()
def part1(file_name) do
{best, base} =
Files.read_lines!(file_name)
|> search_map()
"Base at #{base.x}, #{base.y}, can see #{best} asteroids"
end
@type a_vec :: {Pos.t(), integer}
@spec insert_in_list([a_vec()], a_vec()) :: [a_vec()]
def insert_in_list(boom_list, target) do
case boom_list do
[] ->
[target]
[nearest | hidden] ->
if elem(target, 1) < elem(nearest, 1) do
[target | boom_list]
else
[nearest | insert_in_list(hidden, target)]
end
end
end
@spec extract_booms([[a_vec()]]) :: [Pos.t()]
defp extract_booms(angle_lists) do
# Each item in the input is a non-empty list of the asteroids along a given vector.
# The lists are in angular order, so each time round the laser booms the first one
# on the list
case angle_lists do
[] ->
[]
# Can do this nested as we know lists are non-empty
[[boomer | saved_for_now] | other_vectors] ->
new_list =
if saved_for_now == [], do: other_vectors, else: other_vectors ++ [saved_for_now]
[elem(boomer, 0) | extract_booms(new_list)]
end
end
@spec boom_list([Pos.t()], Pos.t()) :: [Pos.t()]
def boom_list(positions, base) do
boom_hash =
Enum.reduce(positions, %{}, fn pos, order_hash ->
if base == pos do
order_hash
else
angle_key = angle(base, pos)
old_list = Map.get(order_hash, angle_key, [])
Map.put(
order_hash,
angle_key,
insert_in_list(old_list, {pos, abs(pos.x - base.x) + abs(pos.y - base.y)})
)
end
end)
Map.keys(boom_hash)
|> Enum.sort()
|> Enum.map(fn line_of_sight -> boom_hash[line_of_sight] end)
|> extract_booms()
end
@spec boom_order([String.t()]) :: [Pos.t()]
def boom_order(lines) do
positions = reduce_to_map(lines)
{_, base} = search(positions)
boom_list(positions, base)
end
@spec part2(String.t()) :: integer
def part2(file_name) do
pos =
Files.read_lines!(file_name)
|> boom_order()
# Two hard things in computing: off-by-one errors (the 200th boom is at index 199)
|> Enum.at(199)
pos.x * 100 + pos.y
end
end
|
lib/day10.ex
| 0.765725 | 0.646809 |
day10.ex
|
starcoder
|
defmodule Mollie.Customers.Mandates do
import Mollie
alias Mollie.Client
@moduledoc """
Mandates allow you to charge a customer’s credit card, PayPal account or bank account recurrently.
It is only possible to create mandates for IBANs and PayPal billing agreements with this API.
To create mandates for credit cards, have your customers perform a ‘first payment’ with their credit card.
"""
@doc """
Retrieve all mandates for the given `customer_id`, ordered from newest to oldest.
Results are paginated.
## Example
Mollie.Customers.Mandates.list client, "cst_8wmqcHMN4U"
More info at: https://docs.mollie.com/reference/v2/mandates-api/list-mandates
"""
@spec list(Client.t(), binary, map | list) :: Mollie.response()
def list(client, customer_id, params \\ %{}) do
get("v2/customers/#{customer_id}/mandates", client, params)
end
@doc """
Retrieve a mandate by its ID and its customer’s ID.
The mandate will either contain IBAN or credit card details, depending on the type of mandate.
## Example
Mollie.Customers.Mandates.find client, "cst_8wmqcHMN4U", "mdt_pWUnw6pkBN"
More info at: https://docs.mollie.com/reference/v2/mandates-api/get-mandate
"""
@spec find(Client.t(), binary, binary, map | list) :: Mollie.response()
def find(client, customer_id, id, params \\ %{}) do
get("v2/customers/#{customer_id}/mandates/#{id}", client, params)
end
@doc """
Create a mandate for a specific customer.
Mandate body example
```
%{
"method" => "directdebit",
"consumerName" => "<NAME>",
"consumerAccount" => "NL55INGB0000000000"
}
```
## Example
Mollie.Customers.Mandates.create client, "cst_8wmqcHMN4U", mandate_body
More info at: https://docs.mollie.com/reference/v2/mandates-api/create-mandate
"""
@spec create(Client.t(), binary, map) :: Mollie.response()
def create(client, customer_id, body) do
post("v2/customers/#{customer_id}/mandates", client, body)
end
@doc """
Revoke a customer’s mandate.
You will no longer be able to charge the consumer’s bank account or credit card with this mandate and all connected subscriptions will be canceled.
## Example
Mollie.Customers.Mandates.revoke client, "cst_8wmqcHMN4U", "mdt_pWUnw6pkBN"
More info at: https://docs.mollie.com/reference/v2/mandates-api/revoke-mandate
"""
@spec revoke(Client.t(), binary, binary, map | list) :: Mollie.response()
def revoke(client, customer_id, id, params \\ %{}) do
delete("v2/customers/#{customer_id}/mandates/#{id}", client, params)
end
end
|
lib/mollie/customers/mandates.ex
| 0.727879 | 0.559049 |
mandates.ex
|
starcoder
|
defmodule Csp.MinConflicts do
@moduledoc """
[Min-conflicts](https://en.wikipedia.org/wiki/Min-conflicts_algorithm) algorithm implementation,
with Tabu search to allow overcoming local minimums.
"""
@doc """
Solves `csp` with min-conflicts algorithm, using tabu search to overcome local minimums.
## Options
Supported `opts`:
- `:max_iterations` - positive integer, the number of iterations to perform before giving up.
Defaults to `10_000`.
- `:optimize_initial_state` - boolean, defaults to `false`. If set to `true`, will use a greedy
algorithm to set an initial state minimizing the number of conflicts for each variable.
- `:tabu_depth` - positive integer or `nil`, defaults to `nil`. If set to an integer,
will limit tabu stack depth by the specified integer.
"""
@spec solve(Csp.t(), Keyword.t()) :: {:solved, Csp.assignment()} | :no_solution
def solve(csp, opts \\ []) do
max_iterations = Keyword.get(opts, :max_iterations, 10_000)
optimize_initial_state = Keyword.get(opts, :optimize_initial_state, false)
tabu_depth = Keyword.get(opts, :tabu_depth)
assignment = if optimize_initial_state, do: optimized_initial_state(csp), else: random_initial_state(csp)
{status, assignment, _tabu} =
1..max_iterations
|> Enum.reduce_while({:no_solution, assignment, []}, fn _iteration, {status, assignment, tabu} ->
# TODO: replace with Csp.solved?, since it's cheaper, and we always have full assignment here.
if Csp.consistent?(csp, assignment) do
{:halt, {:solved, assignment, tabu}}
else
variable = Csp.conflicted(csp, assignment) |> Enum.random()
random_value = Enum.random(csp.domains[variable])
# TODO: Make tabu a MapSet?
# TODO: replace find with manual reduce_while to take the first value that is not in tabu, or generate a default value if it's not found
# TODO: try with prohibiting the current value of the variable; or, better still, placing all current values in tabu before starting this
# TODO: more optimal representation for the n queens constraints as atoms
value =
Csp.order_by_conflicts(csp, variable, assignment)
|> Enum.find(random_value, fn value -> {variable, value} not in tabu end)
tabu = [{variable, value} | tabu]
tabu =
if tabu_depth && length(tabu) > tabu_depth do
Enum.take(tabu, tabu_depth)
else
tabu
end
{:cont, {status, Map.put(assignment, variable, value), tabu}}
end
end)
if status == :no_solution, do: status, else: {status, assignment}
end
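# A minimal usage sketch (the `Csp` struct is assumed to have been built
# elsewhere, e.g. for an n-queens instance):
#
#     case Csp.MinConflicts.solve(csp, max_iterations: 50_000, tabu_depth: 100) do
#       {:solved, assignment} -> assignment
#       :no_solution -> :gave_up
#     end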
## Helpers
@spec random_initial_state(Csp.t()) :: Csp.assignment()
defp random_initial_state(%Csp{} = csp) do
Enum.reduce(csp.domains, %{}, fn {variable, values}, assignment ->
value = Enum.random(values)
Map.put(assignment, variable, value)
end)
end
@spec optimized_initial_state(Csp.t()) :: Csp.assignment()
defp optimized_initial_state(%Csp{} = csp) do
Enum.reduce(csp.variables, %{}, fn variable, assignment ->
value =
try do
Csp.min_conflicts_value!(csp, variable, assignment)
rescue
KeyError ->
Map.fetch!(csp.domains, variable) |> Enum.random()
end
Map.put(assignment, variable, value)
end)
end
end
|
lib/csp/min_conflicts.ex
| 0.757346 | 0.805058 |
min_conflicts.ex
|
starcoder
|
defmodule HashRing.Managed do
@moduledoc """
This module defines the API for working with hash rings where the ring state is managed
in a GenServer process.
There is a performance penalty with working with the ring this way, but it is the best approach
if you need to share the ring across multiple processes, or need to maintain multiple rings.
If your rings map 1:1 with Erlang node membership, you can configure rings to automatically
monitor node up/down events and update the hash ring accordingly, with a default weight,
and either whitelist or blacklist nodes from the ring. You configure this at the ring level in your `config.exs`
Each ring is configured in `config.exs`, and can contain a list of nodes to seed the ring with,
and you can then dynamically add/remove nodes to the ring using the API here. Each node on the ring can
be configured with a weight, which affects the amount of the total keyspace it owns. The default weight
is `128`. It's best to base the weight of nodes on some concrete relative value, such as the amount of
memory a node has.
"""
@type ring :: atom()
@type key :: any()
@type weight :: pos_integer
@type node_list :: [node() | {node(), weight}]
@type pattern_list :: [String.t | Regex.t]
@type ring_options :: [
nodes: node_list,
monitor_nodes: boolean,
node_blacklist: pattern_list,
node_whitelist: pattern_list]
@valid_ring_opts [:name, :nodes, :monitor_nodes, :node_blacklist, :node_whitelist]
@doc """
Creates a new stateful hash ring with the given name.
This name is how you will refer to the hash ring via other API calls.
It takes an optional set of options which control how the ring behaves.
Valid options are as follows:
- `monitor_nodes: boolean`: will automatically monitor Erlang node membership,
if new nodes are connected or nodes are disconnected, the ring will be updated automatically.
In this configuration, nodes cannot be added or removed via the API. Those requests will be ignored.
- `node_blacklist: [String.t | Regex.t]`: Used in conjunction with `monitor_nodes: true`, this
is a list of patterns, either as literal strings, or as regex patterns (in either string or literal form),
and will be used to ignore nodeup/down events for nodes which are blacklisted. If a node whitelist
is provided, the blacklist has no effect.
- `node_whitelist: [String.t | Regex.t]`: The same as `node_blacklist`, except the opposite; only nodes
which match a pattern in the whitelist will result in the ring being updated.
An error is returned if the ring already exists or if bad ring options are provided.
## Examples
iex> {:ok, _pid} = HashRing.Managed.new(:test1, [nodes: ["a", {"b", 64}]])
...> HashRing.Managed.key_to_node(:test1, :foo)
"b"
iex> {:ok, pid} = HashRing.Managed.new(:test2)
...> {:error, {:already_started, existing_pid}} = HashRing.Managed.new(:test2)
...> pid == existing_pid
true
iex> HashRing.Managed.new(:test3, [nodes: "a"])
** (ArgumentError) {:nodes, "a"} is an invalid option for `HashRing.Managed.new/2`
"""
@spec new(ring) :: {:ok, pid} | {:error, {:already_started, pid}}
@spec new(ring, ring_options) :: {:ok, pid} | {:error, {:already_started, pid}} | {:error, {:invalid_option, term}}
def new(name, ring_options \\ []) when is_list(ring_options) do
opts = [{:name, name}|ring_options]
invalid = Enum.find(opts, fn
{key, value} when key in @valid_ring_opts ->
case key do
:name when is_atom(value) -> false
:nodes when is_list(value) -> Keyword.keyword?(value)
:monitor_nodes when is_boolean(value) -> false
:node_blacklist when is_list(value) -> false
:node_whitelist when is_list(value) -> false
_ -> true
end
# options outside @valid_ring_opts would otherwise raise FunctionClauseError
_ ->
true
end)
case invalid do
nil ->
case Process.whereis(:"libring_#{name}") do
nil ->
DynamicSupervisor.start_child(HashRing.Supervisor, {HashRing.Worker, opts})
pid ->
{:error, {:already_started, pid}}
end
_ ->
raise ArgumentError, message: "#{inspect invalid} is an invalid option for `HashRing.Managed.new/2`"
end
end
@doc """
Same as `HashRing.nodes/1`, returns a list of nodes on the ring.
## Examples
iex> {:ok, _pid} = HashRing.Managed.new(:nodes_test)
...> HashRing.Managed.add_nodes(:nodes_test, [:a, :b])
...> HashRing.Managed.nodes(:nodes_test)
[:b, :a]
"""
@spec nodes(ring) :: [term()]
def nodes(ring) do
HashRing.Worker.nodes(ring)
end
@doc """
Adds a node to the given hash ring.
An error is returned if the ring does not exist, or the node already exists in the ring.
## Examples
iex> {:ok, _pid} = HashRing.Managed.new(:test4)
...> HashRing.Managed.add_node(:test4, "a")
...> HashRing.Managed.key_to_node(:test4, :foo)
"a"
iex> HashRing.Managed.add_node(:no_exist, "a")
{:error, :no_such_ring}
"""
@spec add_node(ring, key) :: :ok | {:error, :no_such_ring}
def add_node(ring, node) when is_atom(ring) do
HashRing.Worker.add_node(ring, node)
end
@doc """
Same as `add_node/2`, but takes a weight value.
The weight controls the relative presence this node will have on the ring,
the default is 128, but it's best to give each node a weight value which maps
to a concrete resource such as memory or priority. It's not ideal to have a number
which is too high, as it will make the ring datastructure larger, but a good value
is probably in the range of 64-256.
## Examples
iex> {:ok, _pid} = HashRing.Managed.new(:test5)
...> HashRing.Managed.add_node(:test5, "a", 64)
...> HashRing.Managed.key_to_node(:test5, :foo)
"a"
iex> HashRing.Managed.add_node(:no_exist, "a")
{:error, :no_such_ring}
"""
@spec add_node(ring, key, weight) :: :ok |
{:error, :no_such_ring} |
{:error, {:invalid_weight, key, term}}
def add_node(ring, node, weight) when is_atom(ring)
and is_integer(weight)
and weight > 0 do
HashRing.Worker.add_node(ring, node, weight)
end
def add_node(ring, node, weight) when is_atom(ring) do
{:error, {:invalid_weight, node, weight}}
end
@doc """
Adds a list of nodes to the ring.
The list of nodes can contain either node names or `{node_name, weight}`
tuples. If there is an error with any of the node weights, an error will
be returned, and the ring will remain unchanged.
## Examples
iex> {:ok, _pid} = HashRing.Managed.new(:test6)
...> :ok = HashRing.Managed.add_nodes(:test6, ["a", {"b", 64}])
...> HashRing.Managed.key_to_node(:test6, :foo)
"b"
iex> {:ok, _pid} = HashRing.Managed.new(:test7)
...> HashRing.Managed.add_nodes(:test7, ["a", {"b", :wrong}])
{:error, [{:invalid_weight, "b", :wrong}]}
"""
@spec add_nodes(ring, node_list) :: :ok |
{:error, :no_such_ring} |
{:error, [{:invalid_weight, key, term}]}
def add_nodes(ring, nodes) when is_list(nodes) do
invalid = Enum.filter(nodes, fn
{_node, weight} when is_integer(weight) and weight > 0 ->
false
{_node, _weight} ->
true
node when is_binary(node) or is_atom(node) ->
false
_node ->
true
end)
case invalid do
[] ->
HashRing.Worker.add_nodes(ring, nodes)
_ ->
{:error, Enum.map(invalid, fn {k,v} -> {:invalid_weight, k, v} end)}
end
end
@doc """
Removes a node from the given hash ring.
An error is returned if the ring does not exist.
## Examples
iex> {:ok, _pid} = HashRing.Managed.new(:test8)
...> :ok = HashRing.Managed.add_nodes(:test8, ["a", {"b", 64}])
...> :ok = HashRing.Managed.remove_node(:test8, "b")
...> HashRing.Managed.key_to_node(:test8, :foo)
"a"
"""
@spec remove_node(ring, key) :: :ok | {:error, :no_such_ring}
def remove_node(ring, node) when is_atom(ring) do
HashRing.Worker.remove_node(ring, node)
end
@doc """
Maps a key to a node on the hash ring.
An error is returned if the ring does not exist.
"""
@spec key_to_node(ring, any()) :: key |
{:error, :no_such_ring} |
{:error, {:invalid_ring, :no_nodes}}
def key_to_node(ring, key) when is_atom(ring) do
HashRing.Worker.key_to_node(ring, key)
end
end
|
lib/managed_ring.ex
| 0.914851 | 0.627837 |
managed_ring.ex
|
starcoder
|
defmodule BitwiseIp.Block do
@moduledoc """
A struct representing a range of bitwise IP addresses.
Since 1993, [classless inter-domain routing
(CIDR)](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) has
been the basis for allocating blocks of IP addresses and efficiently routing
between them.
If you think about the standard human-readable notation for IP addresses, a
CIDR block is essentially a pattern with "wildcards" at the end. For example,
`1.2.3.x` would contain the 256 different IPv4 addresses ranging from
`1.2.3.0` through `1.2.3.255`. The CIDR representation would use the starting
address `1.2.3.0` plus a bitmask where the first three bytes (the
non-wildcards) are all ones. In IPv4 notation, the mask would be
`255.255.255.0`. But rather than use wildcards, CIDR blocks have their own
notation consisting of the starting address, a slash (`/`), and a prefix
length - the number of leading ones in the mask. So the `1.2.3.x` block would
actually be written as `1.2.3.0/24`.
As the basis for modern IP routing, these blocks are commonly used as virtual
collections. The CIDR representation allows us to efficiently test an
incoming IP address for membership in the block by bitwise `AND`-ing the mask
with the incoming address and comparing the result to the block's starting
address. The size of the block can also be computed in constant time using
bitwise arithmetic on the mask. For example, from the `/24` IPv4 mask we
could infer there are 2^8 = 256 addresses in the range corresponding to the
remaining 8 least significant bits.
Using this foundation, `BitwiseIp.Block` is able to implement the
`Enumerable` protocol with `BitwiseIp` structs as members. This allows you to
manipulate blocks as generic collections without actually allocating an
entire list:
```
iex> :rand.seed(:exs1024, {0, 0, 0})
iex> BitwiseIp.Block.parse!("1.2.3.0/24") |> Enum.random() |> to_string()
"1.2.3.115"
iex> BitwiseIp.Block.parse!("1.2.3.0/30") |> Enum.map(&to_string/1)
["1.2.3.0", "1.2.3.1", "1.2.3.2", "1.2.3.3"]
```
Note that, while CIDR blocks are efficient on their own, they're locked into
this very specific prefix representation. For example, you couldn't represent
the range `172.16.31.10` through `192.168.3.11` with a single block, since the binary
representation isn't amenable to a single prefix. This means you typically
have to manipulate multiple blocks at a time. To ensure lists of blocks are
handled efficiently, use the `BitwiseIp.Blocks` module.
"""
defstruct [:proto, :addr, :mask]
use Bitwise
alias __MODULE__
@typedoc """
A bitwise IP address block.
The block consists of all IP addresses that share the same prefix. To
represent this, we use a struct with the following fields:
* `:proto` - the protocol, either `:v4` or `:v6`
* `:addr` - the integer encoding of the network prefix
* `:mask` - the integer encoding of the subnet mask
Logically, this type is a combination of `t:BitwiseIp.t/0` and an integer
encoded by `BitwiseIp.Mask.encode/2`. However, rather than hold onto a
literal `BitwiseIp` struct, the `:proto` and `:addr` fields are inlined. This
proves to be more efficient for pattern matching than using a nested struct.
The network prefix's least significant bits are all assumed to be zero,
effectively making it the starting address of the block. That way, we can
avoid performing repetitive bitwise `AND` operations between the prefix &
mask in functions such as `member?/2`.
"""
@type t() :: v4() | v6()
@typedoc """
An IPv4 block.
The `:proto` and `:addr` are the same as in `t:BitwiseIp.v4/0`. The mask is a
32-bit unsigned integer where some number of leading bits are one and the
rest are zero. See `t:t/0` for more details.
"""
@type v4() :: %Block{proto: :v4, addr: integer(), mask: integer()}
@typedoc """
An IPv6 block.
The `:proto` and `:addr` are the same as in `t:BitwiseIp.v6/0`. The mask is a
128-bit unsigned integer where some number of leading bits are one and the
rest are zero. See `t:t/0` for more details.
"""
@type v6() :: %Block{proto: :v6, addr: integer(), mask: integer()}
@doc """
Efficiently checks if a bitwise IP is within a block.
In effect, we're testing if the given IP address has the same prefix as the
block. This involves a single bitwise `AND` and an integer comparison. We
extract the prefix from the IP by applying the block's bitmask, then check if
it's equal to the block's starting address. If the block and the IP have
different protocols, this function will return `false`.
Because `BitwiseIp.Block` implements the `Enumerable` protocol, you may also
use `in/2` to test for membership.
## Examples
```
iex> BitwiseIp.Block.parse!("192.168.0.0/16")
...> |> BitwiseIp.Block.member?(BitwiseIp.parse!("192.168.10.1"))
true
iex> BitwiseIp.Block.parse!("192.168.0.0/16")
...> |> BitwiseIp.Block.member?(BitwiseIp.parse!("172.16.0.1"))
false
iex> BitwiseIp.parse!("d:e:a:d:b:e:e:f") in BitwiseIp.Block.parse!("d::/16")
true
iex> BitwiseIp.parse!("127.0.0.1") in BitwiseIp.Block.parse!("::/0")
false
```
"""
@spec member?(t(), BitwiseIp.t()) :: boolean()
def member?(block, bitwise_ip)
def member?(
%Block{proto: proto, addr: prefix, mask: mask},
%BitwiseIp{proto: proto, addr: ip}
) do
prefix == (ip &&& mask)
end
def member?(_, _) do
false
end
@doc """
Efficiently checks if `block2` is a subset of `block1`.
Thanks to `BitwiseIp.Mask`, we encode masks as integers. So if mask A is less
than mask B, that means A had fewer leading bits, meaning the block will
contain *more* addresses than the block for B. Therefore, as a prerequisite,
we first check that `block1`'s mask is `<=` `block2`'s mask. If not, then
there's no chance that `block2` could be wholly contained in `block1`.
Then, if `block1`'s range is wide enough, we can test an arbitrary IP from
`block2` for membership in `block1`. Its inclusion would imply that
everything else in `block2` is also included, since `block1` is wider. We
have a suitable address to test in the form of the `:addr` field from
`block2`. The membership check involves the same bitwise `AND` + integer
comparison as `member?/2`.
If the blocks don't have matching protocols, this function returns `false`.
## Examples
```
iex> BitwiseIp.Block.parse!("1.0.0.0/8")
...> |> BitwiseIp.Block.subnet?(BitwiseIp.Block.parse!("1.2.0.0/16"))
true
iex> BitwiseIp.Block.parse!("1.2.0.0/16")
...> |> BitwiseIp.Block.subnet?(BitwiseIp.Block.parse!("1.0.0.0/8"))
false
iex> BitwiseIp.Block.parse!("1.2.0.0/16")
...> |> BitwiseIp.Block.subnet?(BitwiseIp.Block.parse!("1.2.0.0/16"))
true
iex> BitwiseIp.Block.parse!("1.2.0.0/16")
...> |> BitwiseIp.Block.subnet?(BitwiseIp.Block.parse!("2.3.0.0/16"))
false
```
"""
@spec subnet?(t(), t()) :: boolean()
def subnet?(block1, block2)
def subnet?(
%Block{proto: proto, addr: prefix, mask: mask},
%Block{proto: proto, addr: ip, mask: submask}
)
when mask <= submask do
prefix == (ip &&& mask)
end
def subnet?(_, _) do
false
end
@doc """
Computes the number of addresses contained in a block.
This value is wholly determined by the `:mask` field. Taking the bitwise
complement of the mask gives us an unsigned integer where all the lower bits
are ones. Since these are the bits that are covered by the block, we can
interpret this as the number of possible values, minus one for the zeroth
address.
For example, the IPv4 prefix `/29` leaves 3 bits to represent different
addresses in the block. So that's 2^3 = 8 possible addresses. To get there
from the mask `0b11111111111111111111111111111000`, we take its complement
and get `0b00000000000000000000000000000111`, which represents the integer
2^3 - 1 = 7. We add 1 and get the 8 possible addresses.
Because of the limited number of possible masks, we might want to implement
this as a static lookup using pattern matched function clauses, thereby
avoiding binary manipulation altogether. However, benchmarks indicate that
pattern matching against structs is much slower than the required bitwise
math. So, we negate the mask and add 1 to the resulting integer at run time.
## Examples
```
iex> BitwiseIp.Block.parse!("1.2.3.4/32") |> BitwiseIp.Block.size()
1
iex> BitwiseIp.Block.parse!("1.2.3.4/31") |> BitwiseIp.Block.size()
2
iex> BitwiseIp.Block.parse!("1.2.3.4/30") |> BitwiseIp.Block.size()
4
iex> BitwiseIp.Block.parse!("1.2.3.4/29") |> BitwiseIp.Block.size()
8
iex> BitwiseIp.Block.parse!("::/124") |> BitwiseIp.Block.size()
16
iex> BitwiseIp.Block.parse!("::/123") |> BitwiseIp.Block.size()
32
iex> BitwiseIp.Block.parse!("::/122") |> BitwiseIp.Block.size()
64
iex> BitwiseIp.Block.parse!("::/121") |> BitwiseIp.Block.size()
128
```
"""
@spec size(t()) :: integer()
def size(%Block{proto: :v4, mask: mask}) do
:binary.decode_unsigned(<<(~~~mask)::32>>) + 1
end
def size(%Block{proto: :v6, mask: mask}) do
:binary.decode_unsigned(<<(~~~mask)::128>>) + 1
end
@doc """
An error-raising variant of `parse/1`.
This function parses strings in [CIDR
notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation),
where an IP address is followed by a prefix length composed of a slash (`/`)
and a decimal number of leading bits in the subnet mask. The prefix length is
optional. If missing, it defaults to the full width of the IP address: 32
bits for IPv4, 128 for IPv6.
The constituent parts are parsed using `BitwiseIp.parse/1` and
`BitwiseIp.Mask.parse/2`. The address has the mask applied before
constructing the `BitwiseIp.Block` struct, thereby discarding any lower bits.
If the string is invalid, this function raises an `ArgumentError`.
`BitwiseIp.Block` implements the `String.Chars` protocol, so parsing can be
undone using `to_string/1`.
## Examples
```
iex> BitwiseIp.Block.parse!("192.168.0.0/16")
%BitwiseIp.Block{proto: :v4, addr: 3232235520, mask: 4294901760}
iex> BitwiseIp.Block.parse!("fc00::/8")
%BitwiseIp.Block{proto: :v6, addr: 334965454937798799971759379190646833152, mask: 338953138925153547590470800371487866880}
iex> BitwiseIp.Block.parse!("256.0.0.0/8")
** (ArgumentError) Invalid IP address "256.0.0.0" in CIDR "256.0.0.0/8"
iex> BitwiseIp.Block.parse!("dead::beef/129")
** (ArgumentError) Invalid IPv6 mask "129" in CIDR "dead::beef/129"
iex> BitwiseIp.Block.parse!("192.168.0.0/8") |> to_string()
"192.0.0.0/8"
iex> BitwiseIp.Block.parse!("::") |> to_string()
"::/128"
```
"""
@spec parse!(String.t()) :: t()
def parse!(cidr) do
case parse(cidr) do
{:ok, block} -> block
{:error, message} -> raise ArgumentError, message
end
end
@doc """
Parses a bitwise IP block from a string in CIDR notation.
This function parses strings in [CIDR
notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation),
where an IP address is followed by a prefix length composed of a slash (`/`)
and a decimal number of leading bits in the subnet mask. The prefix length is
optional. If missing, it defaults to the full width of the IP address: 32
bits for IPv4, 128 for IPv6.
The constituent parts are parsed using `BitwiseIp.parse/1` and
`BitwiseIp.Mask.parse/2`. The address has the mask applied before
constructing the `BitwiseIp.Block` struct, thereby discarding any lower bits.
This parsing is done in an error-safe way by returning a tagged tuple. To
raise an error, use `parse!/1` instead.
`BitwiseIp.Block` implements the `String.Chars` protocol, so parsing can be
undone using `to_string/1`.
## Examples
```
iex> BitwiseIp.Block.parse("192.168.0.0/16")
{:ok, %BitwiseIp.Block{proto: :v4, addr: 3232235520, mask: 4294901760}}
iex> BitwiseIp.Block.parse("fc00::/8")
{:ok, %BitwiseIp.Block{proto: :v6, addr: 334965454937798799971759379190646833152, mask: 338953138925153547590470800371487866880}}
iex> BitwiseIp.Block.parse("256.0.0.0/8")
{:error, "Invalid IP address \\"256.0.0.0\\" in CIDR \\"256.0.0.0/8\\""}
iex> BitwiseIp.Block.parse("dead::beef/129")
{:error, "Invalid IPv6 mask \\"129\\" in CIDR \\"dead::beef/129\\""}
iex> BitwiseIp.Block.parse("192.168.0.0/8") |> elem(1) |> to_string()
"192.0.0.0/8"
iex> BitwiseIp.Block.parse("::") |> elem(1) |> to_string()
"::/128"
```
"""
@spec parse(String.t()) :: {:ok, t()} | {:error, String.t()}
def parse(cidr) do
case parse_with_or_without_mask(cidr) do
{:error, e} -> {:error, "#{e} in CIDR #{inspect(cidr)}"}
ok -> ok
end
end
defp parse_with_or_without_mask(cidr) do
case String.split(cidr, "/", parts: 2) do
[ip] -> parse_without_mask(ip)
[ip, mask] -> parse_with_mask(ip, mask)
end
end
defp parse_with_mask(ip, mask) do
with {:ok, ip} <- BitwiseIp.parse(ip),
{:ok, mask} <- BitwiseIp.Mask.parse(ip.proto, mask) do
{:ok, %Block{proto: ip.proto, addr: ip.addr &&& mask, mask: mask}}
end
end
@v4 BitwiseIp.Mask.encode(:v4, 32)
@v6 BitwiseIp.Mask.encode(:v6, 128)
defp parse_without_mask(ip) do
with {:ok, ip} <- BitwiseIp.parse(ip) do
case ip.proto do
:v4 -> {:ok, %Block{proto: :v4, addr: ip.addr, mask: @v4}}
:v6 -> {:ok, %Block{proto: :v6, addr: ip.addr, mask: @v6}}
end
end
end
defimpl String.Chars do
def to_string(block) do
ip = %BitwiseIp{proto: block.proto, addr: block.addr}
mask = BitwiseIp.Mask.decode(block.proto, block.mask)
"#{ip}/#{mask}"
end
end
defimpl Enumerable do
def member?(block, ip) do
{:ok, Block.member?(block, ip)}
end
def count(block) do
{:ok, Block.size(block)}
end
def slice(%Block{proto: proto, addr: addr} = block) do
size = Block.size(block)
{:ok, size, &slice(proto, addr + &1, &2)}
end
defp slice(proto, addr, 1) do
[%BitwiseIp{proto: proto, addr: addr}]
end
defp slice(proto, addr, n) do
[%BitwiseIp{proto: proto, addr: addr} | slice(proto, addr + 1, n - 1)]
end
def reduce(%Block{proto: proto, addr: addr} = block, acc, fun) do
size = Block.size(block)
reduce(proto, addr, addr + size - 1, acc, fun)
end
defp reduce(_proto, _addr, _last, {:halt, acc}, _fun) do
{:halted, acc}
end
defp reduce(proto, addr, last, {:suspend, acc}, fun) do
{:suspended, acc, &reduce(proto, addr, last, &1, fun)}
end
defp reduce(proto, addr, last, {:cont, acc}, fun) do
if addr <= last do
ip = %BitwiseIp{proto: proto, addr: addr}
reduce(proto, addr + 1, last, fun.(ip, acc), fun)
else
{:done, acc}
end
end
end
end
|
lib/bitwise_ip/block.ex
| 0.948894 | 0.946547 |
block.ex
|
starcoder
|
defmodule Journey.Step do
@moduledoc ~S"""
The data structure for defining a step in a process.
## Example: Using Journey.Step to Define a Process
iex> _process = %Journey.Process{
...> process_id: "horoscopes-r-us",
...> steps: [
...> %Journey.Step{name: :first_name},
...> %Journey.Step{name: :birth_month},
...> %Journey.Step{name: :birth_day},
...> %Journey.Step{
...> name: :astrological_sign,
...> func: fn _values ->
...> # Everyone is a Taurus!
...> {:ok, "taurus"}
...> end,
...> blocked_by: [
...> %Journey.BlockedBy{step_name: :birth_month, condition: :provided},
...> %Journey.BlockedBy{step_name: :birth_day, condition: :provided}
...> ]
...> },
...> %Journey.Step{
...> name: :horoscope,
...> func: fn values ->
...> name = values[:first_name].value
...> sign = values[:astrological_sign].value
...> {
...> :ok,
...> "#{name}! You are a #{sign}! Now is the perfect time to smash the racist patriarchy!"
...> }
...> end,
...> blocked_by: [
...> %Journey.BlockedBy{step_name: :first_name, condition: :provided},
...> %Journey.BlockedBy{step_name: :astrological_sign, condition: :provided}
...> ]
...> }
...> ]
...> }
"""
@doc false
@derive Jason.Encoder
@enforce_keys [:name]
defstruct [
:name,
func: nil,
blocked_by: []
# TODO: add retry policy
]
@typedoc """
Stores the definition of a process step.
## name
The name of the step, some examples:
:first_name
"horoscope"
"offer_rate"
## func
The function that computes the value for the step.
The function accepts one parameter, which contains the current state of the execution.
When the function computes the value, it should return it as part of a tuple: `{:ok, value}`.
If the function was unable to compute the value, with a retriable error, it should return the tuple `{:retriable, error_details}`.
Any other value will be treated as a non-retriable error.
## blocked_by
A collection of conditions that must be true for the computation to take place.
TODO: examples. make those into functions.
"""
@type t :: %__MODULE__{
name: String.t(),
func: (map() -> {:ok | :retriable | :error, any()}),
blocked_by: list()
}
end
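
# A minimal `func` sketch illustrating the three return shapes (the
# `fetch_rate/1` helper is hypothetical):
#
#     fn values ->
#       case fetch_rate(values[:first_name].value) do
#         {:ok, rate} -> {:ok, rate}
#         {:error, :timeout} -> {:retriable, :timeout}
#         {:error, reason} -> {:error, reason}
#       end
#     end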
|
lib/step.ex
| 0.610918 | 0.730362 |
step.ex
|
starcoder
|
defmodule Maple do
@moduledoc """
The purpose of this module is to parse a GraphQL schema and to dynamically create
easy-to-use client functions at compile time with which a user can execute queries,
mutations, and subscriptions on a GraphQL endpoint.
The module takes options from the configuration:
```
config :maple,
http_adapter: Maple.Clients.Http,
websocket_adapter: Maple.Clients.WebsocketApollo
```
- `:http_adapter` - The default HTTP adapter for completing transactions against the GraphQL
server. Default is: `Maple.Clients.Http`
- `:websocket_adapter` - The default Websocket adapter for completing transactions against the GraphQL
server using websockets. Default is: `Maple.Clients.WebsocketApollo`
"""
alias Maple.{Generators, Helpers}
defmacro __using__(_vars) do
options = [
http_adapter: Application.get_env(:maple, :http_adapter, Maple.Clients.Http),
websocket_adapter: Application.get_env(:maple, :websocket_adapter, Maple.Clients.WebsocketApollo)
]
schema =
options[:http_adapter]
|> apply(:schema, [])
|> Map.get("__schema")
mutation_type_name = schema["mutationType"]["name"]
query_type_name = schema["queryType"]["name"]
subscription_type_name = schema["subscriptionType"]["name"]
schema["types"]
|> Enum.reduce([], fn type, ast ->
cond do
type["name"] == mutation_type_name ->
#Create mutation functions
ast ++
Enum.map(type["fields"], fn func ->
function = Helpers.assign_function_params(func)
Generators.mutation(function, options[:http_adapter])
end)
type["name"] == query_type_name ->
#Create query functions
ast ++
Enum.map(type["fields"], fn func ->
function = Helpers.assign_function_params(func)
[Generators.query(function, options[:http_adapter])]
end)
type["name"] == subscription_type_name ->
#Create subscription functions
ast ++
Enum.map(type["fields"], fn func ->
function = Helpers.assign_function_params(func)
Generators.subscription(function, options[:websocket_adapter])
end)
!Enum.member?(Maple.Constants.types(), type["name"]) && type["fields"] ->
# Create structs
ast ++
[quote do
defmodule unquote(Module.concat(["Maple", "Types", type["name"]])) do
defstruct Enum.map(unquote(Macro.escape(type["fields"])), &(String.to_atom(&1["name"])))
end
end]
true -> ast
end
end)
end
end
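
# A minimal usage sketch. The generated function names depend entirely on the
# schema returned by the configured adapter; `user/2` below is hypothetical:
#
#     defmodule MyApp.Client do
#       use Maple
#     end
#
#     MyApp.Client.user(%{id: "1"}, "id name")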
|
lib/maple.ex
| 0.759939 | 0.79653 |
maple.ex
|
starcoder
|
defmodule Collision.Polygon.RegularPolygon do
@moduledoc """
A regular polygon is equiangular and equilateral -- all
angles and all sides be equal. With enough sides,
a regular polygon tends toward a circle.
"""
defstruct sides: 3, radius: 0, rotation_angle: 0.0, midpoint: %{x: 0, y: 0}, polygon: nil
alias Collision.Polygon.RegularPolygon
alias Collision.Polygon
alias Collision.Polygon.Helper
alias Collision.Polygon.Vertex
alias Collision.Polygon.Edge
alias Collision.Detection.SeparatingAxis
alias Collision.Vector.Vector2
@typedoc """
A regular polygon is defined by a number of sides, a circumradius,
a rotation angle, and a center point `{x, y}`.
"""
@type t :: %__MODULE__{}
@type axis :: {Vertex.t, Vertex.t}
@typep degrees :: number
@typep radians :: number
@doc """
Construct a regular polygon from a tuple.
A polygon must have at least three sides.
## Examples
iex> Collision.Polygon.RegularPolygon.from_tuple({3, 2, 0, {0, 0}})
{:ok, %Collision.Polygon.RegularPolygon{sides: 3, radius: 2,
rotation_angle: 0.0, midpoint: %Collision.Polygon.Vertex{x: 0, y: 0}}}
"""
@spec from_tuple({integer, number, number, {number, number}}, atom) :: {:ok, t} | {:error, String.t()}
def from_tuple({s, _r, _a, {_x, _y}}, _d) when s < 3 do
{:error, "Polygon must have at least three sides"}
end
def from_tuple({s, r, a, {x, y}}, :degrees) do
angle_in_radians = Float.round(Helper.degrees_to_radians(a), 5)
from_tuple({s, r, angle_in_radians, {x, y}}, :radians)
end
def from_tuple({s, r, a, {x, y}}, :radians) do
vertices = calculate_vertices(s, r, a, %{x: x, y: y})
polygon = Polygon.from_vertices(vertices)
{:ok,
%RegularPolygon{
sides: s,
radius: r,
rotation_angle: a,
midpoint: %Vertex{x: x, y: y},
polygon: polygon
}}
end
def from_tuple({s, r, a, {x, y}}), do: from_tuple({s, r, a, {x, y}}, :degrees)
@doc """
Determine the vertices, or points, of the polygon.
## Examples
iex> Collision.Polygon.RegularPolygon.calculate_vertices(
...> %Collision.Polygon.RegularPolygon{
...> sides: 4, radius: 2, rotation_angle: 0, midpoint: %{x: 2, y: 0}
...> })
[{4.0, 0.0}, {2.0, 2.0}, {0.0, 0.0}, {2.0, -2.0}]
"""
@spec calculate_vertices(t) :: [Vertex.t]
def calculate_vertices(%RegularPolygon{} = p) do
calculate_vertices(p.sides, p.radius, p.rotation_angle, p.midpoint)
end
@spec calculate_vertices(number, number, number, %{x: number, y: number}) ::
[Vertex.t] | {:invalid_number_of_sides}
def calculate_vertices(sides, _r, _a, _m) when sides < 3, do: {:invalid_number_of_sides}
def calculate_vertices(sides, radius, initial_rotation_angle, midpoint \\ %{x: 0, y: 0}) do
rotation_angle = 2 * :math.pi / sides
f_rotate_vertex = Polygon.rotate_vertex(initial_rotation_angle, midpoint)
0..sides - 1
|> Stream.map(fn (n) ->
calculate_vertex(radius, midpoint, rotation_angle, n)
end)
|> Stream.map(fn vertex -> f_rotate_vertex.(vertex) end)
|> Enum.map(&Vertex.round_vertex/1)
end
# Find the vertex of a side of a regular polygon given the polygon struct
# and an integer representing a side.
@spec calculate_vertex(number, %{x: number, y: number}, number, integer) :: Vertex.t
defp calculate_vertex(radius, %{x: x, y: y} = midpoint , angle, i) do
x1 = x + radius * :math.cos(i * angle)
y1 = y + radius * :math.sin(i * angle)
%Vertex{x: x1, y: y1}
end
@doc """
Translate a polygon in cartesian space.
"""
@spec translate_polygon([Vertex.t] | RegularPolygon.t, Vertex.t) :: [Vertex.t] | RegularPolygon.t
def translate_polygon(%RegularPolygon{} = p, %{x: _x, y: _y} = c) do
new_midpoint = translate_midpoint(c).(p.midpoint)
new_vertices = calculate_vertices(
p.sides, p.radius, p.rotation_angle, new_midpoint
)
%{p | midpoint: new_midpoint, polygon: Polygon.from_vertices(new_vertices)}
end
defp translate_midpoint(%{x: x_translate, y: y_translate}) do
fn %{x: x, y: y} -> %Vertex{x: x + x_translate, y: y + y_translate} end
end
@doc """
Translate a polygon's vertices.
## Examples
iex(1)> p = Collision.two_dimensional_polygon(4, 3, 0, {0,0})
%Collision.Polygon.RegularPolygon{midpoint: %Collision.Polygon.Vertex{x: 0, y: 0},
n_sides: 4, radius: 3, rotation_angle: 0.0}
iex(2)> Collision.Polygon.RegularPolygon.translate_polygon(p, %{x: -2, y: 2})
%Collision.Polygon.RegularPolygon{midpoint: %Collision.Polygon.Vertex{x: -2, y: 2},
n_sides: 4, radius: 3, rotation_angle: 0.0}
"""
def translate_vertices(polygon_vertices, %{x: _x, y: _y} = translation) do
Enum.map(polygon_vertices, translate_vertex(translation))
end
defp translate_vertex(%{x: x_translate, y: y_translate}) do
fn {x, y} -> {x + x_translate, y + y_translate} end
end
# TODO the api around rotation is ugly
@doc """
Rotate a regular polygon using rotation angle in degrees.
## Examples
iex(1)> p = Collision.two_dimensional_polygon(4, 3, 0, {0,0})
%Collision.Polygon.RegularPolygon{midpoint: %Collision.Polygon.Vertex{x: 0,
y: 0}, sides: 4, radius: 3, rotation_angle: 0.0}
iex(2)> vertices = Collision.Polygon.RegularPolygon.calculate_vertices(p)
[{3.0, 0.0}, {0.0, 3.0}, {-3.0, 0.0}, {0.0, -3.0}]
iex(3)> Collision.Polygon.RegularPolygon.rotate_polygon_degrees(vertices, 180)
[{-3.0, 0.0}, {0.0, -3.0}, {3.0, 0.0}, {0.0, 3.0}]
iex(4)> Collision.Polygon.RegularPolygon.rotate_polygon_degrees(vertices, 360)
[{3.0, 0.0}, {0.0, 3.0}, {-3.0, 0.0}, {0.0, -3.0}]
"""
@spec rotate_polygon_degrees([Vertex.t] | RegularPolygon.t, degrees) :: [Vertex.t]
def rotate_polygon_degrees(vertices, degrees) do
angle_in_radians = Helper.degrees_to_radians(degrees)
rotate_polygon(vertices, angle_in_radians)
end
@doc """
Rotate a regular polygon, rotation angle should be radians.
## Examples
iex(1)> p = Collision.two_dimensional_polygon(4, 3, 0, {0,0})
%Collision.Polygon.RegularPolygon{midpoint: %Collision.Polygon.Vertex{x: 0,
y: 0}, sides: 4, radius: 3, rotation_angle: 0.0}
iex(2)> v = Collision.Polygon.RegularPolygon.calculate_vertices(p)
iex(3)> Collision.Polygon.RegularPolygon.rotate_polygon(v, 3.14)
[{-3.0, 0.0}, {0.0, -3.0}, {3.0, 0.0}, {0.0, 3.0}]
"""
@spec rotate_polygon([Vertex.t], radians, %{x: number, y: number}) :: [Vertex.t]
def rotate_polygon(vertices, radians, rotation_point \\ %{x: 0, y: 0}) do
rotated = fn {x, y} ->
x_offset = x - rotation_point.x
y_offset = y - rotation_point.y
x_term = rotation_point.x + (x_offset * :math.cos(radians) - y_offset * :math.sin(radians))
y_term = rotation_point.y + (x_offset * :math.sin(radians) + y_offset * :math.cos(radians))
{Float.round(x_term, 2), Float.round(y_term, 2)}
end
Enum.map(vertices, fn vertex -> rotated.(vertex) end)
end
@doc """
Rounds the x and y components of an {x, y} tuple.
## Examples
iex> Collision.Polygon.RegularPolygon.round_vertices([{1.55555555, 1.2222222}])
[{1.55556, 1.22222}]
"""
@spec round_vertices([{number, number}]) :: [{number, number}]
def round_vertices(vertices) do
vertices
|> Enum.map(fn {x, y} ->
{Float.round(x, 5), Float.round(y, 5)}
end)
end
defimpl String.Chars, for: RegularPolygon do
def to_string(%RegularPolygon{} = p) do
"%RegularPolygon{sides: #{p.sides}, radius: #{p.radius}, " <>
"midpoint #{p.midpoint}, edges: #{p.polygon.edges}, " <>
"vertices: #{p.polygon.vertices}"
end
end
defimpl Collidable, for: RegularPolygon do
@spec collision?(RegularPolygon.t, RegularPolygon.t) :: boolean
def collision?(p1, p2) do
SeparatingAxis.collision?(p1.polygon.vertices, p2.polygon.vertices)
end
def resolution(%RegularPolygon{} = p1, %RegularPolygon{} = p2) do
SeparatingAxis.collision_mtv(p1.polygon, p2.polygon)
end
@spec resolve_collision(RegularPolygon.t, RegularPolygon.t) :: {RegularPolygon.t, RegularPolygon.t}
def resolve_collision(%RegularPolygon{} = p1, %RegularPolygon{} = p2) do
{mtv, magnitude} = resolution(p1, p2)
vector_from_p1_to_p2 = %Vector2{
x: p2.midpoint.x - p1.midpoint.x,
y: p2.midpoint.y - p1.midpoint.y}
translation_vector =
case Vector.dot_product(mtv, vector_from_p1_to_p2) do
x when x < 0 ->
Vector.scalar_mult(mtv, -1 * magnitude)
_ ->
Vector.scalar_mult(mtv, magnitude)
end
{p1, RegularPolygon.translate_polygon(p2, translation_vector)}
end
end
end
|
lib/collision/polygon/regular_polygon.ex
| 0.932622 | 0.951774 |
regular_polygon.ex
|
starcoder
|
defmodule RayTracer.RTuple do
@moduledoc """
This module wraps basic vector/point operations
"""
import __MODULE__.Helpers
@type t :: %__MODULE__{
values: list()
}
@type vector :: t
@type point :: t
defstruct values: []
@spec sub(t, t) :: t
def sub(a, b) do
a |> zip_map(b, &(&1 - &2)) |> new
end
@spec add(t, t) :: t
def add(a, b) do
a |> zip_map(b, &(&1 + &2)) |> new
end
@spec negate(t) :: t
def negate(a) do
a |> map(&(-&1)) |> new
end
@spec mul(t, number) :: t
def mul(a, scalar) when is_number(scalar), do: mul(scalar, a)
@spec mul(number, t) :: t
def mul(scalar, a) when is_number(scalar) do
a |> map(&(&1 * scalar)) |> new
end
@spec div(t, number) :: t
def div(a, scalar) do
a |> map(&(&1 / scalar)) |> new
end
@spec length(vector) :: number
def length(a), do: magnitude(a)
@spec magnitude(vector) :: number
def magnitude(a) do
a |> map(&(&1 * &1)) |> Enum.sum() |> :math.sqrt
end
@spec normalize(t) :: t
def normalize(v) do
m = magnitude(v)
v |> map(&(&1 / m)) |> new
end
@spec dot(t, t) :: number
def dot(a, b) do
a
|> zip_map(b, &(&1 * &2))
|> Enum.sum()
end
@spec cross(t, t) :: t
def cross(a, b) do
vector(
y(a) * z(b) - z(a) * y(b),
z(a) * x(b) - x(a) * z(b),
x(a) * y(b) - y(a) * x(b)
)
end
@spec point?(t) :: boolean
def point?(v) do
v |> w == 1.0
end
@spec vector?(t) :: boolean
def vector?(v) do
v |> w == 0.0
end
@spec new(tuple()) :: t
def new(values) when is_tuple(values), do: new(Tuple.to_list(values))
@spec new([number]) :: t
def new(values) do
%RayTracer.RTuple{values: values}
end
@spec point(number, number, number) :: point
def point(x, y, z) do
{x, y, z, 1.0} |> new
end
@spec vector(number, number, number) :: vector
def vector(x, y, z) do
{x, y, z, 0.0} |> new
end
@spec x(t) :: number
def x(v) do
v |> value_at(0)
end
@spec y(t) :: number
def y(v) do
v |> value_at(1)
end
@spec z(t) :: number
def z(v) do
v |> value_at(2)
end
@spec w(t) :: number
def w(v) do
v |> value_at(3)
end
@spec set(t, integer, number) :: t
def set(%__MODULE__{values: values}, index, value) do
values
|> List.update_at(index, fn (_) -> value end)
|> new
end
@spec set_w(t, number) :: t
def set_w(t, v), do: set(t, 3, v)
@doc """
Computes a reflection of vector around the normal
"""
@spec reflect(vector, vector) :: vector
def reflect(v, n) do
v
|> sub(
n |> mul(2) |> mul(dot(n, v))
)
end
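# For example, reflecting a vector coming in at 45 degrees off a flat surface:
#
#     v = RayTracer.RTuple.vector(1, -1, 0)
#     n = RayTracer.RTuple.vector(0, 1, 0)
#     RayTracer.RTuple.reflect(v, n)  #=> same as vector(1, 1, 0)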
@spec value_at(t, integer) :: number
defp value_at(%__MODULE__{values: v}, index) do
v |> Enum.at(index)
end
end
|
lib/r_tuple.ex
| 0.906787 | 0.651112 |
r_tuple.ex
|
starcoder
|
defmodule StateChart.Document.State do
use StateChart.Definition do
field(:string, :id, 1)
field(:ref, :ref, 2)
enum Type, :type, 3, [
basic: 0,
composite: 1,
parallel: 2,
history: 3,
initial: 4,
final: 5
]
repeated(:ref, :initials, 4, [:initial])
repeated(StateChart.Runtime.Invoke, :invocations, 5)
repeated(:any, :on_entry, 6)
repeated(:any, :on_exit, 7)
repeated(StateChart.Document.Transition, :transitions, 8)
repeated(:ref, :children, 9)
repeated(:ref, :ancestors, 10)
repeated(:ref, :descendants, 11)
field(:ref, :parent, 12)
field(:uint32, :depth, 13)
field(:uint32, :priority, 14)
field(:ref, :history, 15)
enum History, :history_type, 16, [
shallow: 0,
deep: 1
]
computed(:id, fn
(%{id: "", ref: ref}) ->
ref
(%{id: id}) ->
id
end)
computed(:ancestors_set, fn(%{ancestors: d}) ->
MapSet.new(d)
end)
computed(:descendants_set, fn(%{descendants: d}) ->
MapSet.new(d)
end)
computed(:depth, fn(%{ancestors_set: s}) ->
MapSet.size(s)
end)
end
def compare(%{depth: d, priority: p}, %{depth: d, priority: p}), do: true
def compare(%{depth: d, priority: p1}, %{depth: d, priority: p2}) do
p1 < p2
end
def compare(%{depth: d1}, %{depth: d2}) do
d1 >= d2
end
def on_enter(%{on_entry: on_entry}, context) do
Enum.reduce(on_entry, context, &StateChart.Context.execute(&2, &1))
end
def on_exit(%{on_exit: on_exit}, context) do
Enum.reduce(on_exit, context, &StateChart.Context.execute(&2, &1))
end
alias StateChart.Document.{Analyzer,Transition}
def finalize(
%{initials: initials,
transitions: transitions,
children: children,
ancestors: ancestors,
descendants: descendants,
parent: parent,
history: history} = state,
doc
) do
ancestors = Enum.map(ancestors, &Analyzer.deref(doc, &1))
descendants = Enum.map(descendants, &Analyzer.deref(doc, &1))
%{state |
initials: Enum.map(initials, &Analyzer.deref(doc, &1)),
transitions: Enum.map(transitions, &Transition.finalize(&1, doc)),
children: Enum.map(children, &Analyzer.deref(doc, &1)),
ancestors: ancestors,
ancestors_set: MapSet.new(ancestors),
descendants: descendants,
descendants_set: MapSet.new(descendants),
parent: Analyzer.deref(doc, parent),
history: Analyzer.deref(doc, history)
}
end
end
|
lib/state_chart/document/state.ex
| 0.517327 | 0.488039 |
state.ex
|
starcoder
|
defmodule Stargate.Supervisor do
@moduledoc """
This module defines a top-level supervisor for your Stargate client.
It takes a keyword list of configuration values for connecting to the
cluster and any producers, readers, or consumers desired, and orchestrates
the starting of the process registry and the websocket client(s).
"""
use Supervisor
@type process_key :: {atom(), String.t(), String.t(), String.t(), String.t()}
@doc """
Convenience function for working with the Stargate process registry.
"""
@spec via(atom(), process_key()) :: {:via, atom(), {atom(), process_key()}}
def via(registry, name) do
{:via, Registry, {registry, name}}
end
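# For example, to reach a registered producer process (the key shape follows
# `process_key` above; the tuple values and `:status` message are hypothetical):
#
#     name = {:producer, "ws", "public", "default", "my-topic"}
#     GenServer.call(Stargate.Supervisor.via(:sg_reg_default, name), :status)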
@doc """
Creates a Stargate Supervisor and links it to the current process.
Pass a keyword list of connection and client information to define
the types of websocket connection to establish with the Pulsar cluster.
# Example
opts = [
host: [{"broker-url.com", 8080}]
producer: [
... producer configs ...
],
consumer: [
... consumer configs ...
]
]
See the `Stargate.Producer` and `Stargate.Receiver` modules for the full
list of configuration options to each type of client connection.
"""
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(init_args) do
name = Keyword.get(init_args, :name, :default)
Supervisor.start_link(__MODULE__, init_args, name: :"sg_sup_#{name}")
end
@impl Supervisor
def init(init_args) do
name = Keyword.get(init_args, :name, :default)
registry = :"sg_reg_#{name}"
host = Keyword.fetch!(init_args, :host)
protocol = Keyword.get(init_args, :protocol, "ws")
children =
[
{Registry, name: registry, keys: :unique},
start_producer(registry, host, protocol, Keyword.get(init_args, :producer)),
start_consumer(registry, host, protocol, Keyword.get(init_args, :consumer)),
start_reader(registry, host, protocol, Keyword.get(init_args, :reader))
]
|> List.flatten()
Supervisor.init(children, strategy: :rest_for_one)
end
defp start_producer(_registry, _host, _protocol, nil), do: []
defp start_producer(registry, host, protocol, args) do
case Keyword.keyword?(args) do
true ->
producer_child_spec(registry, host, protocol, args)
false ->
Enum.map(args, fn producer -> producer_child_spec(registry, host, protocol, producer) end)
end
end
defp start_consumer(_registry, _host, _protocol, nil), do: []
defp start_consumer(registry, host, protocol, args) do
receiver_child_spec(:consumer, registry, host, protocol, args)
end
defp start_reader(_registry, _host, _protocol, nil), do: []
defp start_reader(registry, host, protocol, args) do
receiver_child_spec(:reader, registry, host, protocol, args)
end
defp producer_child_spec(registry, host, protocol, args) do
producer_args = merge_args(args, host: host, protocol: protocol, registry: registry)
{Stargate.Producer.Supervisor, producer_args}
end
defp receiver_child_spec(type, registry, host, protocol, args) do
receiver_args =
merge_args(args, type: type, registry: registry, host: host, protocol: protocol)
{Stargate.Receiver.Supervisor, receiver_args}
end
defp merge_args(args1, args2) do
Keyword.merge(args1, args2, fn _k, _v1, v2 -> v2 end)
end
end
|
lib/stargate/supervisor.ex
| 0.806281 | 0.439807 |
supervisor.ex
|
starcoder
|
defmodule Cloak.Cipher do
@moduledoc """
A behaviour for encryption/decryption modules. Use it to write your own custom
Cloak-compatible cipher modules.
## Example
Here's a sample custom cipher that adds "Hello, " to the start of every
ciphertext, and removes it on decryption.
defmodule MyCustomCipher do
@behaviour Cloak.Cipher
def encrypt(plaintext) do
"Hello, #\{to_string(plaintext)\}"
end
def decrypt("Hello, " <> plaintext) do
plaintext
end
def version do
"hello"
end
end
As long as you implement the 3 callbacks below, everything should work
smoothly.
## Configuration
Your custom cipher will be responsible for reading any custom configuration
that it requires from the `:cloak` application configuration.
For example, suppose we wanted to make the word "Hello" in the custom cipher
above configurable. We could add it to the `config.exs`:
config :cloak, MyCustomCipher,
default: true,
tag: "custom",
word: "Cheerio"
And then read it in our cipher:
defmodule MyCustomCipher do
@behaviour Cloak.Cipher
@word Application.get_env(:cloak, __MODULE__)[:word]
def encrypt(plaintext) do
"#\{@word\}, #\{to_string(plaintext)\}"
end
def decrypt(@word <> ", " <> plaintext) do
plaintext
end
def version do
@word
end
end
"""
@doc """
Encrypt a value. Your function should include any information it will need for
decryption with the output.
"""
@callback encrypt(plaintext :: any) :: binary
@doc """
Decrypt a value.
"""
@callback decrypt(ciphertext :: binary) :: binary
@doc """
Must return a string representing the default settings of your module as it is
currently configured.
This will be used by `Cloak.version/0` to generate a unique tag, which can
then be stored on each database table row to track which encryption
configuration it is currently encrypted with.
"""
@callback version :: String.t()
end
|
lib/cloak/ciphers/cipher.ex
| 0.845049 | 0.408955 |
cipher.ex
|
starcoder
|
defmodule Sanbase.Metric.Behaviour do
@moduledoc ~s"""
Behaviour describing a MetricAdapter module.
A MetricAdapter module describes how metrics and metadata for them are fetched.
After a new MetricAdapter module is created, in order to expose it through
the Sanbase.Metric module, it should be added to the list of modules defined
in Sanbase.Metric.Helper
"""
@type slug :: String.t()
@type metric :: String.t()
@type interval :: String.t()
@type opts :: Keyword.t()
@type available_data_types :: :timeseries | :histogram | :table
@type threshold :: number()
@type direction :: :asc | :desc
@type operator ::
:greater_than | :less_than | :greater_than_or_equal_to | :less_than_or_equal_to
@type selector :: slug | map()
@type metadata :: %{
metric: metric,
min_interval: interval(),
default_aggregation: atom(),
available_aggregations: list(atom()),
available_selectors: list(atom()),
data_type: available_data_types(),
complexity_weight: number()
}
@type histogram_value :: String.t() | float() | integer()
@type histogram_label :: String.t()
@type histogram_data_map :: %{
range: list(float()) | list(DateTime.t()),
value: float()
}
@type histogram_data :: list(histogram_data_map())
@type table_data_point :: %{
columns: list(String.t()),
rows: list(String.t()),
values: list(list(number()))
}
@type aggregation :: nil | :any | :sum | :avg | :min | :max | :last | :first | :median
@type slug_float_value_pair :: %{slug: slug, value: float}
@type timeseries_data_point :: %{datetime: DateTime.t(), value: float()}
@type timeseries_data_per_slug_point :: %{
datetime: DateTime.t(),
data: list(slug_float_value_pair())
}
# Return types
@type timeseries_data_result :: {:ok, list(timeseries_data_point)} | {:error, String.t()}
@type aggregated_timeseries_data_result :: {:ok, map()} | {:error, String.t()}
@type timeseries_data_per_slug_result ::
{:ok, list(timeseries_data_per_slug_point)} | {:error, String.t()}
@type table_data_result :: {:ok, table_data_point} | {:error, String.t()}
@type histogram_data_result :: {:ok, histogram_data} | {:error, String.t()}
@type slugs_by_filter_result :: {:ok, list(slug())} | {:error, String.t()}
@type slugs_order_result :: {:ok, list(slug())} | {:error, String.t()}
@type human_readable_name_result :: {:ok, String.t()} | {:error, String.t()}
@type first_datetime_result :: {:ok, DateTime.t()} | {:error, String.t()}
@type last_datetime_computed_at_result :: {:ok, DateTime.t()} | {:error, String.t()}
@type metadata_result :: {:ok, metadata()} | {:error, String.t()}
@type available_slugs_result :: {:ok, list(slug)} | {:error, String.t()}
@type available_metrics_result :: {:ok, list(metric)} | {:error, String.t()}
@type has_incomplete_data_result :: boolean()
@type complexity_weight_result :: number()
@type required_selectors_result :: map()
# Callbacks
@callback timeseries_data(
metric :: metric(),
selector :: selector,
from :: DateTime.t(),
to :: DateTime.t(),
interval :: interval(),
opts :: opts
) ::
timeseries_data_result
@callback timeseries_data_per_slug(
metric :: metric(),
selector :: selector,
from :: DateTime.t(),
to :: DateTime.t(),
interval :: interval(),
opts :: opts
) ::
timeseries_data_per_slug_result
@callback histogram_data(
metric :: metric(),
selector :: selector,
from :: DateTime.t(),
to :: DateTime.t(),
interval :: interval(),
limit :: non_neg_integer()
) :: histogram_data_result
@callback table_data(
metric :: metric(),
selector :: selector,
from :: DateTime.t(),
to :: DateTime.t(),
opts :: opts
) :: table_data_result
@callback aggregated_timeseries_data(
metric :: metric,
selector :: selector,
from :: DateTime.t(),
to :: DateTime.t(),
opts :: opts
) :: aggregated_timeseries_data_result
@callback slugs_by_filter(
metric :: metric,
from :: DateTime.t(),
to :: DateTime.t(),
operator :: operator,
threshold :: threshold,
opts :: opts
) :: slugs_by_filter_result
@callback slugs_order(
metric :: metric,
from :: DateTime.t(),
to :: DateTime.t(),
direction :: direction,
opts :: opts
) :: slugs_order_result
@callback required_selectors() :: required_selectors_result
@callback has_incomplete_data?(metric :: metric) :: has_incomplete_data_result
@callback complexity_weight(metric :: metric) :: complexity_weight_result
@callback first_datetime(metric, selector) :: first_datetime_result
@callback last_datetime_computed_at(metric, selector) :: last_datetime_computed_at_result
@callback human_readable_name(metric) :: human_readable_name_result
@callback metadata(metric) :: metadata_result
@callback available_aggregations() :: list(aggregation)
@callback available_slugs() :: available_slugs_result
@callback available_slugs(metric) :: available_slugs_result
@callback available_metrics() :: list(metric)
@callback available_metrics(selector) :: available_metrics_result()
@callback available_timeseries_metrics() :: list(metric)
@callback available_histogram_metrics() :: list(metric)
@callback available_table_metrics() :: list(metric)
@callback free_metrics() :: list(metric)
@callback restricted_metrics() :: list(metric)
@callback deprecated_metrics_map() :: %{required(String.t()) => String.t()}
@callback access_map() :: map()
@callback min_plan_map() :: map()
@optional_callbacks [
histogram_data: 6,
table_data: 5,
timeseries_data_per_slug: 6,
deprecated_metrics_map: 0
]
end
|
lib/sanbase/metric/behaviour.ex
| 0.883337 | 0.403743 |
behaviour.ex
|
starcoder
|
defmodule Nerves.NetworkInterface do
require Logger
@moduledoc """
This module exposes a simplified view of Linux network configuration to
applications.
## Overview
This module should be added to a supervision tree or started via the
`start_link/0` call. Once running, the module provides functions to
list network interfaces, modify their state (up or down), get statistics
and set IP networking parameters. Network events, such as when an Ethernet
cable is connected, are reported via a `Registry` named `Nerves.NetworkInterface`.
## Privilege
The functions that return information don't require that the `Nerves.NetworkInterface`'s
associated port process has privileged access to the system. If you
need to change any parameters or bring up or down an interface, you should
ensure that the port process is running as a privileged user.
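For example (a sketch; the interface names depend on the target system):
    {:ok, _pid} = Nerves.NetworkInterface.start_link()
    Nerves.NetworkInterface.interfaces()
    # => ["lo", "eth0"]
    Nerves.NetworkInterface.register("eth0")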
"""
@type interface_name :: String.t
@type registration ::
{:ok, pid} |
{:error, {:already_registered, pid}}
@type operstate ::
:unknown
| :notpresent
| :down
| :lowerlayerdown
| :testing
| :dormant
| :up
@type ipv4_address :: String.t()
@type ipv6_address :: String.t()
@type dhcpv6_mode ::
:stateful
| :stateless
@type ifsettings_ipv4 :: %{
ifname: Nerves.NetworkInterface.interface_name(),
domain: String.t(),
ipv4_address: ipv4_address(),
ipv4_broadcast: ipv4_address(),
ipv4_gateway: ipv4_address(),
ipv4_subnet_mask: ipv4_address(),
nameservers: list(ipv4_address())
}
@type ifsettings_ipv6 :: %{
ifname: Nerves.NetworkInterface.interface_name(),
ipv6_domain: String.t(),
ipv6_address: ipv6_address(),
ipv6_nameservers: list(ipv6_address()),
old_ipv6_address: ipv6_address(),
dhcpv6_mode: dhcpv6_mode()
}
@type ifstatus :: %{
ifname: Nerves.NetworkInterface.interface_name(),
index: integer(),
"is_all-multicast": boolean(),
is_broadcast: boolean(),
is_lower_up: boolean(),
is_multicast: boolean(),
is_running: boolean(),
is_up: boolean(),
mac_address: String.t(),
mac_broadcast: String.t(),
mtu: integer(),
operstate: operstate(),
type: :ethernet | :other
}
@type ifevent ::
ifstatus()
| ifsettings_ipv4()
| ifsettings_ipv6()
@doc """
Return the list of network interfaces on this machine.
"""
defdelegate interfaces, to: Nerves.NetworkInterface.Worker
@doc """
Return link-level status on the specified interface.
For example, `Nerves.NetworkInterface.status pid, "eth0"` could return:
{:ok,
%{ifname: "eth0", index: 2, is_broadcast: true, is_lower_up: true,
is_multicast: true, is_running: true, is_up: true,
mac_address: <<224, 219, 85, 231, 139, 93>>,
mac_broadcast: <<255, 255, 255, 255, 255, 255>>, mtu: 1500, operstate: :up,
stats: %{collisions: 0, multicast: 427, rx_bytes: 358417207, rx_dropped: 0,
rx_errors: 0, rx_packets: 301021, tx_bytes: 22813761, tx_dropped: 0,
tx_errors: 0, tx_packets: 212480}, type: :ethernet}}
If the interface doesn't exist, `{:error, :enodev}` is returned.
"""
defdelegate status(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Bring the specified interface up.
Returns `:ok` on success or `{:error, reason}` if an error occurs.
"""
defdelegate ifup(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Bring the specified interface down.
Returns `:ok` on success or `{:error, reason}` if an error occurs.
"""
defdelegate ifdown(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Return the IP configuration for the specified interface as a map. See
`setup/3` for options.
Returns `{:ok, config}` on success or `{:error, reason}` if an error occurs.
"""
defdelegate settings(ifname), to: Nerves.NetworkInterface.Worker
@doc """
Set IP settings for the specified interface. The following options are
available:
* `:ipv4_address` - the IPv4 address of the interface
* `:ipv4_broadcast` - the IPv4 broadcast address for the interface
* `:ipv4_subnet_mask` - the IPv4 subnet mask
* `:ipv4_gateway` - the default gateway
Options can be specified either as a keyword list or as a map.
Returns `:ok` on success or `{:error, reason}` if an error occurs.
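For example (the addresses are illustrative):
    Nerves.NetworkInterface.setup("eth0",
      ipv4_address: "192.168.1.5",
      ipv4_subnet_mask: "255.255.255.0",
      ipv4_gateway: "192.168.1.1")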
"""
defdelegate setup(ifname, options), to: Nerves.NetworkInterface.Worker
@doc """
Register for Nerves.NetworkInterface events on a specific interface
The calling process is the process that will be registered for
all events. The events can be handled by implementing a `handle_info/2` callback.
`def handle_info({Nerves.NetworkInterface, :ifchanged, ifstate} = event, state)`
Use :all to register for events from all interfaces.
The registration registers for messages dispatched out of `Registry`.
For information on how `Registry` works please see that module's
documentation.
"""
@spec register(:all | interface_name) ::
registration |
[registration] |
[]
def register(:all) do
Enum.each(interfaces(), &register/1)
end
def register(ifname) do
Logger.debug("Registering for notifications for #{ifname}...")
Registry.register(Nerves.NetworkInterface, ifname, [])
end
end
|
lib/nerves_network_interface.ex
| 0.87819 | 0.487734 |
nerves_network_interface.ex
|
starcoder
|
defmodule ExOneroster.Web.DemographicView do
use ExOneroster.Web, :view
alias ExOneroster.Web.DemographicView
def render("index.json", %{demographics: demographics}) do
%{data: render_many(demographics, DemographicView, "demographic.json")}
end
def render("show.json", %{demographic: demographic}) do
%{data: render_one(demographic, DemographicView, "demographic.json")}
end
def render("demographic.json", %{demographic: demographic}) do
%{
id: demographic.id,
sourcedId: demographic.sourcedId,
status: demographic.status,
dateLastModified: demographic.dateLastModified,
metadata: demographic.metadata,
birthdate: demographic.birthdate,
sex: demographic.sex,
americanIndianOrAlaskaNative: demographic.americanIndianOrAlaskaNative,
asian: demographic.asian,
blackOrAfricanAmerican: demographic.blackOrAfricanAmerican,
nativeHawaiianOrOtherPacificIslander: demographic.nativeHawaiianOrOtherPacificIslander,
white: demographic.white,
demographicRaceTwoOrMoreRaces: demographic.demographicRaceTwoOrMoreRaces,
hispanicOrLatinoEthnicity: demographic.hispanicOrLatinoEthnicity,
countryOfBirthCode: demographic.countryOfBirthCode,
stateOfBirthAbbreviation: demographic.stateOfBirthAbbreviation,
cityOfBirth: demographic.cityOfBirth,
publicSchoolResidenceStatus: demographic.publicSchoolResidenceStatus
}
end
end
# 1.1 spec response
# {
# "demographics": {
# "sourcedId": "<sourcedid of this demographics record (same as user referenced)>",
# "status": "active | tobedeleted",
# "dateLastModified": "<date these demographics were last modified>",
# "birthdate": "<value> (e.g. 1980-01-01)",
# "sex": "<value> (e.g. Male)",
# "americanIndianOrAlaskaNative": "<value> (e.g. false)",
# "asian": "<value> (e.g. false)",
# "blackOrAfricanAmerican": "<value> (e.g. true)",
# "nativeHawaiianOrOtherPacificIslander": "<value>",
# "white": "<value>",
# "demographicRaceTwoOrMoreRaces": "<value>",
# "hispanicOrLatinoEthnicity": "<value>",
# "countryOfBirthCode": "<value> (e.g. US)",
# "stateOfBirthAbbreviation": "<value> (e.g. NY)",
# "cityOfBirth": "<value> (e.g. New York)",
# "publicSchoolResidenceStatus": "<value> (e.g. 01652)"
# }
# }
|
lib/ex_oneroster/web/views/demographic_view.ex
| 0.548674 | 0.603494 |
demographic_view.ex
|
starcoder
|
defmodule TimeZoneInfo.Worker do
@moduledoc false
# Holds the state for `TimeZoneInfo` and starts the initial update and when
# configured the automatic updates.
use GenServer
alias TimeZoneInfo.Updater
alias TimeZoneInfo.UtcDateTime
@timeout 3 * 60 * 1_000
@doc """
Starts a worker for `TimeZoneInfo`.
"""
def start_link(opts) do
opts = Keyword.put_new(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, nil, opts)
end
# API
@doc """
Runs the update process. This also runs at startup for the initialisation
of `TimeZoneInfo`. This function returns the same as `state/0`.
"""
def update(server \\ __MODULE__, opt) when opt in [:run, :force],
do: GenServer.call(server, {:update, opt}, @timeout)
@doc """
Returns the state of the worker.
Possible return values are:
- `:ok`: `TimeZoneInfo` is initialised and the update process is disabled.
- `{:next, milliseconds}`: `TimeZoneInfo` is initialised and the next update
runs after `milliseconds`.
- `{:error, reason}`
"""
def state(server \\ __MODULE__), do: GenServer.call(server, :state)
@doc """
Returns the tuple `{:next, datetime}` where `datetime` is the date time for
the next update. If `datetime` is `nil` no update process is started.
"""
def next(server \\ __MODULE__), do: GenServer.call(server, :next)
# Implementation
@impl true
def init(_) do
state = do_update(:run)
{:ok, state}
end
@impl true
def handle_info(:update, state) do
state = do_update(:run, state)
{:noreply, state}
end
@impl true
def handle_call({:update, opt}, _from, state) do
state = do_update(opt, state)
{:reply, reply(state), state}
end
def handle_call(:state, _from, state) do
{:reply, reply(state), state}
end
def handle_call(:next, _from, state) do
reply =
case reply(state) do
:ok ->
{:next, :never}
{:next, milliseconds} when is_integer(milliseconds) ->
datetime = DateTime.add(UtcDateTime.now(), milliseconds, :millisecond)
{:next, datetime}
error ->
error
end
{:reply, reply, state}
end
defp do_update(step, state \\ :init) do
case Updater.update(step) do
:ok ->
:ok
{:next, seconds} ->
now = UtcDateTime.now(:unix)
next = seconds - now
timer = Process.send_after(self(), :update, next * 1_000)
with {:next, last_timer} <- state, do: Process.cancel_timer(last_timer)
{:next, timer}
error ->
error
end
end
defp reply(state) do
case state do
:ok ->
:ok
{:next, timer} ->
case Process.read_timer(timer) do
false -> {:next, 0}
milliseconds -> {:next, milliseconds}
end
error ->
error
end
end
end
|
lib/time_zone_info/worker.ex
| 0.877036 | 0.546496 |
worker.ex
|
starcoder
|
defmodule Calendar.DateTime.Parse do
import Calendar.ParseUtil
@secs_between_year_0_and_unix_epoch 719528*24*3600 # From erlang calendar docs: there are 719528 days between Jan 1, 0 and Jan 1, 1970. Does not include leap seconds
@doc """
Parses an RFC 822 datetime string and shifts it to UTC.
Takes an RFC 822 `string` and `year_guessing_base`. The `year_guessing_base`
argument is used in case of a two digit year which is allowed in RFC 822.
The function tries to guess possible four digit versions of the year and
chooses the one closest to `year_guessing_base`. It defaults to 2015.
# Examples
# 2 digit year
iex> "5 Jul 15 20:26:13 PST" |> rfc822_utc
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 2015}}
# 82 as year
iex> "5 Jul 82 20:26:13 PST" |> rfc822_utc
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 1982}}
# 1982 as year
iex> "5 Jul 82 20:26:13 PST" |> rfc822_utc
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 1982}}
# 2 digit year and we use 2099 as the base guessing year
# which means that 15 should be interpreted as 2115 no 2015
iex> "5 Jul 15 20:26:13 PST" |> rfc822_utc(2099)
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 2115}}
"""
def rfc822_utc(string, year_guessing_base \\ 2015) do
string
|> capture_rfc822_string
|> change_captured_year_to_four_digit(year_guessing_base)
|> rfc2822_utc_from_captured
end
defp capture_rfc822_string(string) do
~r/(?<day>[\d]{1,2})[\s]+(?<month>[^\d]{3})[\s]+(?<year>[\d]{2,4})[\s]+(?<hour>[\d]{2})[^\d]?(?<min>[\d]{2})[^\d]?(?<sec>[\d]{2})[^\d]?(((?<offset_sign>[+-])(?<offset_hours>[\d]{2})(?<offset_mins>[\d]{2})|(?<offset_letters>[A-Z]{1,3})))?/
|> Regex.named_captures(string)
end
defp change_captured_year_to_four_digit(cap, year_guessing_base) do
changed_year = to_int(cap["year"])
|> two_to_four_digit_year(year_guessing_base)
|> to_string
%{cap | "year" => changed_year}
end
@doc """
Parses an RFC 2822 or RFC 1123 datetime string.
The datetime is shifted to UTC.
## Examples
iex> rfc2822_utc("Sat, 13 Mar 2010 11:23:03 -0800")
{:ok,
%DateTime{zone_abbr: "UTC", day: 13, hour: 19, minute: 23, month: 3, second: 3, std_offset: 0,
time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0, year: 2010}}
# PST is the equivalent of -0800 in the RFC 2822 standard
iex> rfc2822_utc("Sat, 13 Mar 2010 11:23:03 PST")
{:ok,
%DateTime{zone_abbr: "UTC", day: 13, hour: 19, minute: 23, month: 3, second: 3, std_offset: 0,
time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0, year: 2010}}
# Z is the equivalent of UTC
iex> rfc2822_utc("Sat, 13 Mar 2010 11:23:03 Z")
{:ok,
%DateTime{zone_abbr: "UTC", day: 13, hour: 11, minute: 23, month: 3, second: 3, std_offset: 0,
time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0, year: 2010}}
"""
def rfc2822_utc(string) do
string
|> capture_rfc2822_string
|> rfc2822_utc_from_captured
end
defp rfc2822_utc_from_captured(cap) do
month_num = month_number_for_month_name(cap["month"])
{:ok, offset_in_secs} = offset_in_seconds_rfc2822(cap["offset_sign"],
cap["offset_hours"],
cap["offset_mins"],
cap["offset_letters"])
{:ok, result} = Calendar.DateTime.from_erl({{cap["year"]|>to_int, month_num, cap["day"]|>to_int}, {cap["hour"]|>to_int, cap["min"]|>to_int, cap["sec"]|>to_int}}, "Etc/UTC")
Calendar.DateTime.add(result, offset_in_secs*-1)
end
defp offset_in_seconds_rfc2822(_, _, _, "UTC"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "UT"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "Z"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "GMT"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "EDT"), do: {:ok, -4*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "EST"), do: {:ok, -5*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "CDT"), do: {:ok, -5*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "CST"), do: {:ok, -6*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "MDT"), do: {:ok, -6*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "MST"), do: {:ok, -7*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "PDT"), do: {:ok, -7*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "PST"), do: {:ok, -8*3600 }
defp offset_in_seconds_rfc2822(_, _, _, letters) when letters != "", do: {:error, :invalid_letters}
defp offset_in_seconds_rfc2822(offset_sign, offset_hours, offset_mins, _letters) do
offset_in_secs = hours_mins_to_secs!(offset_hours, offset_mins)
offset_in_secs = case offset_sign do
"-" -> offset_in_secs*-1
_ -> offset_in_secs
end
{:ok, offset_in_secs}
end
@doc """
Takes unix time as an integer or float. Returns a DateTime struct.
## Examples
iex> unix!(1_000_000_000)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {0, 0}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!("1000000000")
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {0, 0}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!("1000000000.010")
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {10_000, 3}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!(1_000_000_000.9876)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {987600, 6}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!(1_000_000_000.999999)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {999999, 6}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
"""
def unix!(unix_time_stamp) when is_integer(unix_time_stamp) do
unix_time_stamp + @secs_between_year_0_and_unix_epoch
|>:calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC")
end
def unix!(unix_time_stamp) when is_float(unix_time_stamp) do
{whole, micro} = int_and_microsecond_for_float(unix_time_stamp)
whole + @secs_between_year_0_and_unix_epoch
|>:calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC", micro)
end
def unix!(unix_time_stamp) when is_binary(unix_time_stamp) do
{int, frac} = Integer.parse(unix_time_stamp)
unix!(int) |> Map.put(:microsecond, parse_fraction(frac))
end
defp int_and_microsecond_for_float(float) do
float_as_string = :erlang.float_to_binary(float, [decimals: 6])
{int, frac} = Integer.parse(float_as_string)
{int, parse_fraction(frac)}
end
@doc """
Parse JavaScript style milliseconds since epoch.
# Examples
iex> js_ms!("1000000000123")
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {123000,3}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> js_ms!(1_000_000_000_123)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {123000,3}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> js_ms!(1424102000000)
%DateTime{zone_abbr: "UTC", day: 16, hour: 15, microsecond: {0, 3}, minute: 53, month: 2, second: 20, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2015}
"""
def js_ms!(millisec) when is_integer(millisec) do
result = (millisec/1000.0) |> unix!
%DateTime{result| microsecond: {elem(result.microsecond, 0), 3}} # change usec precision to 3
end
def js_ms!(millisec) when is_binary(millisec) do
{int, ""} = millisec
|> Integer.parse
js_ms!(int)
end
@doc """
Parses a timestamp in RFC 2616 format.
iex> httpdate("Sat, 06 Sep 2014 09:09:08 GMT")
{:ok, %DateTime{year: 2014, month: 9, day: 6, hour: 9, minute: 9, second: 8, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {0, 0}}}
iex> httpdate("invalid")
{:bad_format, nil}
iex> httpdate("Foo, 06 Foo 2014 09:09:08 GMT")
{:error, :invalid_datetime}
"""
def httpdate(rfc2616_string) do
~r/(?<weekday>[^\s]{3}),\s(?<day>[\d]{2})\s(?<month>[^\s]{3})[\s](?<year>[\d]{4})[^\d](?<hour>[\d]{2})[^\d](?<min>[\d]{2})[^\d](?<sec>[\d]{2})\sGMT/
|> Regex.named_captures(rfc2616_string)
|> httpdate_parsed
end
defp httpdate_parsed(nil), do: {:bad_format, nil}
defp httpdate_parsed(mapped) do
Calendar.DateTime.from_erl(
{
{mapped["year"]|>to_int,
mapped["month"]|>month_number_for_month_name,
mapped["day"]|>to_int},
{mapped["hour"]|>to_int, mapped["min"]|>to_int, mapped["sec"]|>to_int }
}, "Etc/UTC")
end
@doc """
Like `httpdate/1`, but returns the result without tagging it with :ok
in case of success. In case of errors it raises.
iex> httpdate!("Sat, 06 Sep 2014 09:09:08 GMT")
%DateTime{year: 2014, month: 9, day: 6, hour: 9, minute: 9, second: 8, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}
"""
def httpdate!(rfc2616_string) do
{:ok, dt} = httpdate(rfc2616_string)
dt
end
@doc """
Parse RFC 3339 timestamp strings as UTC. If the timestamp is not in UTC it
will be shifted to UTC.
## Examples
iex> rfc3339_utc("fooo")
{:bad_format, nil}
iex> rfc3339_utc("1996-12-19T16:39:57")
{:bad_format, nil}
iex> rfc3339_utc("1996-12-19T16:39:57Z")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
iex> rfc3339_utc("1996-12-19T16:39:57.123Z")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {123000, 3}}}
iex> rfc3339_utc("1996-12-19T16:39:57,123Z")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {123000, 3}}}
iex> rfc3339_utc("1996-12-19T16:39:57-08:00")
{:ok, %DateTime{year: 1996, month: 12, day: 20, hour: 0, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
# No separation chars between numbers. Not RFC 3339, but we still parse it.
iex> rfc3339_utc("19961219T163957-08:00")
{:ok, %DateTime{year: 1996, month: 12, day: 20, hour: 0, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
# Offset does not have colon (-0800). That makes it ISO8601, but not RFC3339. We still parse it.
iex> rfc3339_utc("1996-12-19T16:39:57-0800")
{:ok, %DateTime{year: 1996, month: 12, day: 20, hour: 0, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
"""
def rfc3339_utc(<<year::4-bytes, ?-, month::2-bytes , ?-, day::2-bytes , ?T, hour::2-bytes, ?:, min::2-bytes, ?:, sec::2-bytes, ?Z>>) do
# faster version for certain formats of RFC 3339
{{year|>to_int, month|>to_int, day|>to_int},{hour|>to_int, min|>to_int, sec|>to_int}} |> Calendar.DateTime.from_erl("Etc/UTC")
end
def rfc3339_utc(rfc3339_string) do
parsed = rfc3339_string
|> parse_rfc3339_string
if parsed do
parse_rfc3339_as_utc_parsed_string(parsed, parsed["z"], parsed["offset_hours"], parsed["offset_mins"])
else
{:bad_format, nil}
end
end
@doc """
Parses an RFC 3339 timestamp and shifts it to
the specified time zone.
iex> rfc3339("1996-12-19T16:39:57Z", "Etc/UTC")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
iex> rfc3339("1996-12-19T16:39:57.1234Z", "Etc/UTC")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {123400, 4}}}
iex> rfc3339("1996-12-19T16:39:57-8:00", "America/Los_Angeles")
{:ok, %DateTime{zone_abbr: "PST", day: 19, hour: 16, minute: 39, month: 12, second: 57, std_offset: 0, time_zone: "America/Los_Angeles", utc_offset: -28800, year: 1996}}
iex> rfc3339("1996-12-19T16:39:57.1234-8:00", "America/Los_Angeles")
{:ok, %DateTime{zone_abbr: "PST", day: 19, hour: 16, minute: 39, month: 12, second: 57, std_offset: 0, time_zone: "America/Los_Angeles", utc_offset: -28800, year: 1996, microsecond: {123400, 4}}}
iex> rfc3339("invalid", "America/Los_Angeles")
{:bad_format, nil}
iex> rfc3339("1996-12-19T16:39:57-08:00", "invalid time zone name")
{:invalid_time_zone, nil}
"""
def rfc3339(rfc3339_string, "Etc/UTC") do
rfc3339_utc(rfc3339_string)
end
def rfc3339(rfc3339_string, time_zone) do
rfc3339_utc(rfc3339_string) |> do_parse_rfc3339_with_time_zone(time_zone)
end
defp do_parse_rfc3339_with_time_zone({utc_tag, _utc_dt}, _time_zone) when utc_tag != :ok do
{utc_tag, nil}
end
defp do_parse_rfc3339_with_time_zone({_utc_tag, utc_dt}, time_zone) do
utc_dt |> Calendar.DateTime.shift_zone(time_zone)
end
defp parse_rfc3339_as_utc_parsed_string(mapped, z, _offset_hours, _offset_mins) when z == "Z" or z=="z" do
parse_rfc3339_as_utc_parsed_string(mapped, "", "00", "00")
end
defp parse_rfc3339_as_utc_parsed_string(mapped, _z, offset_hours, offset_mins) when offset_hours == "00" and offset_mins == "00" do
Calendar.DateTime.from_erl(erl_date_time_from_regex_map(mapped), "Etc/UTC", parse_fraction(mapped["fraction"]))
end
defp parse_rfc3339_as_utc_parsed_string(mapped, _z, offset_hours, offset_mins) do
offset_in_secs = hours_mins_to_secs!(offset_hours, offset_mins)
offset_in_secs = case mapped["offset_sign"] do
"-" -> offset_in_secs*-1
_ -> offset_in_secs
end
erl_date_time = erl_date_time_from_regex_map(mapped)
parse_rfc3339_as_utc_with_offset(offset_in_secs, erl_date_time, parse_fraction(mapped["fraction"]))
end
@doc """
Parses an RFC 5545 datetime string of FORM #2 (UTC) or #3 (with time zone identifier)
## Examples
FORM #2 with a Z at the end to indicate UTC
iex> rfc5545("19980119T020321Z")
{:ok, %DateTime{calendar: Calendar.ISO, day: 19, hour: 2, microsecond: {0, 0}, minute: 3, month: 1, second: 21, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 1998, zone_abbr: "UTC"}}
FORM #3 has a time zone identifier
iex> rfc5545("TZID=America/New_York:19980119T020000")
{:ok, %DateTime{calendar: Calendar.ISO, day: 19, hour: 2, microsecond: {0, 0}, minute: 0, month: 1, second: 0, std_offset: 0, time_zone: "America/New_York", utc_offset: -18000, year: 1998, zone_abbr: "EST"}}
From RFC 5545: "If, based on the definition of the referenced time zone, the local
time described occurs more than once (when changing from daylight
to standard time), the DATE-TIME value refers to the first
occurrence of the referenced time. Thus, TZID=America/New_York:20071104T013000
indicates November 4, 2007 at 1:30 A.M. EDT (UTC-04:00)."
iex> rfc5545("TZID=America/New_York:20071104T013000")
{:ok, %DateTime{calendar: Calendar.ISO,
day: 4, hour: 1, microsecond: {0, 0}, minute: 30, month: 11, second: 0,
std_offset: 3600, time_zone: "America/New_York", utc_offset: -18000, year: 2007,
zone_abbr: "EDT"}}
iex> rfc5545("TZID=America/New_York:19980119T020000.123456")
{:ok, %DateTime{calendar: Calendar.ISO, day: 19, hour: 2, microsecond: {123456, 6}, minute: 0, month: 1, second: 0, std_offset: 0, time_zone: "America/New_York", utc_offset: -18000, year: 1998, zone_abbr: "EST"}}
RFC 5545 : "If the local time described does not occur (when
changing from standard to daylight time), the DATE-TIME value is
interpreted using the UTC offset before the gap in local times.
Thus, TZID=America/New_York:20070311T023000 indicates March 11,
2007 at 3:30 A.M. EDT (UTC-04:00), one hour after 1:30 A.M. EST
(UTC-05:00)."
The way this is implemented:
When there is a gap for "spring forward" the difference between the two offsets before and after is added.
E.g. usually the difference in offset between summer and winter time is one hour. Then one hour is added.
iex> rfc5545("TZID=America/New_York:20070311T023000")
{:ok, %DateTime{calendar: Calendar.ISO,
day: 11, hour: 3, microsecond: {0, 0}, minute: 30, month: 3, second: 0,
std_offset: 3600, time_zone: "America/New_York", utc_offset: -18000, year: 2007,
zone_abbr: "EDT"}}
"""
def rfc5545(
<<year::4-bytes, month::2-bytes, day::2-bytes, ?T, hour::2-bytes, min::2-bytes,
sec::2-bytes, ?Z>>
) do
{{year |> to_int, month |> to_int, day |> to_int},
{hour |> to_int, min |> to_int, sec |> to_int}}
|> Calendar.DateTime.from_erl("Etc/UTC")
end
def rfc5545("TZID=" <> string) do
[tz, iso8601] = String.split(string, ":")
{:ok, dt, nil} = Calendar.NaiveDateTime.Parse.iso8601(iso8601)
case Calendar.DateTime.from_erl(Calendar.NaiveDateTime.to_erl(dt), tz, dt.microsecond) do
{:ambiguous, %Calendar.AmbiguousDateTime{possible_date_times: possible_date_times}} ->
# Per the RFC, if the datetime happens more than once, choose the first one.
# The first one is the one with the highest total offset. So they are sorted by
# total offset (UTC and standard offsets) and the highest value is chosen.
chosen_dt =
possible_date_times
|> Enum.sort_by(fn dt -> dt.utc_offset + dt.std_offset end)
|> List.last()
{:ok, chosen_dt}
{:error, :invalid_datetime_for_timezone} ->
most_recent_datetime_before_gap(dt, tz)
not_ambiguous ->
not_ambiguous
end
end
defp most_recent_datetime_before_gap(naive_datetime, time_zone) do
case Calendar.DateTime.from_erl(
Calendar.NaiveDateTime.to_erl(naive_datetime),
time_zone,
naive_datetime.microsecond
) do
      {:ok, datetime} ->
        # If there is no gap, just return the valid DateTime
        {:ok, datetime}
{:error, :invalid_datetime_for_timezone} ->
dt_before =
naive_datetime
# We assume there is a gap and there is no previous gap 26 hours before
|> NaiveDateTime.add(-3600 * 26)
|> NaiveDateTime.to_erl()
|> Calendar.DateTime.from_erl!(time_zone, naive_datetime.microsecond)
dt_after =
naive_datetime
# We assume there is a gap and there is no additional gap 26 hours after
|> NaiveDateTime.add(3600 * 26)
|> NaiveDateTime.to_erl()
|> Calendar.DateTime.from_erl!(time_zone, naive_datetime.microsecond)
offset_difference =
(dt_before.utc_offset + dt_before.std_offset) -
(dt_after.utc_offset + dt_after.std_offset)
|> abs
naive_datetime
|> NaiveDateTime.add(offset_difference)
|> NaiveDateTime.to_erl()
|> Calendar.DateTime.from_erl(time_zone, naive_datetime.microsecond)
end
end
defp parse_fraction("." <> frac), do: parse_fraction(frac)
defp parse_fraction("," <> frac), do: parse_fraction(frac)
defp parse_fraction(""), do: {0, 0}
# parse and return microseconds
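  # e.g. parse_fraction("1234") #=> {123400, 4}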
defp parse_fraction(string) do
usec = String.slice(string, 0..5)
|> String.pad_trailing(6, "0")
|> Integer.parse
|> elem(0)
{usec, min(String.length(string), 6)}
end
defp parse_rfc3339_as_utc_with_offset(offset_in_secs, erl_date_time, fraction) do
greg_secs = :calendar.datetime_to_gregorian_seconds(erl_date_time)
new_time = greg_secs - offset_in_secs
|> :calendar.gregorian_seconds_to_datetime
Calendar.DateTime.from_erl(new_time, "Etc/UTC", fraction)
end
defp erl_date_time_from_regex_map(mapped) do
erl_date_time_from_strings({{mapped["year"],mapped["month"],mapped["day"]},{mapped["hour"],mapped["min"],mapped["sec"]}})
end
defp erl_date_time_from_strings({{year, month, date},{hour, min, sec}}) do
{ {year|>to_int, month|>to_int, date|>to_int},
{hour|>to_int, min|>to_int, sec|>to_int} }
end
defp parse_rfc3339_string(rfc3339_string) do
~r/(?<year>[\d]{4})[^\d]?(?<month>[\d]{2})[^\d]?(?<day>[\d]{2})[^\d](?<hour>[\d]{2})[^\d]?(?<min>[\d]{2})[^\d]?(?<sec>[\d]{2})([\.\,](?<fraction>[\d]+))?((?<z>[zZ])|((?<offset_sign>[\+\-])(?<offset_hours>[\d]{1,2}):?(?<offset_mins>[\d]{2})))/
|> Regex.named_captures(rfc3339_string)
end
end
|
lib/calendar/date_time/parse.ex
| 0.835383 | 0.531209 |
parse.ex
|
starcoder
|
defmodule Sneex.Address.CycleCalculator do
@moduledoc """
This module provides the mechanism to run through all of the modifiers for calculating the total cycles.
It also provides various constructors for building modifiers.
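A sketch of the intended use with illustrative modifier values
(assuming `alias Sneex.Address.CycleCalculator`):
    mods = [
      CycleCalculator.constant(2),
      CycleCalculator.acc_is_16_bit(1),
      CycleCalculator.native_mode(1)
    ]
    CycleCalculator.calc_cycles(cpu, mods)
    # => the constant 2, plus 1 for each condition the CPU currently satisfies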
"""
alias Sneex.Cpu
use Bitwise
defstruct [:cycles, :check_func]
@type t :: %__MODULE__{cycles: integer(), check_func: function()}
# Cycle calculator
@spec calc_cycles(Cpu.t(), list(__MODULE__.t())) :: integer()
def calc_cycles(cpu, mods) do
Enum.reduce(mods, 0, check_mod_builder(cpu))
end
defp check_mod_builder(cpu = %Cpu{}) do
fn %__MODULE__{cycles: c, check_func: f}, count ->
adj = cpu |> f.() |> check_mod(c)
count + adj
end
end
defp check_mod(true, c), do: c
defp check_mod(_, _), do: 0
# Constructors
@spec constant(integer()) :: __MODULE__.t()
def constant(cycles), do: %__MODULE__{cycles: cycles, check_func: fn _cpu -> true end}
@spec acc_is_16_bit(integer()) :: __MODULE__.t()
def acc_is_16_bit(cycles),
do: %__MODULE__{cycles: cycles, check_func: build_check_cpu_func(&Cpu.acc_size/1, :bit16)}
@spec index_is_16_bit(integer()) :: __MODULE__.t()
def index_is_16_bit(cycles),
do: %__MODULE__{cycles: cycles, check_func: build_check_cpu_func(&Cpu.index_size/1, :bit16)}
@spec native_mode(integer()) :: __MODULE__.t()
def native_mode(cycles),
do: %__MODULE__{cycles: cycles, check_func: build_check_cpu_func(&Cpu.emu_mode/1, :native)}
@spec low_direct_page_is_not_zero(integer()) :: __MODULE__.t()
def low_direct_page_is_not_zero(cycles),
do: %__MODULE__{cycles: cycles, check_func: &check_lower_byte_of_direct_page/1}
@spec check_page_boundary(integer(), Sneex.BasicTypes.long(), :x | :y) :: __MODULE__.t()
def check_page_boundary(cycles, initial_addr, :x) do
func = build_page_boundary_func(initial_addr, &Cpu.x/1)
%__MODULE__{cycles: cycles, check_func: func}
end
def check_page_boundary(cycles, initial_addr, :y) do
func = build_page_boundary_func(initial_addr, &Cpu.y/1)
%__MODULE__{cycles: cycles, check_func: func}
end
@spec check_page_boundary_and_emulation_mode(
integer(),
Sneex.BasicTypes.long(),
Sneex.BasicTypes.long()
) ::
__MODULE__.t()
def check_page_boundary_and_emulation_mode(cycles, initial_addr, new_addr) do
func = fn cpu ->
is_emu? = :emulation == Cpu.emu_mode(cpu)
cross_page? = check_page_boundary(initial_addr, new_addr)
is_emu? and cross_page?
end
%__MODULE__{cycles: cycles, check_func: func}
end
defp build_page_boundary_func(initial_addr, accessor) do
fn cpu ->
index = cpu |> accessor.()
indexed_addr = initial_addr + index
check_page_boundary(initial_addr, indexed_addr)
end
end
defp build_check_cpu_func(accessor, value) do
get_data = &(&1 |> accessor.())
&(value == get_data.(&1))
end
defp check_lower_byte_of_direct_page(cpu) do
lower_byte = cpu |> Cpu.direct_page() |> band(0x00FF)
0 != lower_byte
end
defp check_page_boundary(addr1, addr2) when band(addr1, 0xFFFF00) == band(addr2, 0xFFFF00),
do: false
defp check_page_boundary(_addr1, _addr2), do: true
end
|
lib/sneex/address/cycle_calculator.ex
| 0.814385 | 0.410077 |
cycle_calculator.ex
|
starcoder
|
defmodule Textmatrix.Line do
@moduledoc """
%Textmatrix.Line{} describes a single line in the Matrix.
A line buffer consists of character slots which can either hold a single
character or nil.
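For example:
    Textmatrix.Line.new()
    |> Textmatrix.Line.write_string(4, "hi")
    |> to_string()
    # => "    hi"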
"""
# the default filling character is a space
@default_empty_char 32
alias Textmatrix.Line
@type t() :: %Line{
chars: list(char)
}
defstruct chars: []
@doc """
new/0 initializes a new empty line struct
"""
@spec new() :: Line.t()
def new do
%Textmatrix.Line{}
end
@doc """
write_string/3 takes the line, a start position and a string and writes
the characters of the given string to the given line buffer.
"""
@spec write_string(Line.t(), integer(), binary()) :: Line.t()
def write_string(%Line{} = line, x, string) when is_binary(string) do
line = ensure_capacity(line, x + String.length(string))
chars_and_indexes =
string
|> String.to_charlist()
|> Enum.with_index(x)
Enum.reduce(chars_and_indexes, line, fn {char, index}, line ->
write_char(line, index, char)
end)
end
@spec write_char(Line.t(), integer(), char()) :: Line.t()
defp write_char(%Line{} = line, x, char) when is_integer(char) do
chars = List.update_at(line.chars, x, fn _v -> char end)
%Line{line | chars: chars}
end
@doc """
ensure_capacity/2 resizes the given line buffer to the desired size. When the
provided buffer is of sufficient size, it returns the given buffer as is.
"""
@spec ensure_capacity(Line.t(), integer) :: Line.t()
def ensure_capacity(%Line{} = line, min_length) do
if length(line.chars) < min_length do
missing_slots = min_length - length(line.chars) - 1
missing_slots = Enum.map(0..missing_slots, fn _ -> nil end)
%Line{line | chars: line.chars ++ missing_slots}
else
line
end
end
@doc """
to_string/1 converts the given %Line{} struct to a single-line string.
"""
@spec to_string(Line.t(), char()) :: binary()
def to_string(%Line{} = line, emptychar \\ @default_empty_char) do
line.chars
|> Enum.map(&translate_char(&1, emptychar))
|> IO.chardata_to_string()
end
defp translate_char(nil, emptychar), do: emptychar
defp translate_char(char, _), do: char
defimpl String.Chars, for: __MODULE__ do
def to_string(buffer),
do: Textmatrix.Line.to_string(buffer)
end
end
|
lib/textmatrix/line.ex
| 0.80077 | 0.57532 |
line.ex
|
starcoder
|
defmodule Mix.Tasks.Pseudoloc do
@moduledoc """
Mix task for pseudolocalizing the `Gettext` data files.
```
$ mix pseudoloc priv/gettext
```
"""
use Mix.Task
alias Gettext.PO
alias Gettext.PO.{PluralTranslation, Translation}
alias Mix.Shell
@shortdoc "Creates a pseudolocalized translation"
@impl Mix.Task
@doc false
def run(args)
def run([]) do
Mix.raise(
"Must be supplied with the directory where the gettext POT files are stored, typically priv/gettext"
)
end
def run(args) do
gettext_path = hd(args)
pseudo_path = Path.join(gettext_path, "ps/LC_MESSAGES")
File.mkdir_p!(pseudo_path)
Mix.Task.run("gettext.extract")
Mix.Task.run("gettext.merge", args)
pseudo_path
|> get_source_files()
|> Enum.each(&localize_file/1)
end
# ----- Private functions -----
defp get_source_files(path) do
path
|> Path.join("*.po")
|> Path.wildcard()
end
defp localize_file(path) do
Shell.IO.info("Pseudolocalize #{path}")
data =
path
|> PO.parse_file!()
|> update_translations()
|> PO.dump()
File.write!(path, data)
end
defp update_translation(translation = %Translation{}) do
localized_text = Pseudoloc.localize_string(hd(translation.msgid))
%Translation{translation | msgstr: [localized_text]}
end
defp update_translation(translation = %PluralTranslation{}) do
localized_singular = Pseudoloc.localize_string(hd(translation.msgid))
localized_plural = Pseudoloc.localize_string(hd(translation.msgid_plural))
    localized_msgstr =
      Map.new(translation.msgstr, fn
        {0, _value} -> {0, [localized_singular]}
        {key, _value} -> {key, [localized_plural]}
      end)
%PluralTranslation{translation | msgstr: localized_msgstr}
end
defp update_translations(po = %PO{}) do
localized = Enum.map(po.translations, fn translation -> update_translation(translation) end)
%PO{po | translations: localized}
end
end
|
lib/mix/tasks/pseudoloc.ex
| 0.774455 | 0.569314 |
pseudoloc.ex
|
starcoder
|
defmodule NotQwerty123.RandomPassword do
@moduledoc """
Module to generate random passwords.
Users are often advised to use random passwords for authentication.
However, creating truly random passwords is difficult for people to
do well and is something that computers are usually better at.
This module provides the `gen_password` function, which generates
a random password.
"""
import NotQwerty123.PasswordStrength
@alpha Enum.concat(?A..?Z, ?a..?z)
@digits '0123456789'
@punc '!#$%&\'()*+,-./:;<=>?@[\\]^_{|}~"'
@alphabet @alpha ++ @digits ++ @punc
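  # Lookup table from random index to character, e.g. %{0 => ?A, 1 => ?B, ..., 52 => ?0, ...}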
@char_map Enum.map_reduce(@alphabet, 0, fn x, acc ->
{{acc, x}, acc + 1}
end)
|> elem(0)
|> Enum.into(%{})
@doc """
Generate a random password.
## Options
There are two options:
* `:length` - length of the password, in characters
* the default is 8
* the minimum length is 6
* `:characters` - the character set - `:digits`, `:letters`, `:letters_digits` or `:letters_digits_punc`
* the default is `:letters_digits`, which will use letters and digits in the password
* `:digits` will only use digits
* `:letters` will use uppercase and lowercase letters
* `:letters_digits_punc` will use letters, digits and punctuation characters
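## Examples
The output is random, so these results are only illustrative:
    gen_password()
    # => "2Bqxwyrn"
    gen_password(length: 12, characters: :letters_digits_punc)
    # => "rR9'x#wT2q!c"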
"""
def gen_password(opts \\ []) do
{len, chars} =
{Keyword.get(opts, :length, 8), Keyword.get(opts, :characters, :letters_digits)}
for(val <- rand_numbers(len, chars), do: Map.get(@char_map, val))
|> to_string()
|> ensure_strong(opts)
end
defp rand_numbers(len, chars) when len > 5 do
{start_range, end_range} =
case chars do
:digits -> {52, 62}
:letters -> {0, 52}
:letters_digits_punc -> {0, 93}
_ -> {0, 62}
end
:crypto.rand_seed()
for _ <- 1..len, do: Enum.random(start_range..(end_range - 1))
end
defp rand_numbers(_, _) do
raise ArgumentError, message: "The password should be at least 6 characters long."
end
defp ensure_strong(password, opts) do
case strong_password?(password) do
{:ok, password} -> password
_ -> gen_password(opts)
end
end
end
|
lib/not_qwerty123/random_password.ex
| 0.728555 | 0.551272 |
random_password.ex
|
starcoder
|
defmodule VistaClient.Session do
@derive Jason.Encoder
@moduledoc """
Struct to represent an Session.
# Definition _Sessions_
A screening of a film in a cinema on a
screen at a certain time. This is what people book tickets for.
# JSON example:
```
{
"ID": "1001-14164",
"CinemaId": "1001",
"ScheduledFilmId": "HO00000720",
"SessionId": "14164",
"AreaCategoryCodes": [],
"Showtime": "2019-02-26T20:00:00",
"IsAllocatedSeating": false,
"AllowChildAdmits": true,
"SeatsAvailable": 66,
"AllowComplimentaryTickets": true,
"EventId": "",
"PriceGroupCode": "0033",
"ScreenName": "Kino 2",
"ScreenNameAlt": "433402",
"ScreenNumber": 2,
"CinemaOperatorCode": "1001",
"FormatCode": "0000000001",
"FormatHOPK": "0000000001",
"SalesChannels": ";CALL;RSP;GSALE;CELL;KIOSK;PDA;WWW;POSBK;POS;IVR;",
"SessionAttributesNames": ["OV"],
"ConceptAttributesNames": [],
"AllowTicketSales": true,
"HasDynamicallyPricedTicketsAvailable": false,
"PlayThroughId": null,
"SessionBusinessDate": "2019-02-26T00:00:00",
"SessionDisplayPriority": 0,
"GroupSessionsByAttribute": false,
"SoldoutStatus": 0,
"TypeCode": "N"
}
```
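A map decoded from JSON like the one above can be turned into a struct with
`from_map/1` (`decoded_json_map` stands in for that map):
```
%VistaClient.Session{} = VistaClient.Session.from_map(decoded_json_map)
```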
"""
defstruct [
:id_string,
:film_id_string,
:film,
:screen_name,
:seats_available,
:cinema_id,
:cinema,
:attributes,
:version,
:showtime, # <-- when the movie starts
:date # <-- for which day this counts (screening at 1AM counts for day before, usually)
]
import VistaClient.Extractors, only: [
{:extract_id, 2},
{:extract_attributes, 1},
{:extract_datetime, 2},
{:extract_date, 2}
]
def from_map(map) do
with {:f, {:ok, film_id_string}} <- {:f, Map.fetch(map, "ScheduledFilmId")},
{:s, {:ok, session_id_string}} <- {:s, Map.fetch(map, "ID")},
{:n, {:ok, screen_name}} <- {:n, Map.fetch(map, "ScreenName")},
{:a, {:ok, seats_available}} <- {:a, Map.fetch(map, "SeatsAvailable")},
{:ok, cinema_id} <- extract_id(map, "CinemaId"),
{:ok, showtime} <- extract_datetime(map, "Showtime"),
{:ok, date} <- extract_date(map, "SessionBusinessDate"),
{:ok, {version, attributes}} <- extract_attributes(map) do
%__MODULE__{
id_string: session_id_string,
screen_name: screen_name,
seats_available: seats_available,
film_id_string: film_id_string,
cinema_id: cinema_id,
attributes: attributes,
version: version,
showtime: showtime,
date: date
}
else
{:f, _} -> {:error, {:missing_key, "ScheduledFilmId"}}
{:s, _} -> {:error, {:missing_key, "ID"}}
{:n, _} -> {:error, {:missing_key, "ScreenName"}}
{:a, _} -> {:error, {:missing_key, "SeatsAvailable"}}
error -> error
end
end
def handle_validity(structs, filtered, _) when structs == filtered, do: {:ok, structs}
def handle_validity(s, f, [ignore_errors: true]) when s != f, do: {:ok, f}
def handle_validity(s, f, [ignore_errors: false]) when s != f, do: {:error, :contains_unparsable_session}
def from_map_list(sessions, opts \\ [ignore_errors: false]) do
with structs <- Enum.map(sessions, &from_map/1),
filtered <- Enum.filter(structs, fn %__MODULE__{} -> true; _ -> false end),
{:ok, structs} <- handle_validity(structs, filtered, opts), do: {:ok, structs}
end
# HELPERS
@doc """
Takes a session and returns the seconds until the screening starts.
- Can be negative (session already started or has run
- Depends on erlang.localtime to be correct
#FIXME: Abandon Naive DateTime
Let's abandon localtimes and naive datetimes and just use UTC, passing
DateTime structs around.
"""
@spec showing_in(%__MODULE__{}) :: integer()
def showing_in(%__MODULE__{showtime: showtime}) do
# we really should abandon naive datetimes
{{year, month, day}, {hour, minute, second}} = :erlang.localtime()
now_dt = %DateTime{
calendar: Calendar.ISO,
day: day,
hour: hour,
microsecond: {0, 6},
minute: minute,
month: month,
second: second,
std_offset: 0,
time_zone: "Etc/UTC",
utc_offset: 0,
year: year,
zone_abbr: "UTC"
}
show_dt = DateTime.from_naive!(showtime, "Etc/UTC")
DateTime.diff(show_dt, now_dt)
end
end
|
lib/structs/session.ex
| 0.641871 | 0.71108 |
session.ex
|
starcoder
|
defmodule Scitree.Validations do
@moduledoc """
Validations to ensure data is consistent and in the
format expected by Yggdrasil.
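A typical call runs several validations at once. A sketch, assuming a
`%Scitree.Config{}` with `:label` and `:task` fields and illustrative data:
    data = [{"age", :numerical, [1, 2, 3]}]
    config = %Scitree.Config{label: "age", task: :regression}
    Scitree.Validations.validate(data, config, [:label, :dataset_size, :task])
    # => :ok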
"""
alias Scitree.Config
@type data :: [{String.t(), atom(), [term()]}]
@spec validate(data, Config.t(), list()) ::
:ok
| {:error, atom}
| {:error, {:incompatible_column_for_task, col_type :: atom(), valid_types :: [atom()]}}
| {:error, {:unsupported_validation, name :: atom()}}
def validate(data, config \\ %Config{}, validations) do
Enum.find_value(validations, :ok, fn validation ->
with {:ok, validator} <- get_validator(validation),
:ok <- validator.(data, config) do
false
else
error -> error
end
end)
end
defp get_validator(:label), do: {:ok, &validate_label/2}
defp get_validator(:dataset_size), do: {:ok, &validate_dataset_size/2}
defp get_validator(:learner), do: {:ok, &validate_config_learner/2}
defp get_validator(:task), do: {:ok, &validate_task/2}
defp get_validator(name), do: {:error, {:unsupported_validation, name}}
@doc """
Checks if the configuration label is in the dataset.
## Examples
iex> Scitree.Validations.validate_label(data, config)
{:error, :unidentified_label}
"""
@spec validate_label(data, Config.t()) :: :ok | {:error, :unidentified_label}
def validate_label(data, %Config{label: label}) do
data
|> Enum.any?(fn {title, _type, _value} -> title == label end)
|> if do
:ok
else
{:error, :unidentified_label}
end
end
@doc """
Checks if all columns are the same size.
## Examples
iex> Scitree.Validations.validate_dataset_size(data, config)
{:error, :incompatible_column_sizes}
"""
@spec validate_dataset_size(data, Config.t()) :: :ok | {:error, :incompatible_column_sizes}
def validate_dataset_size(data, _config) do
{_title, _type, first} = hd(data)
size = Enum.count(first)
data
|> Enum.all?(fn {_title, _type, vals} -> Enum.count(vals) == size end)
|> if do
:ok
else
{:error, :incompatible_column_sizes}
end
end
@doc """
Checks if config learner is valid.
## Examples
iex> Scitree.Validations.validate_config_learner(data, config)
{:error, :unknown_learner}
"""
@spec validate_config_learner(data, Config.t()) :: :ok | {:error, :unknown_learner}
def validate_config_learner(_data, config) do
if config.learner in [:cart, :gradient_boosted_trees, :random_forest] do
:ok
else
{:error, :unknown_learner}
end
end
@doc """
Check if the task config is compatible with the type of the
dataset's label column.
## Examples
iex> Scitree.Validations.validate_task([{"my column", :numerical, [1]}], %{label: "my column", task: :classification})
{:error, {:incompatible_column_for_task, :numerical, [:categorical, :string]}}
"""
@spec validate_task(data, Config.t()) ::
:ok | {:error, {:incompatible_column_for_task, atom(), [atom()]}}
def validate_task(data, config) do
col_type =
data
|> Enum.find_value(fn {title, type, _value} ->
if title == config.label do
type
end
end)
valid_types = validate_task_types_for_config(config.task)
if col_type in valid_types do
:ok
else
{:error, {:incompatible_column_for_task, col_type, valid_types}}
end
end
defp validate_task_types_for_config(:classification), do: [:categorical, :string]
defp validate_task_types_for_config(:regression), do: [:numerical]
defp validate_task_types_for_config(:ranking), do: [:numerical]
defp validate_task_types_for_config(:categorical_uplift), do: [:categorical]
defp validate_task_types_for_config(_), do: []
end
|
lib/scitree/validations.ex
| 0.830594 | 0.555556 |
validations.ex
|
starcoder
|
defmodule JsonSchema.Parser.ErrorUtil do
@moduledoc """
Contains helper functions for reporting parser errors.
"""
alias JsonSchema.{Parser, Types}
alias Parser.{ParserError, Util}
@doc """
Returns the name of the type of the given value.
## Examples
iex> get_type([1,2,3])
"list"
iex> get_type(%{"type" => "string"})
"object"
iex> get_type("name")
"string"
iex> get_type(42)
"integer"
"""
@spec get_type(any) :: String.t()
def get_type(value) when is_list(value), do: "list"
def get_type(value) when is_map(value), do: "object"
def get_type(value) when is_binary(value), do: "string"
def get_type(value) when is_boolean(value), do: "boolean"
def get_type(value) when is_float(value), do: "float"
def get_type(value) when is_integer(value), do: "integer"
def get_type(value) when is_nil(value), do: "null"
def get_type(_value), do: "unknown"
@spec unsupported_schema_version(String.t(), [String.t()]) :: ParserError.t()
def unsupported_schema_version(supplied_value, supported_versions) do
root_path = URI.parse("#")
stringified_value = sanitize_value(supplied_value)
error_msg = """
Unsupported JSON schema version found at '#'.
"$schema": #{stringified_value}
#{error_markings(stringified_value)}
Was expecting one of the following types:
#{inspect(supported_versions)}
Hint: See the specification section 7. "The '$schema' keyword"
<http://json-schema.org/latest/json-schema-core.html#rfc.section.7>
"""
ParserError.new(root_path, :unsupported_schema_version, error_msg)
end
@spec missing_property(Types.typeIdentifier(), String.t()) :: ParserError.t()
def missing_property(identifier, property) do
full_identifier = print_identifier(identifier)
error_msg = """
Could not find property '#{property}' at '#{full_identifier}'
"""
ParserError.new(identifier, :missing_property, error_msg)
end
@spec invalid_type(Types.typeIdentifier(), String.t(), String.t(), String.t()) ::
ParserError.t()
def invalid_type(identifier, property, expected_type, actual_value) do
actual_type = get_type(actual_value)
stringified_value = sanitize_value(actual_value)
full_identifier = print_identifier(identifier)
error_msg = """
Expected value of property '#{property}' at '#{full_identifier}'
to be of type '#{expected_type}' but found a value of type '#{actual_type}'
"#{property}": #{stringified_value}
#{error_markings(stringified_value)}
"""
ParserError.new(identifier, :type_mismatch, error_msg)
end
@spec schema_name_collision(Types.typeIdentifier()) :: ParserError.t()
def schema_name_collision(identifier) do
full_identifier = print_identifier(identifier)
error_msg = """
Found more than one schema with id: '#{full_identifier}'
"""
ParserError.new(identifier, :name_collision, error_msg)
end
@spec name_collision(Types.typeIdentifier()) :: ParserError.t()
def name_collision(identifier) do
full_identifier = print_identifier(identifier)
error_msg = """
Found more than one property with identifier '#{full_identifier}'
"""
ParserError.new(identifier, :name_collision, error_msg)
end
@spec name_not_a_regex(Types.typeIdentifier(), String.t()) :: ParserError.t()
def name_not_a_regex(identifier, property) do
full_identifier = print_identifier(identifier)
error_msg = """
Could not parse pattern '#{property}' at '#{full_identifier}' into a valid Regular Expression.
Hint: See specification section 6.5.5. "patternProperties"
<https://json-schema.org/latest/json-schema-validation.html#rfc.section.6.5.5>
"""
ParserError.new(identifier, :name_not_a_regex, error_msg)
end
@spec invalid_uri(Types.typeIdentifier(), String.t(), String.t()) :: ParserError.t()
def invalid_uri(identifier, property, actual) do
full_identifier = print_identifier(identifier)
stringified_value = sanitize_value(actual)
error_msg = """
Could not parse property '#{property}' at '#{full_identifier}' into a valid URI.
"id": #{stringified_value}
#{error_markings(stringified_value)}
Hint: See URI specification section 3. "Syntax Components"
<https://tools.ietf.org/html/rfc3986#section-3>
"""
ParserError.new(identifier, :invalid_uri, error_msg)
end
@spec unresolved_reference(
Types.typeIdentifier(),
URI.t()
) :: ParserError.t()
def unresolved_reference(identifier, parent) do
printed_path = to_string(parent)
stringified_value = sanitize_value(identifier)
error_msg = """
The following reference at `#{printed_path}` could not be resolved
"$ref": #{stringified_value}
#{error_markings(stringified_value)}
Hint: See the specification section 9. "Base URI and dereferencing"
<http://json-schema.org/latest/json-schema-core.html#rfc.section.9>
"""
ParserError.new(parent, :unresolved_reference, error_msg)
end
@spec unknown_type(String.t()) :: ParserError.t()
def unknown_type(type_name) do
error_msg = "Could not find parser for type: '#{type_name}'"
ParserError.new(type_name, :unknown_type, error_msg)
end
@spec unexpected_type(Types.typeIdentifier(), String.t()) :: ParserError.t()
def unexpected_type(identifier, error_msg) do
ParserError.new(identifier, :unexpected_type, error_msg)
end
@spec unknown_enum_type(String.t()) :: ParserError.t()
def unknown_enum_type(type_name) do
error_msg = "Unknown or unsupported enum type: '#{type_name}'"
ParserError.new(type_name, :unknown_enum_type, error_msg)
end
@spec unknown_primitive_type(String.t()) :: ParserError.t()
def unknown_primitive_type(type_name) do
error_msg = "Unknown or unsupported primitive type: '#{type_name}'"
ParserError.new(type_name, :unknown_primitive_type, error_msg)
end
@spec unknown_node_type(
Types.typeIdentifier(),
String.t(),
Types.schemaNode()
) :: ParserError.t()
def unknown_node_type(identifier, name, schema_node) do
full_identifier =
identifier
|> Util.add_fragment_child(name)
|> to_string()
stringified_value = sanitize_value(schema_node["type"])
error_msg = """
The value of "type" at '#{full_identifier}' did not match a known node type
"type": #{stringified_value}
#{error_markings(stringified_value)}
Was expecting one of the following types
["null", "boolean", "object", "array", "number", "integer", "string"]
Hint: See the specification section 6.25. "Validation keywords - type"
<http://json-schema.org/latest/json-schema-validation.html#rfc.section.6.25>
"""
ParserError.new(full_identifier, :unknown_node_type, error_msg)
end
@spec print_identifier(Types.typeIdentifier()) :: String.t()
defp print_identifier(identifier) do
to_string(identifier)
end
@spec sanitize_value(any) :: String.t()
defp sanitize_value(raw_value) do
cond do
is_map(raw_value) and raw_value.__struct__ == URI ->
URI.to_string(raw_value)
is_map(raw_value) ->
Jason.encode!(raw_value)
true ->
inspect(raw_value)
end
end
@spec error_markings(String.t()) :: [String.t()]
defp error_markings(value) do
red(String.duplicate("^", String.length(value)))
end
@spec red(String.t()) :: [String.t()]
defp red(str) do
IO.ANSI.format([:red, str])
end
end
|
lib/parser/error_util.ex
| 0.843863 | 0.406921 |
error_util.ex
|
starcoder
|
defmodule ZenMonitor.Local.Dispatcher do
@moduledoc """
`ZenMonitor.Local.Dispatcher` is a GenStage Consumer responsible for throttled delivery of down
messages.
`ZenMonitor.Local` acts as a GenStage Producer, it stores all of the down messages that need to
be dispatched based off of what has been enqueued by the `ZenMonitor.Local.Connector`.
The Dispatcher will deliver these messages throttled by a maximum rate which is controlled by
the {:zen_monitor, :demand_interval} and {:zen_monitor, :demand_amount} settings.
To calculate the maximum number of messages processed per second you can use the following
formula:
maximum_mps = (demand_amount) * (1000 / demand_interval)
For example, if the demand_amount is 1000, and demand_interval is 100 (milliseconds) the maximum
messages per second are:
maximum_mps = (1000) * (1000 / 100)
-> (1000) * 10
-> 10_000
For convenience a `ZenMonitor.Local.Dispatcher.maximum_mps/0` is provided that will perform this
calculation.
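For example, a hypothetical tuning pass might look like this (the values are
illustrative, not recommendations):
    ZenMonitor.Local.Dispatcher.demand_amount(5_000)
    ZenMonitor.Local.Dispatcher.demand_interval(100)
    ZenMonitor.Local.Dispatcher.maximum_mps()
    #=> 50000.0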
"""
use GenStage
use Instruments.CustomFunctions, prefix: "zen_monitor.local.dispatcher"
alias ZenMonitor.Local.Tables
@demand_interval 100
@demand_amount 1000
## Client
def start_link(_opts \\ []) do
GenStage.start_link(__MODULE__, [], name: __MODULE__)
end
@doc """
Gets the demand interval from the Application Environment
The demand interval is the number of milliseconds to wait between demanding more events from the
GenStage Producer (`ZenMonitor.Local`)
This can be controlled at boot and runtime with the {:zen_monitor, :demand_interval} setting,
see `ZenMonitor.Local.Dispatcher.demand_interval/1` for runtime convenience functionality.
"""
@spec demand_interval() :: integer
def demand_interval do
Application.get_env(:zen_monitor, :demand_interval, @demand_interval)
end
@doc """
Puts the demand interval into the Application Environment
This is a simple convenience function for overwriting the {:zen_monitor, :demand_interval} setting
at runtime.
"""
@spec demand_interval(value :: integer) :: :ok
def demand_interval(value) do
Application.put_env(:zen_monitor, :demand_interval, value)
end
@doc """
Gets the demand amount from the Application Environment
The demand amount is the number of events to request from the GenStage Producer
(`ZenMonitor.Local`) every demand interval
This can be controlled at boot and runtime with the {:zen_monitor, :demand_amount} setting, see
`ZenMonitor.Local.Dispatcher.demand_amount/1` for runtime convenience functionality.
"""
@spec demand_amount() :: integer
def demand_amount do
Application.get_env(:zen_monitor, :demand_amount, @demand_amount)
end
@doc """
Puts the demand amount into the Application Environment
This is a simple convenience function for overwriting the {:zen_monitor, :demand_amount} setting
at runtime.
"""
@spec demand_amount(value :: integer) :: :ok
def demand_amount(value) do
Application.put_env(:zen_monitor, :demand_amount, value)
end
@doc """
Calculate the current maximum messages per second
This is a convenience function to help operators understand the current throughput of the
Dispatcher.
"""
@spec maximum_mps() :: float
def maximum_mps do
demand_amount() * (1000 / demand_interval())
end
## Server
def init(_opts) do
Process.flag(:message_queue_data, :off_heap)
{:consumer, nil, subscribe_to: [{ZenMonitor.Local, min_demand: 1}]}
end
@doc """
Handles the events for dispatch
Dispatch is a simple two step procedure followed for each message to be dispatched.
1. Check if the message is still valid. Messages can become invalid if the monitor was
demonitored after the message was enqueued.
2a. If valid: forward the message to the subscriber
2b. If invalid: skip message
Event dispatch will calculate "unfulfilled" demand based on the number of messages skipped
and ask the producer to provide additional events, so that MPS is maintained and the
Dispatcher is not starved by invalid messages.
"""
def handle_events(events, _from, producer) do
delivered = length(events)
increment("events.delivered", delivered)
messages =
for {subscriber, {:DOWN, ref, :process, _, _} = message} <- events,
still_monitored?(subscriber, ref) do
send(subscriber, message)
end
# Ensure that filtering does not starve out the Dispatcher
# Calculate the effective demand by taking the smaller of the current demand_amount and the
# length of events delivered.
effective_demand = min(delivered, demand_amount())
processed = length(messages)
increment("events.processed", processed)
# The unfulfilled demand is the difference between the effective demand and the actual events
unfulfilled = effective_demand - processed
# Ask the producer to fulfill the unfulfilled demand (if this number is 0 or negative, the
# ask helper will handle that for us and not ask for anything)
ask(producer, unfulfilled)
{:noreply, [], producer}
end
@doc """
Handles the callback for the subscription being established with the producer.
This is the start of the demand loop, once the producer confirms subscription, the initial call
to schedule_demand/0 happens.
"""
def handle_subscribe(:producer, _, from, _state) do
schedule_demand()
{:manual, from}
end
@doc """
Handles the periodic generate_demand message
Asks the producer for demand_amount of events then schedules the next demand generation.
"""
def handle_info(:generate_demand, producer) do
ask(producer, demand_amount())
schedule_demand()
{:noreply, [], producer}
end
## Private
@spec ask(producer :: pid, amount :: integer) :: :ok
defp ask(_producer, amount) when amount <= 0, do: :ok
defp ask(producer, amount) do
GenStage.ask(producer, amount)
end
@spec still_monitored?(subscriber :: pid, ref :: reference) :: boolean
defp still_monitored?(subscriber, ref) do
:ets.take(Tables.references(), {subscriber, ref}) != []
end
@spec schedule_demand() :: reference
defp schedule_demand do
Process.send_after(self(), :generate_demand, demand_interval())
end
end
|
lib/zen_monitor/local/dispatcher.ex
| 0.913039 | 0.649801 |
dispatcher.ex
|
starcoder
|
defmodule QueryBuilder.Extension do
@moduledoc ~S"""
Use this module to create an extension module to `QueryBuilder` for app specific query utilities.
Use your query builder extension module wherever you would normally use `QueryBuilder`
Example:
```
defmodule MyApp.QueryBuilder do
use QueryBuilder.Extension
defmacro __using__(opts) do
quote do
require QueryBuilder
QueryBuilder.__using__(unquote(opts))
end
end
# Add app specific query functions
#---------------------------------
def where_initcap(query, field, value) do
text_equals_condition = fn field, value, get_binding_fun ->
{field, binding} = get_binding_fun.(field)
Ecto.Query.dynamic([{^binding, x}], fragment("initcap(?)", ^value) == field(x, ^field))
end
query
|> where(&text_equals_condition.(field, value, &1))
end
end
defmodule MyApp.Accounts.User do
use MyApp.QueryBuilder
schema "users" do
field :name, :string
field :active, :boolean
end
end
defmodule MyApp.Accounts do
alias MyApp.QueryBuilder, as: QB
def list_users(opts \\ []) do
# Query list can include custom query functions as well:
# [where_initcap: {:name, "john"}, where: {:active, true}]
MyApp.Accounts.User
|> QB.from_list(opts)
|> Repo.all()
end
end
```
"""
defmacro __using__(_opts) do
quote do
# Expose all QueryBuilder functions: QueryBuilder.__info__(:functions)
defdelegate left_join(query, assoc_fields, filters \\ [], or_filters \\ []),
to: QueryBuilder
defdelegate maybe_where(query, bool, filters), to: QueryBuilder
defdelegate maybe_where(query, condition, fields, filters, or_filters \\ []),
to: QueryBuilder
defdelegate new(ecto_query), to: QueryBuilder
defdelegate order_by(query, value), to: QueryBuilder
defdelegate order_by(query, assoc_fields, value), to: QueryBuilder
defdelegate preload(query, assoc_fields), to: QueryBuilder
defdelegate where(query, filters), to: QueryBuilder
defdelegate where(query, assoc_fields, filters, or_filters \\ []), to: QueryBuilder
defdelegate offset(query, value), to: QueryBuilder
defdelegate limit(query, value), to: QueryBuilder
@doc ~S"""
Allows to pass a list of operations through a keyword list.
Example:
```
QueryBuilder.from_list(query, [
where: [name: "John", city: "Anytown"],
preload: [articles: :comments]
])
```
"""
def from_list(query, nil), do: query
def from_list(query, []), do: query
def from_list(query, [{operation, arguments} | tail]) do
arguments =
cond do
is_tuple(arguments) -> Tuple.to_list(arguments)
is_list(arguments) -> [arguments]
true -> List.wrap(arguments)
end
apply(__MODULE__, operation, [query | arguments])
|> from_list(tail)
end
end
end
end
|
lib/query_builder/extension.ex
| 0.918407 | 0.574723 |
extension.ex
|
starcoder
|
defmodule MvOpentelemetry do
@moduledoc """
Top level module for Opentelemetry instrumentation, as used at Mindvalley.
Used to publish Opentelemetry events to applicable processors, for example
to Honeycomb.
Opentelemetry resources and processor are configured outside of the scope
of this module, use Opentelemetry directly.
# Example usage
```
# Somewhere in your application startup, for example in Application.start/2:
def start(_type, _args) do
:ok = MvOpentelemetry.register_tracer(:ecto, span_prefix: [:my_app, :repo])
:ok = MvOpentelemetry.register_tracer(:ecto, span_prefix: [:my_app, :replica_repo])
:ok = MvOpentelemetry.register_tracer(:plug)
:ok = MvOpentelemetry.register_tracer(:live_view)
end
```
"""
defmodule Error do
defexception [:message, :module]
end
@doc """
Registers tracer for given functional area. Allowed areas are: :ecto, :plug, :absinthe,
:dataloader and :live_view
"""
@spec register_tracer(:ecto | :plug | :live_view | :absinthe | :dataloader) :: :ok
def register_tracer(atom), do: register_tracer(atom, [])
@doc """
Registers tracer for given functional area with options.
Allowed areas are: :absinthe, :dataloader, :ecto, :plug and :live_view.
You can also provide following options:
## Ecto
- `span_prefix` REQUIRED telemetry prefix to listen to. If you're unsure of what to put here,
[:my_app, :repo] is the right choice.
- `default_attributes` OPTIONAL property list of attributes you want to attach to all traces
from this group, for example [{"service.component", "my_app"}]. Defaults to []
## LiveView
- `prefix` OPTIONAL telemetry prefix that will be emitted in events, for example
"my_app.phoenix". Defaults to "phoenix"
- `name` OPTIONAL atom to identify tracers in case you want to listen to events from
live_view twice.
- `default_attributes` OPTIONAL property list of attributes you want to attach to all traces
from this group, for example [{"service.component", "my_app"}]. Defaults to []
- `query_params_whitelist` OPTIONAL list of query param names you want to allow to log in your
traces, e.g. ["user_id", "product_id"]. Defaults to logging all.
## Absinthe
- `prefix` OPTIONAL telemetry prefix that will be emitted in events, defaults to "graphql"
- `default_attributes` OPTIONAL property list of attributes you want to attach to all traces
from this group, for example [{"service.component", "ecto"}]. Defaults to []
- `include_field_resolution` OPTIONAL boolean for subscribing to field resolution events.
These tend to be noisy and produce a lot of spans, so the default is set to `false`
## Dataloader
- `default_attributes` OPTIONAL property list of attributes you want to attach to all traces
from this group, for example [{"service.component", "ecto"}]. Defaults to []
## Plug
- `span_prefix` OPTIONAL telemetry prefix to listen to. Defaults to [:phoenix, :endpoint]
- `default_attributes` OPTIONAL property list of attributes you want to attach to all traces
from this group, for example [{"service.component", "ecto"}]. Defaults to []
- `query_params_whitelist` OPTIONAL list of query param names you want to allow to log in your
traces, e.g. ["user_id", "product_id"]. Defaults to logging all.
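## Example
A sketch of registering tracers with the options above; the prefix and
attribute values shown are illustrative, not defaults:
```
:ok = MvOpentelemetry.register_tracer(:ecto,
  span_prefix: [:my_app, :repo],
  default_attributes: [{"service.component", "my_app"}]
)
:ok = MvOpentelemetry.register_tracer(:plug,
  query_params_whitelist: ["user_id"]
)
```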
"""
@spec register_tracer(:absinthe | :dataloader | :ecto | :plug | :live_view, Access.t()) :: :ok
def register_tracer(:absinthe, opts), do: MvOpentelemetry.Absinthe.register_tracer(opts)
def register_tracer(:dataloader, opts), do: MvOpentelemetry.Dataloader.register_tracer(opts)
def register_tracer(:ecto, opts), do: MvOpentelemetry.Ecto.register_tracer(opts)
def register_tracer(:plug, opts), do: MvOpentelemetry.Plug.register_tracer(opts)
def register_tracer(:live_view, opts), do: MvOpentelemetry.LiveView.register_tracer(opts)
end
|
lib/mv_opentelemetry.ex
| 0.909506 | 0.811041 |
mv_opentelemetry.ex
|
starcoder
|
defmodule FlubGw do
@moduledoc """
FlubGw: a gateway system for distributing Flub outside of a node network.
# Some definitions
**Channel**
A channel is as defined for `Flub.sub`.
**Route**
A route is a connection between two endpoints. A route is created with a
channel to subscribe to on the local end of the route. Flub messages
received on the local end are sent through the route to the remote end and
re-published there via `Flub.pub`.
**Gateway**
A gateway is a server that accepts messages from a `route` and re-publishes
then remotely
"""
require Logger
@type tcp_route :: {:tcp, dest_host: String.t, dest_port: non_neg_integer()}
@type http_route :: {:http, dest_host: String.t, dest_port: non_neg_integer()}
@type route :: tcp_route() | http_route()
@type tcp_gateway :: {:tcp, local_host: String.t, local_port: non_neg_integer()}
@type gateway :: tcp_gateway()
@spec add_direct_route(route(), atom(), [route_opts: list(), sub_opts: list()]) :: :ok | {:error, any()}
@doc """
Adds a route for flub messages on `channel` to the remote flub
gateway in `route`.
route_opts:
1. sub_to_status: True to Flub.sub to route status reports. You will receive
reports like this: `%Flub.Message{data: %{route: route(), status: :up | :down}, channel: :flubgw_route_status}`
2. autoremove: True to automatically remove this route when the calling pid
dies. False keeps the route around until it is manually removed. Do not set
autoremove to true and then call `remove_direct_route` or
`eliminate_direct_route`; doing so will cause a double-removal situation.
Defaults to true.
sub_opts:
* See `Flub.sub` documentation.
Returns `:ok` on success, `{:error, reason}` on failure.
"""
def add_direct_route(route, channel, [route_opts: route_opts, sub_opts: _sub_opts] = opts) do
# sub to status if requested
if(Keyword.get(route_opts, :sub_to_status, false)) do
Flub.sub(:flubgw_route_status)
end
case FlubGw.Route.Manager.add_direct_route(route, channel, opts) do
:ok ->
# do ghoul outside of route manager for ghoul safety
if(Keyword.get(route_opts, :autoremove, true)) do
Ghoul.summon({route, channel}, on_death: fn({route, channel}, _reason, _ghoul_state) ->
remove_direct_route(route, channel)
end)
end
:ok
{:error, reason} ->
# unsub to status if requested
if(Keyword.get(route_opts, :sub_to_status, false)) do
Flub.unsub(:flubgw_route_status)
end
{:error, reason}
end
end
@spec remove_direct_route(route(), atom()) :: :ok | {:error, any()}
@doc """
Removes a route added by `add_direct_route`. Removing a direct route is only
required if gateway routing is transient. That is: there is no harm in adding
a route that you intend to use forever and not bothering to ever remove it.
Returns `:ok` on success, `{:error, reason}` on failure.
"""
def remove_direct_route(route, channel), do: FlubGw.Route.Manager.remove_direct_route(route, channel)
@spec eliminate_direct_route(route(), atom()) :: :ok | {:error, any()}
@doc """
Eliminates a route added by one or more calls to `add_direct_route`. Calling
this will force the route's reference count to zero and thereby remove it
completely.
Returns `:ok`.
"""
def eliminate_direct_route(route, channel), do: FlubGw.Route.Manager.eliminate_direct_route(route, channel)
@spec start_gateway(gateway()) :: :ok | {:error, any()}
@doc """
Start a gateway server for `gateway`. The server will accept incoming route
connections and republish any messages received.
Returns `:ok` on success, `{:error, reason}` on failure.
"""
def start_gateway({:tcp, local_host: addr, local_port: port} = _gateway) do
case FlubGw.TcpGateway.Listener.Worker.Supervisor.start_child(addr, port) do
{:ok, _} -> :ok
err -> {:error, err}
end
end
@spec stop_gateway(gateway()) :: :ok | {:error, any()}
@doc """
Stop a gateway server for `gateway`.
Returns `:ok` on success, `{:error, reason}` on failure.
"""
def stop_gateway({:tcp, local_host: addr, local_port: port} = _gateway) do
case FlubGw.TcpGateway.Listener.Worker.Supervisor.stop_child(addr, port) do
:ok -> :ok
err -> {:error, err}
end
end
end
|
lib/flub_gw.ex
| 0.869784 | 0.423875 |
flub_gw.ex
|
starcoder
|
defmodule Logger.Backends.Gelf do
@moduledoc """
GELF Logger Backend
A logger backend that will generate Graylog Extended Log Format messages. The
current version only supports UDP messages.
## Configuration
In the config.exs, add gelf_logger as a backend like this:
```
config :logger,
backends: [:console, {Logger.Backends.Gelf, :gelf_logger}]
```
In addition, you'll need to pass in some configuration items to the backend
itself:
```
config :logger, :gelf_logger,
host: "127.0.0.1",
port: 12201,
format: "$message",
application: "myapp",
compression: :gzip, # Defaults to :gzip, also accepts :zlib or :raw
metadata: [:request_id, :function, :module, :file, :line],
hostname: "hostname-override",
json_encoder: Poison,
tags: [
list: "of",
extra: "tags"
]
```
In addition, if you want to use your custom metadata formatter as a "callback",
you'll need to add below configuration entry:
```
format: {Module, :function}
```
Please bear in mind that your formatting function MUST return a tuple in the following
format: `{level, message, timestamp, metadata}`
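For example, a minimal formatter sketch (the module name and the metadata tweak
are illustrative):
```
defmodule MyApp.GelfFormatter do
  def format(level, message, timestamp, metadata) do
    # Must return the four-element tuple described above
    {level, message, timestamp, Keyword.put(metadata, :app, "myapp")}
  end
end
```
with the matching configuration entry `format: {MyApp.GelfFormatter, :format}`.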
In addition to the backend configuration, you might want to check the
[Logger configuration](https://hexdocs.pm/logger/Logger.html) for other
options that might be important for your particular environment. In
particular, modifying the `:utc_log` setting might be necessary
depending on your server configuration.
This backend supports `metadata: :all`.
### Note on the JSON encoder:
Currently, the logger defaults to Poison but it can be switched out for any
module that has an encode!/1 function.
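For example, to switch to Jason (assuming it is already a dependency):
```
config :logger, :gelf_logger,
  json_encoder: Jason
```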
## Usage
Just use Logger as normal.
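For example:
```
require Logger
Logger.info("Something happened")
```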
## Improvements
- [x] Tests
- [ ] TCP Support
- [x] Options for compression (none, zlib)
- [x] Send timestamp instead of relying on the Graylog server to set it
- [x] Find a better way of pulling the hostname
And probably many more. This is only out here because it might be useful to
someone in its current state. Pull requests are always welcome.
## Notes
Credit where credit is due, this would not exist without
[protofy/erl_graylog_sender](https://github.com/protofy/erl_graylog_sender).
"""
@behaviour :gen_event
def init({_module, name}) do
if user = Process.whereis(:user) do
Process.group_leader(self(), user)
{:ok, GelfLogger.Config.configure(name, [])}
else
{:error, :ignore}
end
end
def handle_call({:configure, options}, state) do
if state.socket do
:gen_udp.close(state.socket)
end
{:ok, :ok, GelfLogger.Config.configure(state[:name], options)}
end
def handle_event({_level, gl, _event}, state) when node(gl) != node() do
{:ok, state}
end
def handle_event({level, _gl, {Logger, msg, ts, md}}, %{level: min_level} = state) do
if is_nil(min_level) or Logger.compare_levels(level, min_level) != :lt do
GelfLogger.Worker.handle_cast([level, msg, ts, md], state)
end
{:ok, state}
end
def handle_event(:flush, state) do
{:ok, state}
end
def handle_info({:io_reply, ref, :ok}, %{ref: ref} = state) do
Process.demonitor(ref, [:flush])
{:ok, state}
end
def handle_info({:io_reply, _ref, {:error, error}}, _state) do
raise "failure while logging gelf messages: " <> inspect(error)
end
def handle_info({:DOWN, ref, _, pid, reason}, %{ref: ref}) do
raise "device #{inspect(pid)} exited: " <> Exception.format_exit(reason)
end
def handle_info(_, state) do
{:ok, state}
end
def code_change(_old_vsn, state, _extra) do
{:ok, state}
end
def terminate(_reason, _state) do
:ok
end
end
|
lib/logger/backends/gelf.ex
| 0.705481 | 0.816845 |
gelf.ex
|
starcoder
|
defmodule Cldr.Number.Parser do
@moduledoc """
Functions for parsing numbers and currencies from
a string.
"""
@type per :: :percent | :permille
@number_format "[-+]?[0-9]([0-9_]|[,](?=[0-9]))*(\\.?[0-9_]+([eE][-+]?[0-9]+)?)?"
@doc """
Scans a string in a locale-aware manner and returns
a list of strings and numbers.
## Arguments
* `string` is any `String.t`
* `options` is a keyword list of options
## Options
* `:number` is one of `:integer`, `:float`,
`:decimal` or `nil`. The default is `nil`
meaning that the type is auto-detected as either
an `integer` or a `float`.
* `:backend` is any module that includes `use Cldr`
and is therefore a CLDR backend module. The default
is `Cldr.default_backend!/0`.
* `:locale` is any locale returned by `Cldr.known_locale_names/1`
or a `t:Cldr.LanguageTag`. The default is `options[:backend].get_locale/1`.
## Returns
* A list of strings and numbers
## Notes
Number parsing is performed by `Cldr.Number.Parser.parse/2`
and any options provided are passed to that function.
## Examples
iex> Cldr.Number.Parser.scan("£1_000_000.34")
["£", 1000000.34]
iex> Cldr.Number.Parser.scan("I want £1_000_000 dollars")
["I want £", 1000000, " dollars"]
iex> Cldr.Number.Parser.scan("The prize is 23")
["The prize is ", 23]
iex> Cldr.Number.Parser.scan("The lottery number is 23 for the next draw")
["The lottery number is ", 23, " for the next draw"]
iex> Cldr.Number.Parser.scan("The loss is -1.000 euros", locale: "de", number: :integer)
["The loss is ", -1000, " euros"]
iex> Cldr.Number.Parser.scan "1kg"
[1, "kg"]
iex> Cldr.Number.Parser.scan "A number in the Arabic script ١٢٣٤٥", locale: "ar"
["A number in the Arabic script ", 12345]
"""
@spec scan(String.t(), Keyword.t()) ::
list(String.t() | integer() | float() | Decimal.t()) |
{:error, {module(), String.t()}}
def scan(string, options \\ []) do
{locale, backend} = Cldr.locale_and_backend_from(options)
with {:ok, locale} <- Cldr.validate_locale(locale, backend),
{:ok, symbols} <- Cldr.Number.Symbol.number_symbols_for(locale, backend),
{:ok, number_system} <- digits_number_system_from(locale) do
symbols =
symbols_for_number_system(symbols, number_system)
scanner =
@number_format
|> localize_format_string(locale, backend, symbols)
|> Regex.compile!([:unicode])
normalized_string =
transliterate(string, number_system, :latn, backend)
scanner
|> Regex.split(normalized_string, include_captures: true, trim: true)
|> Enum.map(&parse_element(&1, options))
end
end
defp parse_element(element, options) do
case parse(element, options) do
{:ok, number} -> number
{:error, _} -> element
end
end
@doc """
Parse a string in a locale-aware manner and return
a number.
## Arguments
* `string` is any `t:String`
* `options` is a keyword list of options
## Options
* `:number` is one of `:integer`, `:float`,
`:decimal` or `nil`. The default is `nil`
meaning that the type is auto-detected as either
an `integer` or a `float`.
* `:backend` is any module that includes `use Cldr`
and is therefore a CLDR backend module. The default
is `Cldr.default_backend/0`.
* `:locale` is any locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag.t`. The default is `options[:backend].get_locale/1`.
## Returns
* A number of the requested or default type or
* `{:error, {exception, message}}` if no number could be determined
## Notes
This function parses a string to return a number but
in a locale-aware manner. It will normalise digits,
grouping characters and decimal separators.
It will transliterate digits that are in the
number system of the specific locale. For example, if
the locale is `th` (Thailand), then Thai digits are
transliterated to the Latin script before parsing.
Some number systems do not have decimal digits and in this
case an error will be returned, rather than continue
parsing and return misleading results.
It also caters for different forms of
the `+` and `-` symbols that appear in Unicode and
strips any `_` characters that might be used for
formatting in a string.
It then parses the number using the Elixir standard
library functions.
If the option `:number` is used and the parsed number
cannot be coerced to this type without losing precision
then an error is returned.
## Examples
iex> Cldr.Number.Parser.parse("+1.000,34", locale: "de")
{:ok, 1000.34}
iex> Cldr.Number.Parser.parse("-1_000_000.34")
{:ok, -1000000.34}
iex> Cldr.Number.Parser.parse("1.000", locale: "de", number: :integer)
{:ok, 1000}
iex> Cldr.Number.Parser.parse "١٢٣٤٥", locale: "ar"
{:ok, 12345}
# 1_000.34 cannot be coerced into an integer
# without precision loss so an error is returned.
iex> Cldr.Number.Parser.parse("+1.000,34", locale: "de", number: :integer)
{:error,
{Cldr.Number.ParseError,
"The string \\"+1.000,34\\" could not be parsed as a number"}}
iex> Cldr.Number.Parser.parse "一万二千三百四十五", locale: "ja-u-nu-jpan"
{:error,
{Cldr.UnknownNumberSystemError,
"The number system :jpan is not known or does not have digits"}}
"""
@spec parse(String.t(), Keyword.t()) ::
{:ok, integer() | float() | Decimal.t()} |
{:error, {module(), String.t()}}
def parse(string, options \\ []) when is_binary(string) and is_list(options) do
{locale, backend} = Cldr.locale_and_backend_from(options)
with {:ok, locale} <- Cldr.validate_locale(locale, backend),
{:ok, symbols} <- Cldr.Number.Symbol.number_symbols_for(locale, backend),
{:ok, number_system} <- digits_number_system_from(locale) do
symbols =
symbols_for_number_system(symbols, number_system)
normalized_string =
string
|> transliterate(number_system, :latn, backend)
|> normalize_number_string(locale, backend, symbols)
|> String.trim()
case parse_number(normalized_string, Keyword.get(options, :number)) do
{:error, _} -> {:error, parse_error(string)}
success -> success
end
end
end
defp parse_number(string, nil) do
with {:error, string} <- parse_number(string, :integer),
{:error, string} <- parse_number(string, :float) do
{:error, string}
end
end
defp parse_number(string, :integer) do
case Integer.parse(string) do
{integer, ""} -> {:ok, integer}
_other -> {:error, string}
end
end
defp parse_number(string, :float) do
case Float.parse(string) do
{float, ""} -> {:ok, float}
_other -> {:error, string}
end
end
defp parse_number(string, :decimal) do
case Cldr.Decimal.parse(string) do
{:error, ""} -> {:error, string}
{decimal, ""} -> {:ok, decimal}
_other -> {:error, string}
end
end
@doc """
Resolve currencies from strings within
a list.
Currencies can be identified at the
beginning and/or the end of a string.
## Arguments
* `list` is any list in which currency
names and symbols are expected
* `options` is a keyword list of options
## Options
* `:backend` is any module() that includes `use Cldr` and therefore
is a `Cldr` backend module(). The default is `Cldr.default_backend!/0`
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `t:Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
The default is `options[:backend].get_locale()`
* `:only` is an `atom` or list of `atoms` representing the
currencies or currency types to be considered for a match.
This equates to a list of acceptable currencies for parsing.
See the notes below for currency types.
* `:except` is an `atom` or list of `atoms` representing the
currencies or currency types to be not considered for a match.
This equates to a list of unacceptable currencies for parsing.
See the notes below for currency types.
* `:fuzzy` is a float greater than `0.0` and less than or
equal to `1.0` which is used as input to
`String.jaro_distance/2` to determine if the provided
currency string is *close enough* to a known currency
string for it to identify definitively a currency code.
It is recommended to use numbers greater than `0.8` in
order to reduce false positives.
## Returns
* An ISO4217 currency code as an atom or
* `{:error, {exception, message}}`
## Notes
The `:only` and `:except` options accept a list of
currency codes and/or currency types. The following
types are recognised.
If both `:only` and `:except` are specified,
the `:except` entries take priority - that means
any entries in `:except` are removed from the `:only`
entries.
* `:all`, the default, considers all currencies
* `:current` considers those currencies that have a `:to`
date of nil and which are also known ISO4217 currencies
* `:historic` is the opposite of `:current`
* `:tender` considers currencies that are legal tender
* `:unannotated` considers currencies that don't have
"(some string)" in their names. These are usually
financial instruments.
## Examples
iex> Cldr.Number.Parser.scan("100 US dollars")
...> |> Cldr.Number.Parser.resolve_currencies
[100, :USD]
iex> Cldr.Number.Parser.scan("100 eurosports")
...> |> Cldr.Number.Parser.resolve_currencies(fuzzy: 0.8)
[100, :EUR]
iex> Cldr.Number.Parser.scan("100 dollars des États-Unis")
...> |> Cldr.Number.Parser.resolve_currencies(locale: "fr")
[100, :USD]
"""
@spec resolve_currencies([String.t(), ...], Keyword.t()) ::
list(Cldr.Currency.code() | String.t())
def resolve_currencies(list, options \\ []) when is_list(list) and is_list(options) do
resolve(list, &resolve_currency/2, options)
end
@doc """
Resolve and tokenize percent and permille
symbols from strings within a list.
Percent and permille symbols can be identified
at the beginning and/or the end of a string.
## Arguments
* `list` is any list in which percent and
permille symbols are expected
* `options` is a keyword list of options
## Options
* `:backend` is any module() that includes `use Cldr` and therefore
is a `Cldr` backend module(). The default is `Cldr.default_backend!/0`
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `t:Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
The default is `options[:backend].get_locale()`
## Examples
iex> Cldr.Number.Parser.scan("100%")
...> |> Cldr.Number.Parser.resolve_pers()
[100, :percent]
"""
@doc since: "2.21.0"
@spec resolve_pers([String.t(), ...], Keyword.t()) ::
list(per() | String.t())
def resolve_pers(list, options \\ []) when is_list(list) and is_list(options) do
resolve(list, &resolve_per/2, options)
end
@doc """
Maps a list of terms (usually strings and atoms)
calling a resolver function that operates
on each binary term.
If the resolver function returns `{:error, term}`
then no change is made to the term, otherwise
the return value of the resolver replaces the
original term.
## Arguments
* `list` is a list of terms. Typically this is the
result of calling `Cldr.Number.Parser.scan/1`.
* `resolver` is a function that takes two
arguments. The first is one of the terms
in the `list`. The second is `options`.
* `options` is a keyword list of options
that is passed to the resolver function.
## Note
* The resolver is called only on binary
elements of the list.
## Returns
* `list` as modified through the application
of the resolver function on each binary term.
## Examples
See `Cldr.Number.Parser.resolve_currencies/2` and
`Cldr.Number.Parser.resolve_pers/2` which both
use this function.
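A minimal sketch with an inline resolver (the resolver below is illustrative):
    iex> resolver = fn
    ...>   "ten", _opts -> 10
    ...>   _other, _opts -> {:error, :nomatch}
    ...> end
    iex> Cldr.Number.Parser.resolve(["ten", " frogs"], resolver, [])
    [10, " frogs"]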
"""
@spec resolve(list(any()), fun(), Keyword.t()) :: list()
def resolve(list, resolver, options) do
Enum.map(list, fn
string when is_binary(string) ->
case resolver.(string, options) do
{:error, _} -> string
other -> other
end
other -> other
end)
|> List.flatten()
end
@doc false
defguard is_token(arg) when is_atom(arg) or is_number(arg)
@doc """
Removes any whitespace strings from between
tokens in a list.
Tokens are numbers or atoms.
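For example:
    iex> Cldr.Number.Parser.remove_whitespace_between_tokens([1, " ", :percent])
    [1, :percent]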
"""
@whitespace ~r/^\s*$/u
def remove_whitespace_between_tokens([first, second, third | rest])
when is_token(first) and is_token(third) do
if String.match?(second, @whitespace) do
[first | remove_whitespace_between_tokens([third | rest])]
else
[first | remove_whitespace_between_tokens([second, third | rest])]
end
end
def remove_whitespace_between_tokens([first | rest]) do
[first | remove_whitespace_between_tokens(rest)]
end
def remove_whitespace_between_tokens(first) do
first
end
@doc """
Resolve a currency from the beginning
and/or the end of a string
## Arguments
* `list` is any list in which currency
names and symbols are expected
* `options` is a keyword list of options
## Options
* `:backend` is any module() that includes `use Cldr` and therefore
is a `Cldr` backend module(). The default is `Cldr.default_backend!/0`
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
The default is `options[:backend].get_locale()`
* `:only` is an `atom` or list of `atoms` representing the
currencies or currency types to be considered for a match.
This equates to a list of acceptable currencies for parsing.
See the notes below for currency types.
* `:except` is an `atom` or list of `atoms` representing the
currencies or currency types to be not considered for a match.
This equates to a list of unacceptable currencies for parsing.
See the notes below for currency types.
* `:fuzzy` is a float greater than `0.0` and less than or
equal to `1.0` which is used as input to
`String.jaro_distance/2` to determine if the provided
currency string is *close enough* to a known currency
string for it to identify definitively a currency code.
It is recommended to use numbers greater than `0.8` in
order to reduce false positives.
## Returns
* An ISO4217 currency code as an atom or
* `{:error, {exception, message}}`
## Notes
The `:only` and `:except` options accept a list of
currency codes and/or currency types. The following
types are recognised.
If both `:only` and `:except` are specified,
the `:except` entries take priority - that means
any entries in `:except` are removed from the `:only`
entries.
* `:all`, the default, considers all currencies
* `:current` considers those currencies that have a `:to`
date of nil and which are also known ISO4217 currencies
* `:historic` is the opposite of `:current`
* `:tender` considers currencies that are legal tender
* `:unannotated` considers currencies that don't have
"(some string)" in their names. These are usually
financial instruments.
## Examples
iex> Cldr.Number.Parser.resolve_currency("US dollars")
[:USD]
iex> Cldr.Number.Parser.resolve_currency("100 eurosports", fuzzy: 0.75)
[:EUR]
iex> Cldr.Number.Parser.resolve_currency("dollars des États-Unis", locale: "fr")
[:USD]
iex> Cldr.Number.Parser.resolve_currency("not a known currency", locale: "fr")
{:error,
{Cldr.UnknownCurrencyError,
"The currency \\"not a known currency\\" is unknown or not supported"}}
"""
@spec resolve_currency(String.t(), Keyword.t()) ::
Cldr.Currency.code() | list(Cldr.Currency.code() | String.t()) |
{:error, {module(), String.t()}}
def resolve_currency(string, options \\ []) when is_binary(string) do
{locale, backend} = Cldr.locale_and_backend_from(options)
{only_filter, options} =
Keyword.pop(options, :only, Keyword.get(options, :currency_filter, [:all]))
{except_filter, options} = Keyword.pop(options, :except, [])
{fuzzy, _options} = Keyword.pop(options, :fuzzy, nil)
with {:ok, locale} <- backend.validate_locale(locale),
{:ok, currency_strings} <-
Cldr.Currency.currency_strings(locale, backend, only_filter, except_filter),
{:ok, currency} <- find_and_replace(currency_strings, string, fuzzy) do
currency
else
{:error, {Cldr.Number.ParseError, _}} ->
{:error, unknown_currency_error(string)}
other ->
other
end
end
@doc """
Resolve and tokenize percent or permille
from the beginning and/or the end of a string
## Arguments
* `list` is any list in which percent
and permille symbols are expected
* `options` is a keyword list of options
## Options
* `:backend` is any module() that includes `use Cldr` and therefore
is a `Cldr` backend module(). The default is `Cldr.default_backend!/0`
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
The default is `options[:backend].get_locale()`
## Returns
* A `:percent` or `:permille` or
* `{:error, {exception, message}}`
## Examples
iex> Cldr.Number.Parser.resolve_per "11%"
["11", :percent]
iex> Cldr.Number.Parser.resolve_per "% of linguists"
[:percent, " of linguists"]
iex> Cldr.Number.Parser.resolve_per "% of linguists %"
[:percent, " of linguists ", :percent]
"""
@doc since: "2.21.0"
@spec resolve_per(String.t(), Keyword.t()) ::
per() | list(per() | String.t()) | {:error, {module(), String.t()}}
def resolve_per(string, options \\ []) when is_binary(string) do
{locale, backend} = Cldr.locale_and_backend_from(options)
{fuzzy, _options} = Keyword.pop(options, :fuzzy, nil)
with {:ok, locale} <- backend.validate_locale(locale),
{:ok, per_strings} <- per_strings(locale, backend),
{:ok, per} <- find_and_replace(per_strings, string, fuzzy) do
per
else
{:error, {Cldr.Number.ParseError, _}} ->
{:error, {Cldr.Number.ParseError, "No percent or permille found"}}
other ->
other
end
end
defp per_strings(locale, backend) do
with {:ok, number_system} <- digits_number_system_from(locale),
{:ok, symbols} <- Cldr.Number.Symbol.number_symbols_for(locale, backend) do
symbols = symbols_for_number_system(symbols, number_system)
parse_map = backend.lenient_parse_map(:general, locale.cldr_locale_name)
# Tag permille symbols distinctly so resolve_per/2 can return :permille,
# matching the `per()` typespec
{:ok,
Map.new(
per_map(parse_map, symbols.percent_sign, :percent) ++
per_map(parse_map, symbols.per_mille, :permille)
)}
end
end
defp per_map(parse_map, char, token) do
parse_map
|> Map.fetch!(char)
|> Map.fetch!(:source)
|> String.replace("[", "")
|> String.replace("]", "")
|> String.graphemes()
|> Enum.map(&{&1, token})
end
# Replace localised symbols with canonical forms
defp normalize_number_string(string, locale, backend, symbols) do
string
|> String.replace("_", "")
|> backend.normalize_lenient_parse(:number, locale)
|> backend.normalize_lenient_parse(:general, locale)
|> String.replace(symbols.group, "")
|> String.replace(" ", "")
|> String.replace(symbols.decimal, ".")
|> String.replace("_", "-")
end
defp transliterate(string, from, to, backend) do
module = Module.concat(backend, Number.Transliterate)
case module.transliterate_digits(string, from, to) do
{:error, _} -> string
string -> string
end
end
defp digits_number_system_from(locale) do
number_system = Cldr.Number.System.number_system_from_locale(locale)
with {:ok, _digits} <- Cldr.Number.System.number_system_digits(number_system) do
{:ok, number_system}
end
end
defp symbols_for_number_system(symbols, number_system) do
Map.fetch!(symbols, number_system) || Map.fetch!(symbols, :latn)
end
# Replace canonical forms with localised symbols
defp localize_format_string(string, locale, backend, symbols) do
parse_map = backend.lenient_parse_map(:number, locale.cldr_locale_name)
plus_matchers = Map.get(parse_map, "+").source |> String.replace(["[", "]"], "")
minus_matchers = Map.get(parse_map, "_").source |> String.replace(["[", "]"], "")
grouping_matchers = Map.get(parse_map, ",").source |> String.replace(["[", "]"], "")
string
|> String.replace("[-+]", "[" <> plus_matchers <> minus_matchers <> "]")
|> String.replace(",", grouping_matchers <> maybe_add_space(symbols.group))
|> String.replace("\\.", "\\" <> symbols.decimal)
end
# If the grouping symbol is a pop space then
# also allow a normal space as a group symbol when parsing
@pop_space " " # 0x202c
@space " " # 0x20
defp maybe_add_space(@pop_space), do: @pop_space <> @space
defp maybe_add_space(other), do: other
@doc """
Find a substring at the beginning and/or end of a
string, and replace it.
Ignore any whitespace found at the start or end of the
string when looking for a match. A match is considered
only if there is no alphabetic character adjacent to
the match.
When multiple matches are found, the longest match
is replaced.
## Arguments
* `string_map` is a map where the keys are the strings
to be matched and the values are the replacement.
* `string` is the string in which the find and replace
operation takes place.
* `fuzzy` is a floating point number between 0.0 and 1.0
that is used to implement a fuzzy match using
`String.jaro_distance/2`. The default is `nil` which
means the match is exact at the beginning and/or the
end of the `string`.
## Returns
* `{:ok, list}` where list is `string` broken into the
replacement(s) and the remainder after find and replace. Or
* `{:error, {exception, reason}}` will be returned if
the `fuzzy` parameter is invalid or if no search was found
and no replacement made. In the later case, `exception`
will be `Cldr.Number.ParseError`.
## Examples
iex> Cldr.Number.Parser.find_and_replace(%{"this" => "that"}, "This is a string")
{:ok, ["that", " is a string"]}
iex> Cldr.Number.Parser.find_and_replace(%{"string" => "term"}, "This is a string")
{:ok, ["This is a ", "term"]}
iex> Cldr.Number.Parser.find_and_replace(%{"string" => "term", "this" => "that"}, "This is a string")
{:ok, ["that", " is a ", "term"]}
iex> Cldr.Number.Parser.find_and_replace(%{"unknown" => "term"}, "This is a string")
{:error, {Cldr.Number.ParseError, "No match was found"}}
"""
@doc since: "2.22.0"
@spec find_and_replace(%{binary() => term()}, binary(), float() | nil) ::
{:ok, list()} | {:error, {module(), binary()}}
def find_and_replace(string_map, string, fuzzy \\ nil)
def find_and_replace(string_map, string, nil) when is_map(string_map) and is_binary(string) do
if code = Map.get(string_map, normalize_search_string(string)) do
{:ok, [code]}
else
[starting_code, remainder] = starting_string(string_map, string)
[remainder, ending_code] = ending_string(string_map, remainder)
if starting_code == "" && ending_code == "" do
{:error, {Cldr.Number.ParseError, "No match was found"}}
else
{:ok, Enum.reject([starting_code, remainder, ending_code], &(&1 == ""))}
end
end
end
def find_and_replace(string_map, search, fuzzy)
when is_float(fuzzy) and fuzzy > 0.0 and fuzzy <= 1.0 do
canonical_search = String.downcase(search)
{distance, code} =
string_map
|> Enum.map(fn {k, v} -> {String.jaro_distance(k, canonical_search), v} end)
|> Enum.sort(fn {k1, _v1}, {k2, _v2} -> k1 > k2 end)
|> hd
if distance >= fuzzy do
{:ok, [code]}
else
{:error, {Cldr.Number.ParseError, "No match was found"}}
end
end
def find_and_replace(_currency_strings, _currency, fuzzy) do
{:error,
{
ArgumentError,
"option :fuzzy must be a number > 0.0 and <= 1.0. Found #{inspect(fuzzy)}"
}}
end
defp starting_string(string_map, search) do
[whitespace, trimmed] =
search
|> String.downcase()
|> String.split(~r/^\s*/, parts: 2, include_captures: true, trim: true)
case starts_with(string_map, trimmed) do
[] ->
["", search]
list ->
{string, match_length, code} = longest_match(list)
[_, remainder] = String.split(trimmed, string, parts: 2)
if String.match?(remainder, ~r/^[[:alpha:]]/u) do
["", search]
else
match_length = match_length + :erlang.byte_size(whitespace)
<< _ :: binary-size(match_length), remainder :: binary>> = search
[code, remainder]
end
end
end
defp ending_string(string_map, search) do
trimmed =
search
|> String.downcase()
|> String.trim_trailing()
case ends_with(string_map, trimmed) do
[] ->
[search, ""]
list ->
{string, match_length, code} = longest_match(list)
[remainder, _] = String.split(trimmed, string, parts: 2)
if String.match?(remainder, ~r/[[:alpha:]]$/u) do
[search, ""]
else
match = :erlang.byte_size(trimmed) - match_length
<< remainder :: binary-size(match), _rest :: binary>> = search
[remainder, code]
end
end
end
defp normalize_search_string(string) do
string
|> String.downcase()
|> String.trim()
end
defp starts_with(strings, search) do
Enum.filter(strings, &String.starts_with?(search, elem(&1, 0)))
end
defp ends_with(strings, search) do
Enum.filter(strings, &String.ends_with?(search, elem(&1, 0)))
end
defp longest_match(matches) do
{match, code} =
matches
|> Enum.sort(fn a, b -> String.length(elem(a, 0)) > String.length(elem(b, 0)) end)
|> hd
{match, :erlang.byte_size(match), code}
end
defp unknown_currency_error(currency) do
{Cldr.UnknownCurrencyError, "The currency #{inspect(currency)} is unknown or not supported"}
end
defp parse_error(string) do
{Cldr.Number.ParseError, "The string #{inspect string} could not be parsed as a number"}
end
end
|
lib/cldr/number/parse.ex
| 0.905317 | 0.699729 |
parse.ex
|
starcoder
|
defmodule Rolodex.Mocks.User do
use Rolodex.Schema
@configs [
private: :boolean,
archived: :boolean,
active: :boolean
]
schema "User", desc: "A user record" do
field(:id, :uuid, desc: "The id of the user", required: true)
field(:email, :string, desc: "The email of the user", required: true)
# Nested object
field(:comment, Rolodex.Mocks.Comment)
# Nested schema with a cyclical dependency
field(:parent, Rolodex.Mocks.Parent)
# List of one type
field(:comments, :list, of: [Rolodex.Mocks.Comment])
# Can use the list shorthand
field(:short_comments, [Rolodex.Mocks.Comment])
# List of multiple types
field(:comments_of_many_types, :list,
of: [:string, Rolodex.Mocks.Comment],
desc: "List of text or comment"
)
# A field with multiple possible types
field(:multi, :one_of, of: [:string, Rolodex.Mocks.NotFound])
# Can use a for comprehension to define many fields
for {name, type} <- @configs, do: field(name, type)
end
end
defmodule Rolodex.Mocks.Parent do
use Rolodex.Schema
schema "Parent" do
field(:child, Rolodex.Mocks.User)
end
end
defmodule Rolodex.Mocks.Comment do
use Rolodex.Schema
schema "Comment", desc: "A comment record" do
field(:id, :uuid, desc: "The comment id")
field(:text, :string)
end
end
defmodule Rolodex.Mocks.NotFound do
use Rolodex.Schema
schema "NotFound", desc: "Not found response" do
field(:message, :string)
end
end
defmodule Rolodex.Mocks.NestedDemo do
use Rolodex.Schema
schema "NestedDemo" do
field(:nested, Rolodex.Mocks.FirstNested)
end
end
defmodule Rolodex.Mocks.FirstNested do
use Rolodex.Schema
schema "FirstNested" do
field(:nested, Rolodex.Mocks.SecondNested)
end
end
defmodule Rolodex.Mocks.SecondNested do
use Rolodex.Schema
schema "SecondNested" do
field(:id, :uuid)
end
end
defmodule Rolodex.Mocks.WithPartials do
use Rolodex.Schema
schema "WithPartials" do
field(:created_at, :datetime)
partial(Rolodex.Mocks.Comment)
partial(mentions: [:uuid])
end
end
defmodule Rolodex.Mocks.ParamsSchema do
use Rolodex.Schema
alias Rolodex.Mocks.WithPartials
schema "ParamsSchema" do
field(:account_id, :uuid)
field(:team_id, :integer,
maximum: 10,
minimum: 0,
required: true,
default: 2
)
partial(WithPartials)
end
end
|
test/support/mocks/schemas.ex
| 0.707101 | 0.405684 |
schemas.ex
|
starcoder
|
defmodule Flippant do
@moduledoc """
Feature toggling for Elixir applications.
Flippant defines features in terms of `actors`, `groups`, and `rules`:
* **Actors** - Typically an actor is a `%User{}` or some other persistent
struct that identifies who is using your application.
* **Groups** - Groups identify and qualify actors. For example, the `admins`
group would identify actors that are admins, while `beta-testers` may
identify a few actors that are testing a feature. It is entirely up to you
to define groups in your application.
* **Rules** - Rules bind groups with individual features. These are evaluated
against actors to see if a feature should be enabled.
Let's walk through setting up a few groups and rules.
### Groups
First, a group that nobody can belong to. This is useful for disabling a
feature without deleting it. Groups are registered with a `name` and an
evalutation `function`. In this case the name of our group is "nobody",
and the function always returns `false`:
Flippant.register("nobody", fn(_actor, _values) -> false end)
Now the opposite, a group that everybody can belong to:
Flippant.register("everybody", fn(_actor, _values) -> true end)
To be more specific and define staff only features we define a "staff" group:
Flippant.register("staff", fn
%User{staff?: staff?}, _values -> staff?
end)
Lastly, we'll roll a feature out to a percentage of the actors. The group
expects a list of integers between `0` and `9`. If the user's id modulo `10`
is in the list, then the feature is enabled:
Flippant.register("adopters", fn
_actor, [] -> false
%User{id: id}, samples -> rem(id, 10) in samples
end)
With some core groups defined we can set up some rules now.
### Rules
Rules are comprised of a name, a group, and an optional set of values. Starting
with a simple example that builds on the groups we have already created, we'll
enable the "search" feature:
# Any staff can use the "search" feature
Flippant.enable("search", "staff")
# 30% of "adopters" can use the "search" feature as well
Flippant.enable("search", "adopters", [0, 1, 2])
Because rules are built of binaries and simple data they can be defined or
refined at runtime. In fact, this is a crucial part of feature toggling.
Rules can be added, removed or modified at runtime.
# Turn search off for adopters
Flippant.disable("search", "adopters")
# On second thought, enable it again for 10% of users
Flippant.enable("search", "adopters", [3])
With a set of groups and rules defined we can check whether a feature is
enabled for a particular actor:
staff_user = %User{id: 1, staff?: true}
early_user = %User{id: 2, staff?: false}
later_user = %User{id: 3, staff?: false}
Flippant.enabled?("search", staff_user) #=> true, staff
Flippant.enabled?("search", early_user) #=> false, not an adopter
Flippant.enabled?("search", later_user) #=> true, is an adopter
If an actor qualifies for multiple groups and *any* of the rules evaluate to
true that feature will be enabled for them. Think of the "nobody" and
"everybody" groups that were defined earlier:
Flippant.enable("search", "everybody")
Flippant.enable("search", "nobody")
Flippant.enabled?("search", %User{}) #=> true
### Breakdown
Evaluating rules requires a round trip to the database. Clearly, with a lot
of rules it is inefficient to evaluate each one individually. The
`breakdown/1` function helps with this scenario:
Flippant.enable("search", "staff")
Flippant.enable("delete", "everybody")
Flippant.enable("invite", "nobody")
Flippant.breakdown(%User{id: 1, staff?: true})
#=> %{"search" => true, "delete" => true, "invite" => false}
The breakdown is a simple map of binary keys to boolean values. This is
particularly useful for single page applications where you can serialize the
breakdown on boot or send it back from an endpoint as JSON.
### Adapters
Feature rules are stored in adapters. Flippant comes with a few base adapters:
* `Flippant.Adapters.Memory` - An in-memory adapter, ideal for testing (see below).
* `Flippant.Adapters.Postgres` - A postgrex powered PostgreSQL adapter.
* `Flippant.Adapters.Redis` - A redix powered Redis adapter.
For adapter specific options reference the `start_link/1` function of each.
Some adapters, notably the `Postgres` adapter, may require setup before they
can be used. To simplify the setup process you can run `Flippant.setup()`, or
see the adapters documentation for migration details.
### Testing
Testing is simplest with the `Memory` adapter. Within `config/test.exs` override
the `:adapter`:
config :flippant, adapter: Flippant.Adapters.Memory
The memory adapter will be cleared whenever the application is restarted, or
it can be cleared between test runs using `Flippant.clear/0`.
### Defining Groups on Application Start
Group definitions are stored in a process, which requires the Flippant
application to be started. That means they can't be defined within a
configuration file and should instead be linked from `Application.start/2`.
You can make `Flippant.register/2` calls directly from the application
module, or put them into a separate module and start it as a temporary
worker. Here we're starting a temporary worker with the rest of an
application:
defmodule MyApp do
use Application
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [
worker(MyApp.Flippant, [], restart: :temporary)
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
end
Note that the worker is defined with `restart: :temporary`. Now, define the
`MyApp.Flippant` module:
defmodule MyApp.Flippant do
def start_link do
Flippant.register("everybody", &everybody?/2)
Flippant.register("nobody", &nobody?/2)
Flippant.register("staff", &staff?/2)
:ignore
end
def everybody?(_, _), do: true
def nobody?(_, _), do: false
def staff?(%User{staff?: staff?}, _), do: staff?
end
### Backups and Portability
The `dump/1` and `load/1` functions are handy for storing feature backups on
disk. The backup may be used to transfer features between database servers,
or even between adapters. For example, if you've decided to move away from
using Redis and would like to switch to Postgres instead, you could transfer
the data with a few commands:
# Dump from the Redis instance
Flippant.dump("flippant.dump")
# Restart the application
Application.stop(:flippant)
Application.put_env(:flippant, :adapter, Flippant.Adapter.Postgres)
Application.ensure_started(:flippant)
# Load to the postgres instance
Flippant.load("flippant.dump")
"""
use Supervisor
alias Flippant.Config
@doc """
Start a Flippant process linked to the current process.
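## Examples
A sketch of starting Flippant under an application supervisor (the supervisor
name is illustrative):
    children = [
      {Flippant, name: Flippant}
    ]
    Supervisor.start_link(children, strategy: :one_for_one, name: MyApp.Supervisor)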
"""
@spec start_link([{:name, module()}]) :: Supervisor.on_start()
def start_link(opts \\ []) do
opts = Keyword.put_new(opts, :name, __MODULE__)
conf = Config.new(opts)
:ok = Config.put(opts[:name], conf)
Supervisor.start_link(__MODULE__, conf, name: opts[:name])
end
@impl true
def init(conf) do
Supervisor.init([{conf.adapter, conf.adapter_opts}], strategy: :one_for_one)
end
# Adapter
@doc """
Retrieve the `pid` of the configured adapter process.
This will return `nil` if the adapter hasn't been started.
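## Examples
    Flippant.adapter()
    #=> #PID<0.123.0> (the pid shown is illustrative; `nil` if not started)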
"""
@spec adapter(name :: atom()) :: pid | nil
def adapter(name \\ __MODULE__) do
%{adapter: adapter} = Config.get(name)
Process.whereis(adapter)
end
@doc """
Add a new feature without any rules.
Adding a feature does not enable it for any groups, that can be done using
`enable/2` or `enable/3`.
## Examples
Flippant.add("search")
#=> :ok
"""
@spec add(name :: atom(), feature :: binary()) :: :ok
def add(name \\ __MODULE__, feature) when is_binary(feature) do
name
|> adapter()
|> GenServer.cast({:add, normalize(feature)})
end
@doc """
Generate a mapping of all features and associated rules.
Breakdown without any arguments defaults to `:all`, and will list all
registered features along with their group and value metadata. It is the only
way to retrieve a snapshot of all the features in the system. The operation
is optimized for round-trip efficiency.
Alternatively, breakdown takes a single `actor` argument, typically a
`%User{}` struct or some other entity. It generates a map outlining which
features are enabled for the actor.
## Examples
Assuming the groups `awesome`, `heinous`, and `radical`, and the features
`search`, `delete` and `invite` are enabled, the breakdown would look like:
Flippant.breakdown()
#=> %{"search" => %{"awesome" => [], "heinous" => []},
"delete" => %{"radical" => []},
"invite" => %{"heinous" => []}}
Getting the breakdown for a particular actor:
actor = %User{ id: 1, awesome?: true, radical?: false}
Flippant.breakdown(actor)
#=> %{"delete" => true, "search" => false}
"""
@spec breakdown(actor :: map | struct | :all) :: map
def breakdown(actor \\ :all), do: breakdown(__MODULE__, actor)
@spec breakdown(name :: atom(), actor :: map | struct | :all) :: map
def breakdown(name, actor) do
name
|> adapter()
|> GenServer.call({:breakdown, actor})
end
@doc """
Purge registered features.
This is particularly useful in testing when you want to reset to a clean
slate after a test.
## Examples
Flippant.clear()
#=> :ok
"""
@spec clear(name :: atom()) :: :ok
def clear(name \\ __MODULE__) do
name
|> adapter()
|> GenServer.cast(:clear)
end
@doc """
Disable a feature for a particular group.
The feature is kept, but any rules for that group are removed.
## Examples
Disable the `search` feature for the `adopters` group:
Flippant.disable("search", "adopters")
#=> :ok
Alternatively, individual values may be disabled for a group. This is useful
when a group should stay enabled and only a single value (e.g. a user id) needs
to be removed.
Disable `search` feature for a user in the `adopters` group:
Flippant.disable("search", "adopters", [123])
#=> :ok
"""
@spec disable(binary(), binary(), list(any())) :: :ok
def disable(feature, group, values \\ [])
when is_binary(feature) and is_binary(group) and is_list(values) do
disable(__MODULE__, feature, group, values)
end
@spec disable(name :: atom(), binary(), binary(), list(any())) :: :ok
def disable(name, feature, group, values)
when is_binary(feature) and is_binary(group) and is_list(values) do
name
|> adapter()
|> GenServer.cast({:remove, normalize(feature), group, values})
end
@doc """
Dump the full feature breakdown to a file.
The `dump/1` command aggregates all features using `breakdown/0`, encodes
them as json, and writes the result to a file on disk.
Dumps are portable between adapters, so a dump may be subsequently used to
load the data into another adapter.
## Examples
Dump a daily backup:
Flippant.dump((Date.utc_today() |> Date.to_string()) <> ".dump")
#=> :ok
"""
@spec dump(name :: atom(), binary()) :: :ok | {:error, File.posix()}
def dump(name \\ __MODULE__, path) when is_binary(path) do
dumped =
name
|> adapter()
|> GenServer.call({:breakdown, :all})
|> Jason.encode!()
File.write(path, dumped)
end
@doc """
Enable a feature for a particular group.
Features can be enabled for a group along with a set of values. The values
will be passed along to the group's registered function when determining
whether a feature is enabled for a particular actor.
Values are useful when limiting a feature to a subset of actors by `id` or
some other distinguishing factor. Value serialization can be customized by
using an alternate module implementing the `Flippant.Serializer` behaviour.
## Examples
Enable the `search` feature for the `radical` group, without any specific
values:
Flippant.enable("search", "radical")
#=> :ok
Assuming the group `awesome` checks whether an actor's id is in the list of
values, you would enable the `search` feature for actors 1, 2 and 3 like
this:
Flippant.enable("search", "awesome", [1, 2, 3])
#=> :ok
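As a sketch of how such values are typically consumed, the function
registered for the `awesome` group might check membership like this (the
group registration API is an assumption and is not shown in this excerpt):
Flippant.register("awesome", fn actor, values -> actor.id in values end)
#=> :ok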
"""
@spec enable(binary, binary, list(any)) :: :ok
def enable(feature, group, values \\ []) do
enable(__MODULE__, feature, group, values)
end
@spec enable(name :: atom(), binary, binary, list(any)) :: :ok
def enable(name, feature, group, values)
when is_binary(feature) and is_binary(group) do
name
|> adapter()
|> GenServer.cast({:add, normalize(feature), {group, values}})
end
@doc """
Check if a particular feature is enabled for an actor.
If the actor belongs to any groups that have access to the feature then it
will be enabled.
## Examples
Flippant.enabled?("search", actor)
#=> false
"""
@spec enabled?(name :: atom(), binary, map | struct) :: boolean
def enabled?(name \\ __MODULE__, feature, actor) when is_binary(feature) do
name
|> adapter()
|> GenServer.call({:enabled?, normalize(feature), actor})
end
@doc """
Check whether a given feature has been registered.
If a `group` is provided it will check whether the feature has any rules for
that group.
## Examples
Flippant.exists?("search")
#=> false
Flippant.add("search")
Flippant.exists?("search")
#=> true
"""
@spec exists?(binary(), binary() | :any) :: boolean()
def exists?(feature, group \\ :any) when is_binary(feature) do
exists?(__MODULE__, feature, group)
end
@spec exists?(name :: atom(), binary(), binary() | :any) :: boolean()
def exists?(name, feature, group) when is_binary(feature) do
name
|> adapter()
|> GenServer.call({:exists?, normalize(feature), group})
end
@doc """
List all known features or only features enabled for a particular group.
## Examples
Given the features `search` and `delete`:
Flippant.features()
#=> ["search", "delete"]
Flippant.features(:all)
#=> ["search", "delete"]
If the `search` feature were only enabled for the `awesome` group:
Flippant.features("awesome")
#=> ["search"]
"""
@spec features(:all | binary()) :: list(binary())
def features(group \\ :all) do
features(__MODULE__, group)
end
@spec features(name :: atom(), :all | binary()) :: list(binary())
def features(name, group) do
name
|> adapter()
|> GenServer.call({:features, group})
end
@doc """
Restore all features from a dump file.
Dumped features may be restored in full using the `load/1` function. During
the load process the file will be decoded as JSON.
Loading happens atomically, but it does _not_ clear out any existing
features. To have a clean restore you'll need to run `clear/1` first.
## Examples
Restore a dump into a clean environment:
Flippant.clear() #=> :ok
Flippant.load("backup.dump") #=> :ok
"""
@spec load(name :: atom(), binary()) :: :ok | {:error, File.posix() | binary()}
def load(name \\ __MODULE__, path) when is_binary(path) do
with {:ok, data} <- File.read(path) do
name
|> adapter()
|> GenServer.cast({:restore, Jason.decode!(data)})
end
end
@doc """
Rename an existing feature.
If the new feature name already exists it will be overwritten and all of
its rules will be replaced.
## Examples
Flippant.rename("search", "super-search")
#=> :ok
"""
@spec rename(name :: atom(), binary, binary) :: :ok
def rename(name \\ __MODULE__, old_name, new_name)
when is_binary(old_name) and is_binary(new_name) do
name
|> adapter()
|> GenServer.cast({:rename, normalize(old_name), normalize(new_name)})
end
@doc """
Fully remove a feature for all groups.
## Examples
Flippant.remove("search")
#=> :ok
"""
@spec remove(name :: atom(), binary) :: :ok
def remove(name \\ __MODULE__, feature) when is_binary(feature) do
name
|> adapter()
|> GenServer.cast({:remove, normalize(feature)})
end
@doc """
Prepare the adapter for usage.
For adapters that don't require any setup this is a no-op. For other adapters,
such as Postgres, which require a schema/table to operate this will create the
necessary table.
## Examples
Flippant.setup()
#=> :ok
"""
@spec setup(name :: atom()) :: :ok
def setup(name \\ __MODULE__) do
name
|> adapter()
|> GenServer.cast(:setup)
end
@doc false
def update_config(name \\ __MODULE__, key, value) do
conf =
name
|> Config.get()
|> Map.put(key, value)
Config.put(name, conf)
end
defp normalize(value) when is_binary(value) do
value
|> String.downcase()
|> String.trim()
end
end
|
lib/flippant.ex
| 0.853898 | 0.495239 |
flippant.ex
|
starcoder
|
defmodule Plug.Builder do
@moduledoc """
Conveniences for building plugs.
This module can be `use`-d into a module in order to build
a plug pipeline:
defmodule MyApp do
use Plug.Builder
plug Plug.Logger
plug :hello, upper: true
# A function from another module can be plugged too, provided it's
# imported into the current module first.
import AnotherModule, only: [interesting_plug: 2]
plug :interesting_plug
def hello(conn, opts) do
body = if opts[:upper], do: "WORLD", else: "world"
send_resp(conn, 200, body)
end
end
Multiple plugs can be defined with the `plug/2` macro, forming a pipeline.
The plugs in the pipeline will be executed in the order they've been added
through the `plug/2` macro. In the example above, `Plug.Logger` will be
called first and then the `:hello` function plug will be called on the
resulting connection.
`Plug.Builder` also imports the `Plug.Conn` module, making functions like
`send_resp/3` available.
## Options
When used, the following options are accepted by `Plug.Builder`:
* `:log_on_halt` - accepts the level to log whenever the request is halted
* `:init_mode` - the environment to initialize the plug's options, one of
`:compile` or `:runtime`. Defaults to `:compile`.
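For example, a minimal sketch that defers plug initialization to runtime,
using only the option documented above:
defmodule MyRuntimeInitApp do
use Plug.Builder, init_mode: :runtime
plug Plug.Logger
end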
## Plug behaviour
Internally, `Plug.Builder` implements the `Plug` behaviour, which means both
the `init/1` and `call/2` functions are defined.
By implementing the Plug API, `Plug.Builder` guarantees this module is a plug
and can be handed to a web server or used as part of another pipeline.
## Overriding the default Plug API functions
Both the `init/1` and `call/2` functions defined by `Plug.Builder` can be
manually overridden. For example, the `init/1` function provided by
`Plug.Builder` returns the options that it receives as an argument, but its
behaviour can be customized:
defmodule PlugWithCustomOptions do
use Plug.Builder
plug Plug.Logger
def init(opts) do
opts
end
end
The `call/2` function that `Plug.Builder` provides is used internally to
execute all the plugs listed using the `plug` macro, so overriding the
`call/2` function generally implies using `super` in order to still call the
plug chain:
defmodule PlugWithCustomCall do
use Plug.Builder
plug Plug.Logger
plug Plug.Head
def call(conn, opts) do
conn
|> super(opts) # calls Plug.Logger and Plug.Head
|> assign(:called_all_plugs, true)
end
end
## Halting a plug pipeline
A plug pipeline can be halted with `Plug.Conn.halt/1`. The builder will
prevent further plugs downstream from being invoked and return the current
connection. In the following example, the `Plug.Logger` plug never gets
called:
defmodule PlugUsingHalt do
use Plug.Builder
plug :stopper
plug Plug.Logger
def stopper(conn, _opts) do
halt(conn)
end
end
"""
@type plug :: module | atom
@doc false
defmacro __using__(opts) do
quote do
@behaviour Plug
@plug_builder_opts unquote(opts)
def init(opts) do
opts
end
def call(conn, opts) do
plug_builder_call(conn, opts)
end
defoverridable Plug
import Plug.Conn
import Plug.Builder, only: [plug: 1, plug: 2, builder_opts: 0]
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
@before_compile Plug.Builder
end
end
@doc false
defmacro __before_compile__(env) do
plugs = Module.get_attribute(env.module, :plugs)
plugs =
if builder_ref = get_plug_builder_ref(env.module) do
traverse(plugs, builder_ref)
else
plugs
end
builder_opts = Module.get_attribute(env.module, :plug_builder_opts)
{conn, body} = Plug.Builder.compile(env, plugs, builder_opts)
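# When options are initialized at compile time, re-add a compile-time
# dependency on every module plug so this module recompiles whenever one
# of the plugs it uses changes.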
compile_time =
if builder_opts[:init_mode] == :runtime do
[]
else
for triplet <- plugs,
{plug, _, _} = triplet,
match?(~c"Elixir." ++ _, Atom.to_charlist(plug)) do
quote(do: unquote(plug).__info__(:module))
end
end
quote do
unquote_splicing(compile_time)
defp plug_builder_call(unquote(conn), opts), do: unquote(body)
end
end
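# Walks the accumulated plug options and replaces every occurrence of the
# reference created by builder_opts/0 with a quoted `opts` variable, so the
# builder's own options are spliced into the plug's options.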
defp traverse(tuple, ref) when is_tuple(tuple) do
tuple |> Tuple.to_list() |> traverse(ref) |> List.to_tuple()
end
defp traverse(map, ref) when is_map(map) do
map |> Map.to_list() |> traverse(ref) |> Map.new()
end
defp traverse(list, ref) when is_list(list) do
Enum.map(list, &traverse(&1, ref))
end
defp traverse(ref, ref) do
{:unquote, [], [quote(do: opts)]}
end
defp traverse(term, _ref) do
term
end
@doc """
A macro that stores a new plug. `opts` will be passed unchanged to the new
plug.
This macro doesn't add any guards when adding the new plug to the pipeline;
for more information about adding plugs with guards see `compile/3`.
## Examples
plug Plug.Logger # plug module
plug :foo, some_options: true # plug function
"""
defmacro plug(plug, opts \\ []) do
# We always expand it but the @before_compile callback adds compile
# time dependencies back depending on the builder's init mode.
plug = Macro.expand(plug, %{__CALLER__ | function: {:init, 1}})
quote do
@plugs {unquote(plug), unquote(opts), true}
end
end
@doc """
Annotates that a plug will receive the options given
to the current module itself as arguments.
Imagine the following plug:
defmodule MyPlug do
use Plug.Builder
plug :inspect_opts, builder_opts()
defp inspect_opts(conn, opts) do
IO.inspect(opts)
conn
end
end
When plugged as:
plug MyPlug, custom: :options
It will print `[custom: :options]` as the builder options
were passed to the inner plug.
Note you only pass `builder_opts()` to **function plugs**.
You cannot use `builder_opts()` with module plugs because
their options are evaluated at compile time. If you need
to pass `builder_opts()` to a module plug, you can wrap
the module plug in a function. To be precise, do not do this:
plug Plug.Parsers, builder_opts()
Instead do this:
plug :custom_plug_parsers, builder_opts()
defp custom_plug_parsers(conn, opts) do
Plug.Parsers.call(conn, Plug.Parsers.init(opts))
end
"""
defmacro builder_opts() do
quote do
Plug.Builder.__builder_opts__(__MODULE__)
end
end
@doc false
def __builder_opts__(module) do
get_plug_builder_ref(module) || generate_plug_builder_ref(module)
end
defp get_plug_builder_ref(module) do
Module.get_attribute(module, :plug_builder_ref)
end
defp generate_plug_builder_ref(module) do
ref = make_ref()
Module.put_attribute(module, :plug_builder_ref, ref)
ref
end
@doc """
Compiles a plug pipeline.
Each element of the plug pipeline (according to the type signature of this
function) has the form:
{plug_name, options, guards}
Note that this function expects a reversed pipeline (with the last plug that
has to be called coming first in the pipeline).
The function returns a tuple with the first element being a quoted reference
to the connection and the second element being the compiled quoted pipeline.
## Examples
Plug.Builder.compile(env, [
{Plug.Logger, [], true}, # no guards, as added by the Plug.Builder.plug/2 macro
{Plug.Head, [], quote(do: a when is_binary(a))}
], [])
"""
@spec compile(Macro.Env.t(), [{plug, Plug.opts(), Macro.t()}], Keyword.t()) ::
{Macro.t(), Macro.t()}
def compile(env, pipeline, builder_opts) do
conn = quote do: conn
init_mode = builder_opts[:init_mode] || :compile
unless init_mode in [:compile, :runtime] do
raise ArgumentError, """
invalid :init_mode when compiling #{inspect(env.module)}.
Supported values include :compile or :runtime. Got: #{inspect(init_mode)}
"""
end
ast =
Enum.reduce(pipeline, conn, fn {plug, opts, guards}, acc ->
{plug, opts, guards}
|> init_plug(init_mode)
|> quote_plug(init_mode, acc, env, builder_opts)
end)
{conn, ast}
end
# Initializes the options of a plug in the configured init_mode.
defp init_plug({plug, opts, guards}, init_mode) do
case Atom.to_charlist(plug) do
~c"Elixir." ++ _ -> init_module_plug(plug, opts, guards, init_mode)
_ -> init_fun_plug(plug, opts, guards)
end
end
defp init_module_plug(plug, opts, guards, :compile) do
initialized_opts = plug.init(opts)
if function_exported?(plug, :call, 2) do
{:module, plug, escape(initialized_opts), guards}
else
raise ArgumentError, "#{inspect(plug)} plug must implement call/2"
end
end
defp init_module_plug(plug, opts, guards, :runtime) do
{:module, plug, quote(do: unquote(plug).init(unquote(escape(opts)))), guards}
end
defp init_fun_plug(plug, opts, guards) do
{:function, plug, escape(opts), guards}
end
defp escape(opts) do
Macro.escape(opts, unquote: true)
end
defp quote_plug({:module, plug, opts, guards}, :compile, acc, env, builder_opts) do
call = quote_plug(:module, plug, opts, guards, acc, env, builder_opts)
quote do
require unquote(plug)
unquote(call)
end
end
defp quote_plug({plug_type, plug, opts, guards}, _init_mode, acc, env, builder_opts) do
quote_plug(plug_type, plug, opts, guards, acc, env, builder_opts)
end
# `acc` is a series of nested plug calls in the form of plug3(plug2(plug1(conn))).
# `quote_plug` wraps a new plug around that series of calls.
defp quote_plug(plug_type, plug, opts, guards, acc, env, builder_opts) do
call = quote_plug_call(plug_type, plug, opts)
error_message =
case plug_type do
:module -> "expected #{inspect(plug)}.call/2 to return a Plug.Conn"
:function -> "expected #{plug}/2 to return a Plug.Conn"
end <> ", all plugs must receive a connection (conn) and return a connection"
quote generated: true do
case unquote(compile_guards(call, guards)) do
%Plug.Conn{halted: true} = conn ->
unquote(log_halt(plug_type, plug, env, builder_opts))
conn
%Plug.Conn{} = conn ->
unquote(acc)
other ->
raise unquote(error_message) <> ", got: #{inspect(other)}"
end
end
end
defp quote_plug_call(:function, plug, opts) do
quote do: unquote(plug)(conn, unquote(opts))
end
defp quote_plug_call(:module, plug, opts) do
quote do: unquote(plug).call(conn, unquote(opts))
end
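# When no guard was given, emit the plug call as-is. Otherwise wrap the call
# in a case so the plug only runs when its guard matches, and the connection
# passes through untouched when it does not.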
defp compile_guards(call, true) do
call
end
defp compile_guards(call, guards) do
quote do
case true do
true when unquote(guards) -> unquote(call)
true -> conn
end
end
end
defp log_halt(plug_type, plug, env, builder_opts) do
if level = builder_opts[:log_on_halt] do
message =
case plug_type do
:module -> "#{inspect(env.module)} halted in #{inspect(plug)}.call/2"
:function -> "#{inspect(env.module)} halted in #{inspect(plug)}/2"
end
quote do
require Logger
# Matching, to make Dialyzer happy on code executing Plug.Builder.compile/3
_ = Logger.unquote(level)(unquote(message))
end
else
nil
end
end
end
|
deps/plug/lib/plug/builder.ex
| 0.920231 | 0.477554 |
builder.ex
|
starcoder
|
defmodule ExDoc.Markdown do
@moduledoc """
Transform a given document in MarkDown to HTML
ExDoc supports the following MarkDown parsers:
* [Hoedown][]
* [Earmark][]
* [Pandoc][]
If you don't express a preference via `config/config.exs`, ExDoc will try to
find one of the Markdown parsers above. If none is available, ExDoc will raise
an exception.
[Pandoc]: http://johnmacfarlane.net/pandoc/
[Hoedown]: https://github.com/hoedown/hoedown
[Earmark]: http://github.com/pragdave/earmark
"""
@markdown_processors [
ExDoc.Markdown.Hoedown,
ExDoc.Markdown.Earmark,
ExDoc.Markdown.Pandoc
]
@markdown_processor_key :markdown_processor
@doc """
Converts the given markdown document to HTML.
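A sketch of the expected output, assuming an Earmark-backed processor is
available and produces standard HTML:
ExDoc.Markdown.to_html("# Hello")
#=> "<h1>Hello</h1>\n"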
"""
def to_html(text) when is_binary(text) do
get_markdown_processor().to_html(text)
|> pretty_codeblocks()
end
@doc """
Helper to handle plain code blocks (```...```), with and without a language
specification, as well as indented code blocks.
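A sketch of the expected transformation, based on the replacements below:
ExDoc.Markdown.pretty_codeblocks(~S(<pre><code>iex> 1 + 1</code></pre>))
#=> ~S(<pre><code class="iex elixir">iex> 1 + 1</code></pre>)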
"""
def pretty_codeblocks(bin) do
bin = Regex.replace(~r/<pre><code(\s+class=\"\")?>\s*iex>/,
# Add "elixir" class for now, until we have support for
# "iex" in highlight.js
bin, ~S(<pre><code class="iex elixir">iex>))
bin = Regex.replace(~r/<pre><code(\s+class=\"\")?>/,
bin, ~S(<pre><code class="elixir">))
bin
end
defp get_markdown_processor() do
case Application.fetch_env(:ex_doc, @markdown_processor_key) do
{:ok, processor} -> processor
:error ->
processor = find_markdown_processor() || raise_no_markdown_processor()
Application.put_env(:ex_doc, @markdown_processor_key, processor)
processor
end
end
defp find_markdown_processor() do
Enum.find @markdown_processors, fn module ->
Code.ensure_loaded?(module) && module.available?
end
end
defp raise_no_markdown_processor() do
raise """
Could not find a markdown processor to be used by ex_doc.
You can either:
* Add {:earmark, ">= 0.0.0"} to your mix.exs deps
to use an Elixir-based markdown processor
* Add {:markdown, github: "devinus/markdown"} to your mix.exs deps
to use a C-based markdown processor
* Ensure pandoc (http://johnmacfarlane.net/pandoc) is available in your system
to use it as an external tool
"""
end
end
|
lib/ex_doc/markdown.ex
| 0.758511 | 0.690937 |
markdown.ex
|
starcoder
|
defmodule AWS.Redshift do
@moduledoc """
Amazon Redshift
## Overview
This is an interface reference for Amazon Redshift.
It contains documentation for one of the programming or command line interfaces
you can use to manage Amazon Redshift clusters. Note that Amazon Redshift is
asynchronous, which means that some interfaces may require techniques, such as
polling or asynchronous callback handlers, to determine when a command has been
applied. In this reference, the parameter descriptions indicate whether a change
is applied immediately, on the next instance reboot, or during the next
maintenance window. For a summary of the Amazon Redshift cluster management
interfaces, go to [Using the Amazon Redshift Management Interfaces](https://docs.aws.amazon.com/redshift/latest/mgmt/using-aws-sdk.html).
Amazon Redshift manages all the work of setting up, operating, and scaling a
data warehouse: provisioning capacity, monitoring and backing up the cluster,
and applying patches and upgrades to the Amazon Redshift engine. You can focus
on using your data to acquire new insights for your business and customers.
If you are a first-time user of Amazon Redshift, we recommend that you begin by
reading the [Amazon Redshift Getting Started Guide](https://docs.aws.amazon.com/redshift/latest/gsg/getting-started.html).
If you are a database developer, the [Amazon Redshift Database Developer Guide](https://docs.aws.amazon.com/redshift/latest/dg/welcome.html) explains how
to design, build, query, and maintain the databases that make up your data
warehouse.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2012-12-01",
content_type: "application/x-www-form-urlencoded",
credential_scope: nil,
endpoint_prefix: "redshift",
global?: false,
protocol: "query",
service_id: "Redshift",
signature_version: "v4",
signing_name: "redshift",
target_prefix: nil
}
end
@doc """
Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to the
configuration (term, payment type, or number of nodes) and no additional costs.
"""
def accept_reserved_node_exchange(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AcceptReservedNodeExchange", input, options)
end
@doc """
Adds an inbound (ingress) rule to an Amazon Redshift security group.
Depending on whether the application accessing your cluster is running on the
Internet or an Amazon EC2 instance, you can authorize inbound access to either a
Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an
Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon
Redshift security group.
If you authorize access to an Amazon EC2 security group, specify
*EC2SecurityGroupName* and *EC2SecurityGroupOwnerId*. The Amazon EC2 security
group and Amazon Redshift cluster must be in the same AWS Region.
If you authorize access to a CIDR/IP address range, specify *CIDRIP*. For an
overview of CIDR blocks, see the Wikipedia article on [Classless Inter-Domain Routing](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
You must also associate the security group with a cluster so that clients
running on these IP addresses or the EC2 instance are authorized to connect to
the cluster. For information about managing security groups, go to [Working with Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def authorize_cluster_security_group_ingress(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"AuthorizeClusterSecurityGroupIngress",
input,
options
)
end
@doc """
Authorizes the specified AWS customer account to restore the specified snapshot.
For more information about working with snapshots, go to [Amazon Redshift Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def authorize_snapshot_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AuthorizeSnapshotAccess", input, options)
end
@doc """
Deletes a set of cluster snapshots.
"""
def batch_delete_cluster_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDeleteClusterSnapshots", input, options)
end
@doc """
Modifies the settings for a set of cluster snapshots.
"""
def batch_modify_cluster_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchModifyClusterSnapshots", input, options)
end
@doc """
Cancels a resize operation for a cluster.
"""
def cancel_resize(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelResize", input, options)
end
@doc """
Copies the specified automated cluster snapshot to a new manual cluster
snapshot.
The source must be an automated snapshot and it must be in the available state.
When you delete a cluster, Amazon Redshift deletes any automated snapshots of
the cluster. Also, when the retention period of the snapshot expires, Amazon
Redshift automatically deletes it. If you want to keep an automated snapshot for
a longer period, you can make a manual copy of the snapshot. Manual snapshots
are retained until you delete them.
For more information about working with snapshots, go to [Amazon Redshift Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def copy_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyClusterSnapshot", input, options)
end
@doc """
Creates a new cluster with the specified parameters.
To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster
subnet group name. The cluster subnet group identifies the subnets of your VPC
that Amazon Redshift uses when creating the cluster. For more information about
managing clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCluster", input, options)
end
@doc """
Creates an Amazon Redshift parameter group.
Creating parameter groups is independent of creating clusters. You can associate
a cluster with a parameter group when you create the cluster. You can also
associate an existing cluster with a parameter group after the cluster is
created by using `ModifyCluster`.
Parameters in the parameter group define specific behavior that applies to the
databases you create on the cluster. For more information about parameters and
parameter groups, go to [Amazon Redshift Parameter Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateClusterParameterGroup", input, options)
end
@doc """
Creates a new Amazon Redshift security group.
You use security groups to control access to non-VPC clusters.
For information about managing security groups, go to [Amazon Redshift Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_security_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateClusterSecurityGroup", input, options)
end
@doc """
Creates a manual snapshot of the specified cluster.
The cluster must be in the `available` state.
For more information about working with snapshots, go to [Amazon Redshift Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateClusterSnapshot", input, options)
end
@doc """
Creates a new Amazon Redshift subnet group.
You must provide a list of one or more subnets in your existing Amazon Virtual
Private Cloud (Amazon VPC) when creating an Amazon Redshift subnet group.
For information about subnet groups, go to [Amazon Redshift Cluster Subnet Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateClusterSubnetGroup", input, options)
end
@doc """
Creates an Amazon Redshift event notification subscription.
This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic
created by either the Amazon Redshift console, the Amazon SNS console, or the
Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in
Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the source type, and lists of Amazon Redshift source IDs, event
categories, and event severities. Notifications will be sent for all events you
want that match those criteria. For example, you can specify source type =
cluster, source ID = my-cluster-1 and mycluster2, event categories =
Availability, Backup, and severity = ERROR. The subscription will only send
notifications for those ERROR events in the Availability and Backup categories
for the specified clusters.
If you specify both the source type and source IDs, such as source type =
cluster and source identifier = my-cluster-1, notifications will be sent for all
the cluster events for my-cluster-1. If you specify a source type but do not
specify a source identifier, you will receive notice of the events for the
objects of that type in your AWS account. If you specify neither the
SourceType nor the SourceIdentifier, you will be notified of events generated
from all Amazon Redshift sources belonging to your AWS account. You must specify
a source type if you specify a source ID.
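A sketch of an input map matching the example above (the key names follow
the Amazon Redshift query API and are assumptions here, not defined in this
module):
%{
"SubscriptionName" => "my-subscription",
"SnsTopicArn" => "arn:aws:sns:us-east-1:123456789012:my-topic",
"SourceType" => "cluster",
"SourceIds" => ["my-cluster-1", "mycluster2"],
"EventCategories" => ["Availability", "Backup"],
"Severity" => "ERROR"
}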
"""
def create_event_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateEventSubscription", input, options)
end
@doc """
Creates an HSM client certificate that an Amazon Redshift cluster will use to
connect to the client's HSM in order to store and retrieve the keys used to
encrypt the cluster databases.
The command returns a public key, which you must store in the HSM. In addition
to creating the HSM certificate, you must create an Amazon Redshift HSM
configuration that provides a cluster the information needed to store and use
encryption keys in the HSM. For more information, go to [Hardware Security Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html)
in the Amazon Redshift Cluster Management Guide.
"""
def create_hsm_client_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateHsmClientCertificate", input, options)
end
@doc """
Creates an HSM configuration that contains the information required by an Amazon
Redshift cluster to store and use database encryption keys in a Hardware
Security Module (HSM).
After creating the HSM configuration, you can specify it as a parameter when
creating a cluster. The cluster will then store its encryption keys in the HSM.
In addition to creating an HSM configuration, you must also create an HSM client
certificate. For more information, go to [Hardware Security Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html)
in the Amazon Redshift Cluster Management Guide.
"""
def create_hsm_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateHsmConfiguration", input, options)
end
@doc """
Creates a scheduled action.
A scheduled action contains a schedule and an Amazon Redshift API action. For
example, you can create a schedule of when to run the `ResizeCluster` API
operation.
"""
def create_scheduled_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateScheduledAction", input, options)
end
@doc """
Creates a snapshot copy grant that permits Amazon Redshift to use a customer
master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied
snapshots in a destination region.
For more information about managing snapshot copy grants, go to [Amazon Redshift Database
Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_snapshot_copy_grant(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSnapshotCopyGrant", input, options)
end
@doc """
Create a snapshot schedule that can be associated to a cluster and which
overrides the default system backup schedule.
"""
def create_snapshot_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSnapshotSchedule", input, options)
end
@doc """
Adds tags to a cluster.
A resource can have up to 50 tags. If you try to create more than 50 tags for a
resource, you will receive an error and the attempt will fail.
If you specify a key that already exists for the resource, the value for that
key will be updated with the new value.
"""
def create_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTags", input, options)
end
@doc """
Creates a usage limit for a specified Amazon Redshift feature on a cluster.
The usage limit is identified by the returned usage limit identifier.
"""
def create_usage_limit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateUsageLimit", input, options)
end
@doc """
Deletes a previously provisioned cluster without its final snapshot being
created.
A successful response from the web service indicates that the request was
received correctly. Use `DescribeClusters` to monitor the status of the
deletion. The delete operation cannot be canceled or reverted once submitted.
For more information about managing clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
If you want to shut down the cluster and retain it for future use, set
*SkipFinalClusterSnapshot* to `false` and specify a name for
*FinalClusterSnapshotIdentifier*. You can later restore this snapshot to resume
using the cluster. If a final cluster snapshot is requested, the status of the
cluster will be "final-snapshot" while the snapshot is being taken, then it's
"deleting" once Amazon Redshift begins deleting the cluster.
"""
def delete_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCluster", input, options)
end
@doc """
Deletes a specified Amazon Redshift parameter group.
You cannot delete a parameter group if it is associated with a cluster.
"""
def delete_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteClusterParameterGroup", input, options)
end
@doc """
Deletes an Amazon Redshift security group.
You cannot delete a security group that is associated with any clusters. You
cannot delete the default security group.
For information about managing security groups, go to [Amazon Redshift Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def delete_cluster_security_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteClusterSecurityGroup", input, options)
end
@doc """
Deletes the specified manual snapshot.
The snapshot must be in the `available` state, with no other users authorized to
access the snapshot.
Unlike automated snapshots, manual snapshots are retained even after you delete
your cluster. Amazon Redshift does not delete your manual snapshots. You must
delete a manual snapshot explicitly to avoid being charged. If other accounts
are authorized to access the snapshot, you must revoke all of the authorizations
before you can delete the snapshot.
"""
def delete_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteClusterSnapshot", input, options)
end
@doc """
Deletes the specified cluster subnet group.
"""
def delete_cluster_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteClusterSubnetGroup", input, options)
end
@doc """
Deletes an Amazon Redshift event notification subscription.
"""
def delete_event_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteEventSubscription", input, options)
end
@doc """
Deletes the specified HSM client certificate.
"""
def delete_hsm_client_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteHsmClientCertificate", input, options)
end
@doc """
Deletes the specified Amazon Redshift HSM configuration.
"""
def delete_hsm_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteHsmConfiguration", input, options)
end
@doc """
Deletes a scheduled action.
"""
def delete_scheduled_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteScheduledAction", input, options)
end
@doc """
Deletes the specified snapshot copy grant.
"""
def delete_snapshot_copy_grant(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSnapshotCopyGrant", input, options)
end
@doc """
Deletes a snapshot schedule.
"""
def delete_snapshot_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSnapshotSchedule", input, options)
end
@doc """
Deletes tags from a resource.
You must provide the ARN of the resource from which you want to delete the tag
or tags.
"""
def delete_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTags", input, options)
end
@doc """
Deletes a usage limit from a cluster.
"""
def delete_usage_limit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteUsageLimit", input, options)
end
@doc """
Returns a list of attributes attached to an account
"""
def describe_account_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccountAttributes", input, options)
end
@doc """
Returns an array of `ClusterDbRevision` objects.
"""
def describe_cluster_db_revisions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterDbRevisions", input, options)
end
@doc """
Returns a list of Amazon Redshift parameter groups, including parameter groups
you created and the default parameter group.
For each parameter group, the response includes the parameter group name,
description, and parameter group family name. You can optionally specify a name
to retrieve the description of a specific parameter group.
For more information about parameters and parameter groups, go to [Amazon Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all parameter groups that match any combination of the specified keys
and values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all parameter groups that have any
combination of those values are returned.
If both tag keys and values are omitted from the request, parameter groups are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_cluster_parameter_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterParameterGroups", input, options)
end
@doc """
Returns a detailed list of parameters contained within the specified Amazon
Redshift parameter group.
For each parameter the response includes information such as parameter name,
description, data type, value, whether the parameter value is modifiable, and so
on.
You can specify a *source* filter to retrieve parameters of only a specific type.
For example, to retrieve parameters that were modified by a user action such as
from `ModifyClusterParameterGroup`, you can specify *source* equal to *user*.
For more information about parameters and parameter groups, go to [Amazon Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_cluster_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterParameters", input, options)
end
@doc """
Returns information about Amazon Redshift security groups.
If the name of a security group is specified, the response will contain
information about only that security group.
For information about managing security groups, go to [Amazon Redshift Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all security groups that match any combination of the specified keys and
values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all security groups that have any combination
of those values are returned.
If both tag keys and values are omitted from the request, security groups are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_cluster_security_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterSecurityGroups", input, options)
end
@doc """
Returns one or more snapshot objects, which contain metadata about your cluster
snapshots.
By default, this operation returns information about all snapshots of all
clusters that are owned by your AWS customer account. No information is returned
for snapshots owned by inactive AWS customer accounts.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all snapshots that match any combination of the specified keys and
values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all snapshots that have any combination of
those values are returned. Only snapshots that you own are returned in the
response; shared snapshots are not returned with the tag key and tag value
request parameters.
If both tag keys and values are omitted from the request, snapshots are returned
regardless of whether they have tag keys or values associated with them.
"""
def describe_cluster_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterSnapshots", input, options)
end
@doc """
Returns one or more cluster subnet group objects, which contain metadata about
your cluster subnet groups.
By default, this operation returns information about all cluster subnet groups
that are defined in your AWS account.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all subnet groups that match any combination of the specified keys and
values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all subnet groups that have any combination
of those values are returned.
If both tag keys and values are omitted from the request, subnet groups are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_cluster_subnet_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterSubnetGroups", input, options)
end
@doc """
Returns a list of all the available maintenance tracks.
"""
def describe_cluster_tracks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterTracks", input, options)
end
@doc """
Returns descriptions of the available Amazon Redshift cluster versions.
You can call this operation even before creating any clusters to learn more
about the Amazon Redshift versions. For more information about managing
clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_cluster_versions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusterVersions", input, options)
end
@doc """
Returns properties of provisioned clusters including general cluster properties,
cluster database properties, maintenance and backup properties, and security and
access properties.
This operation supports pagination. For more information about managing
clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all clusters that match any combination of the specified keys and
values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all clusters that have any combination of
those values are returned.
If both tag keys and values are omitted from the request, clusters are returned
regardless of whether they have tag keys or values associated with them.
"""
def describe_clusters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeClusters", input, options)
end
@doc """
Returns a list of parameter settings for the specified parameter group family.
For more information about parameters and parameter groups, go to [Amazon Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_default_cluster_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDefaultClusterParameters", input, options)
end
@doc """
Displays a list of event categories for all event source types, or for a
specified source type.
For a list of the event categories and source types, go to [Amazon Redshift Event
Notifications](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html).
"""
def describe_event_categories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventCategories", input, options)
end
@doc """
Lists descriptions of all the Amazon Redshift event notification subscriptions
for a customer account.
If you specify a subscription name, lists the description for that subscription.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all event notification subscriptions that match any combination of the
specified keys and values. For example, if you have `owner` and `environment`
for tag keys, and `admin` and `test` for tag values, all subscriptions that have
any combination of those values are returned.
If both tag keys and values are omitted from the request, subscriptions are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_event_subscriptions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventSubscriptions", input, options)
end
@doc """
Returns events related to clusters, security groups, snapshots, and parameter
groups for the past 14 days.
Events specific to a particular cluster, security group, snapshot or parameter
group can be obtained by providing the name as a parameter. By default, the past
hour of events are returned.
"""
def describe_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEvents", input, options)
end
@doc """
Returns information about the specified HSM client certificate.
If no certificate ID is specified, returns information about all the HSM
certificates owned by your AWS customer account.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all HSM client certificates that match any combination of the specified
keys and values. For example, if you have `owner` and `environment` for tag
keys, and `admin` and `test` for tag values, all HSM client certificates that
have any combination of those values are returned.
If both tag keys and values are omitted from the request, HSM client
certificates are returned regardless of whether they have tag keys or values
associated with them.
"""
def describe_hsm_client_certificates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHsmClientCertificates", input, options)
end
@doc """
Returns information about the specified Amazon Redshift HSM configuration.
If no configuration ID is specified, returns information about all the HSM
configurations owned by your AWS customer account.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all HSM connections that match any combination of the specified keys and
values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all HSM connections that have any combination
of those values are returned.
If both tag keys and values are omitted from the request, HSM connections are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_hsm_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHsmConfigurations", input, options)
end
@doc """
Describes whether information, such as queries and connection attempts, is being
logged for the specified Amazon Redshift cluster.
"""
def describe_logging_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLoggingStatus", input, options)
end
@doc """
Returns properties of possible node configurations such as node type, number of
nodes, and disk usage for the specified action type.
"""
def describe_node_configuration_options(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeNodeConfigurationOptions", input, options)
end
@doc """
Returns a list of orderable cluster options.
Before you create a new cluster you can use this operation to find what options
are available, such as the EC2 Availability Zones (AZ) in the specific AWS
Region that you can specify, and the node types you can request. The node types
differ by available storage, memory, CPU, and price. Given the costs involved, you
might want to obtain a list of cluster options in the specific region and
specify values when creating a cluster. For more information about managing
clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_orderable_cluster_options(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOrderableClusterOptions", input, options)
end
@doc """
Returns a list of the available reserved node offerings by Amazon Redshift with
their descriptions including the node type, the fixed and recurring costs of
reserving the node and duration the node will be reserved for you.
These descriptions help you determine which reserved node offering you want to
purchase. You then use the unique offering ID in your call to
`PurchaseReservedNodeOffering` to reserve one or more nodes for your Amazon
Redshift cluster.
For more information about reserved node offerings, go to [Purchasing Reserved Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_reserved_node_offerings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeReservedNodeOfferings", input, options)
end
@doc """
Returns the descriptions of the reserved nodes.
"""
def describe_reserved_nodes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeReservedNodes", input, options)
end
@doc """
Returns information about the last resize operation for the specified cluster.
If no resize operation has ever been initiated for the specified cluster, an
`HTTP 404` error is returned. If a resize operation was initiated and completed,
the status of the resize remains as `SUCCEEDED` until the next resize.
A resize operation can be requested using `ModifyCluster` and specifying a
different number or type of nodes for the cluster.
"""
def describe_resize(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeResize", input, options)
end
@doc """
Describes properties of scheduled actions.
"""
def describe_scheduled_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeScheduledActions", input, options)
end
@doc """
Returns a list of snapshot copy grants owned by the AWS account in the
destination region.
For more information about managing snapshot copy grants, go to [Amazon Redshift Database
Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_snapshot_copy_grants(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSnapshotCopyGrants", input, options)
end
@doc """
Returns a list of snapshot schedules.
"""
def describe_snapshot_schedules(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSnapshotSchedules", input, options)
end
@doc """
Returns account-level backup storage size and provisional storage.
"""
def describe_storage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStorage", input, options)
end
@doc """
Lists the status of one or more table restore requests made using the
`RestoreTableFromClusterSnapshot` API action.
If you don't specify a value for the `TableRestoreRequestId` parameter, then
`DescribeTableRestoreStatus` returns the status of all table restore requests
ordered by the date and time of the request in ascending order. Otherwise
`DescribeTableRestoreStatus` returns the status of the table specified by
`TableRestoreRequestId`.
"""
def describe_table_restore_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTableRestoreStatus", input, options)
end
@doc """
Returns a list of tags.
You can return tags from a specific resource by specifying an ARN, or you can
return all tags for a given type of resource, such as clusters, snapshots, and
so on.
The following are limitations for `DescribeTags`:
* You cannot specify an ARN and a resource-type value together in
the same request.
* You cannot use the `MaxRecords` and `Marker` parameters together
with the ARN parameter.
* The `MaxRecords` parameter can be set to return from 10 to 50 results
in a request.
If you specify both tag keys and tag values in the same request, Amazon Redshift
returns all resources that match any combination of the specified keys and
values. For example, if you have `owner` and `environment` for tag keys, and
`admin` and `test` for tag values, all resources that have any combination of
those values are returned.
If both tag keys and values are omitted from the request, resources are returned
regardless of whether they have tag keys or values associated with them.
"""
def describe_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTags", input, options)
end
@doc """
Shows usage limits on a cluster.
Results are filtered based on the combination of input usage limit identifier,
cluster identifier, and feature type parameters:
* If usage limit identifier, cluster identifier, and feature type
are not provided, then all usage limit objects for the current account in the
current region are returned.
* If usage limit identifier is provided, then the corresponding
usage limit object is returned.
* If cluster identifier is provided, then all usage limit objects
for the specified cluster are returned.
* If cluster identifier and feature type are provided, then all
usage limit objects for the combination of cluster and feature are returned.
"""
def describe_usage_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeUsageLimits", input, options)
end
@doc """
Stops logging information, such as queries and connection attempts, for the
specified Amazon Redshift cluster.
"""
def disable_logging(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableLogging", input, options)
end
@doc """
Disables the automatic copying of snapshots from one region to another region
for a specified cluster.
If your cluster and its snapshots are encrypted using a customer master key
(CMK) from AWS KMS, use `DeleteSnapshotCopyGrant` to delete the grant that
grants Amazon Redshift permission to the CMK in the destination region.
"""
def disable_snapshot_copy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableSnapshotCopy", input, options)
end
@doc """
Starts logging information, such as queries and connection attempts, for the
specified Amazon Redshift cluster.
"""
def enable_logging(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableLogging", input, options)
end
@doc """
Enables the automatic copy of snapshots from one region to another region for a
specified cluster.
"""
def enable_snapshot_copy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableSnapshotCopy", input, options)
end
@doc """
Returns a database user name and temporary password with temporary authorization
to log on to an Amazon Redshift database.
The action returns the database user name prefixed with `IAM:` if `AutoCreate`
is `False` or `IAMA:` if `AutoCreate` is `True`. You can optionally specify one
or more database user groups that the user will join at log on. By default, the
temporary credentials expire in 900 seconds. You can optionally specify a
duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For
more information, see [Using IAM Authentication to Generate Database User Credentials](https://docs.aws.amazon.com/redshift/latest/mgmt/generating-user-credentials.html)
in the Amazon Redshift Cluster Management Guide.
The AWS Identity and Access Management (IAM) user or role that executes
`GetClusterCredentials` must have an IAM policy attached that allows access to all
necessary actions and resources. For more information about permissions, see
[Resource Policies for GetClusterCredentials](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources)
in the Amazon Redshift Cluster Management Guide.
If the `DbGroups` parameter is specified, the IAM policy must allow the
`redshift:JoinGroup` action with access to the listed `dbgroups`.
In addition, if the `AutoCreate` parameter is set to `True`, then the policy
must include the `redshift:CreateClusterUser` privilege.
If the `DbName` parameter is specified, the IAM policy must allow access to the
resource `dbname` for the specified database name.
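## Example

A hedged sketch requesting 15-minute credentials for an auto-created user that
joins one database group (field names follow the Redshift API; the `client`
struct and return shape are assumptions):

    input = %{
      "ClusterIdentifier" => "my-cluster",
      "DbUser" => "report_runner",
      "DbName" => "analytics",
      "AutoCreate" => true,
      "DbGroups" => ["analysts"],
      "DurationSeconds" => 900
    }

    {:ok, credentials, _http_response} =
      AWS.Redshift.get_cluster_credentials(client, input)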
"""
def get_cluster_credentials(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetClusterCredentials", input, options)
end
@doc """
Returns an array of DC2 ReservedNodeOfferings that matches the payment type,
term, and usage price of the given DC1 reserved node.
"""
def get_reserved_node_exchange_offerings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetReservedNodeExchangeOfferings", input, options)
end
@doc """
Modifies the settings for a cluster.
You can also change node type and the number of nodes to scale up or down the
cluster. When resizing a cluster, you must specify both the number of nodes and
the node type even if one of the parameters does not change.
You can add another security or parameter group, or change the master user
password. Resetting a cluster password or modifying the security groups
associated with a cluster does not require a reboot. However, modifying a parameter
group requires a reboot for parameters to take effect. For more information
about managing clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
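## Example

When resizing, specify both the node count and node type even if only one of
them changes. A hedged sketch (field names follow the Redshift API; the
`client` struct is an assumption):

    input = %{
      "ClusterIdentifier" => "my-cluster",
      "NodeType" => "dc2.large",
      "NumberOfNodes" => 4
    }

    {:ok, result, _http_response} = AWS.Redshift.modify_cluster(client, input)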
"""
def modify_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCluster", input, options)
end
@doc """
Modifies the database revision of a cluster.
The database revision is a unique revision of the database running in a cluster.
"""
def modify_cluster_db_revision(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterDbRevision", input, options)
end
@doc """
Modifies the list of AWS Identity and Access Management (IAM) roles that can be
used by the cluster to access other AWS services.
A cluster can have up to 10 IAM roles associated at any time.
"""
def modify_cluster_iam_roles(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterIamRoles", input, options)
end
@doc """
Modifies the maintenance settings of a cluster.
"""
def modify_cluster_maintenance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterMaintenance", input, options)
end
@doc """
Modifies the parameters of a parameter group.
For more information about parameters and parameter groups, go to [Amazon Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def modify_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterParameterGroup", input, options)
end
@doc """
Modifies the settings for a snapshot.
For example, you can use this operation to modify the manual retention period
setting for a cluster snapshot.
"""
def modify_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterSnapshot", input, options)
end
@doc """
Modifies a snapshot schedule for a cluster.
"""
def modify_cluster_snapshot_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterSnapshotSchedule", input, options)
end
@doc """
Modifies a cluster subnet group to include the specified list of VPC subnets.
The operation replaces the existing list of subnets with the new list of
subnets.
"""
def modify_cluster_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyClusterSubnetGroup", input, options)
end
@doc """
Modifies an existing Amazon Redshift event notification subscription.
"""
def modify_event_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyEventSubscription", input, options)
end
@doc """
Modifies a scheduled action.
"""
def modify_scheduled_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyScheduledAction", input, options)
end
@doc """
Modifies the number of days to retain snapshots in the destination AWS Region
after they are copied from the source AWS Region.
By default, this operation only changes the retention period of copied automated
snapshots. The retention periods for both new and existing copied automated
snapshots are updated with the new retention period. You can set the manual
option to change only the retention periods of copied manual snapshots. If you
set this option, only newly copied manual snapshots have the new retention
period.
"""
def modify_snapshot_copy_retention_period(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifySnapshotCopyRetentionPeriod", input, options)
end
@doc """
Modifies a snapshot schedule.
Any schedule associated with a cluster is modified asynchronously.
"""
def modify_snapshot_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifySnapshotSchedule", input, options)
end
@doc """
Modifies a usage limit in a cluster.
You can't modify the feature type or period of a usage limit.
"""
def modify_usage_limit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyUsageLimit", input, options)
end
@doc """
Pauses a cluster.
"""
def pause_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PauseCluster", input, options)
end
@doc """
Allows you to purchase reserved nodes.
Amazon Redshift offers a predefined set of reserved node offerings. You can
purchase one or more of the offerings. You can call the
`DescribeReservedNodeOfferings` API to obtain the available reserved node
offerings. You can call this API by providing a specific reserved node offering
and the number of nodes you want to reserve.
For more information about reserved node offerings, go to [Purchasing Reserved Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
in the *Amazon Redshift Cluster Management Guide*.
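## Example

A hedged sketch: look up an offering with `DescribeReservedNodeOfferings`, then
purchase it (the offering identifier below is a placeholder):

    input = %{
      "ReservedNodeOfferingId" => "placeholder-offering-id",
      "NodeCount" => 2
    }

    {:ok, result, _http_response} =
      AWS.Redshift.purchase_reserved_node_offering(client, input)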
"""
def purchase_reserved_node_offering(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PurchaseReservedNodeOffering", input, options)
end
@doc """
Reboots a cluster.
This action is taken as soon as possible. It results in a momentary outage to
the cluster, during which the cluster status is set to `rebooting`. A cluster
event is created when the reboot is completed. Any pending cluster modifications
(see `ModifyCluster`) are applied at this reboot. For more information about
managing clusters, go to [Amazon Redshift Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def reboot_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RebootCluster", input, options)
end
@doc """
Sets one or more parameters of the specified parameter group to their default
values and sets the source values of the parameters to "engine-default".
To reset the entire parameter group, specify the *ResetAllParameters* parameter.
For parameter changes to take effect, you must reboot any associated clusters.
"""
def reset_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResetClusterParameterGroup", input, options)
end
@doc """
Changes the size of the cluster.
You can change the cluster's type, or change the number or type of nodes. The
default behavior is to use the elastic resize method. With an elastic resize,
your cluster is available for read and write operations more quickly than with
the classic resize method.
Elastic resize operations have the following restrictions:
* You can only resize clusters of the following types:
* dc1.large (if your cluster is in a VPC)
* dc1.8xlarge (if your cluster is in a VPC)
* dc2.large
* dc2.8xlarge
* ds2.xlarge
* ds2.8xlarge
* ra3.xlplus
* ra3.4xlarge
* ra3.16xlarge
* The type of nodes that you add must match the node type for the
cluster.
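## Example

For example, an elastic resize to four nodes of one of the supported types
listed above (a hedged sketch; field names follow the Redshift API):

    input = %{
      "ClusterIdentifier" => "my-cluster",
      "NodeType" => "dc2.large",
      "NumberOfNodes" => 4,
      "Classic" => false
    }

    {:ok, result, _http_response} = AWS.Redshift.resize_cluster(client, input)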
"""
def resize_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResizeCluster", input, options)
end
@doc """
Creates a new cluster from a snapshot.
By default, Amazon Redshift creates the resulting cluster with the same
configuration as the original cluster from which the snapshot was created,
except that the new cluster is created with the default cluster security and
parameter groups. After Amazon Redshift creates the cluster, you can use the
`ModifyCluster` API to associate a different security group and different
parameter group with the restored cluster. If you are using a DS node type, you
can also choose to change to another DS node type of the same size during
restore.
If you restore a cluster into a VPC, you must provide a cluster subnet group
where you want the cluster restored.
For more information about working with snapshots, go to [Amazon Redshift Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
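## Example

A hedged sketch restoring a snapshot into a VPC, where a cluster subnet group
must be provided (identifiers are placeholders):

    input = %{
      "ClusterIdentifier" => "my-restored-cluster",
      "SnapshotIdentifier" => "my-snapshot",
      "ClusterSubnetGroupName" => "my-subnet-group"
    }

    {:ok, result, _http_response} =
      AWS.Redshift.restore_from_cluster_snapshot(client, input)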
"""
def restore_from_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreFromClusterSnapshot", input, options)
end
@doc """
Creates a new table from a table in an Amazon Redshift cluster snapshot.
You must create the new table within the Amazon Redshift cluster that the
snapshot was taken from.
You cannot use `RestoreTableFromClusterSnapshot` to restore a table with the
same name as an existing table in an Amazon Redshift cluster. That is, you
cannot overwrite an existing table in a cluster with a restored table. If you
want to replace your original table with a new, restored table, then rename or
drop your original table before you call `RestoreTableFromClusterSnapshot`. After
you have renamed your original table, you can pass its original name as the
`NewTableName` parameter value in the call to
`RestoreTableFromClusterSnapshot`. This way, you can replace the original table
with the table created from the snapshot.
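## Example

A hedged sketch of the rename-then-restore flow described above (field names
follow the Redshift API; identifiers are placeholders):

    input = %{
      "ClusterIdentifier" => "my-cluster",
      "SnapshotIdentifier" => "my-snapshot",
      "SourceDatabaseName" => "sales",
      "SourceTableName" => "orders_backup",
      "NewTableName" => "orders"
    }

    {:ok, result, _http_response} =
      AWS.Redshift.restore_table_from_cluster_snapshot(client, input)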
"""
def restore_table_from_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreTableFromClusterSnapshot", input, options)
end
@doc """
Resumes a paused cluster.
"""
def resume_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResumeCluster", input, options)
end
@doc """
Revokes an ingress rule in an Amazon Redshift security group for a previously
authorized IP range or Amazon EC2 security group.
To add an ingress rule, see `AuthorizeClusterSecurityGroupIngress`. For
information about managing security groups, go to [Amazon Redshift Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def revoke_cluster_security_group_ingress(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RevokeClusterSecurityGroupIngress", input, options)
end
@doc """
Removes the ability of the specified AWS customer account to restore the
specified snapshot.
If the account is currently restoring the snapshot, the restore will run to
completion.
For more information about working with snapshots, go to [Amazon Redshift Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def revoke_snapshot_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RevokeSnapshotAccess", input, options)
end
@doc """
Rotates the encryption keys for a cluster.
"""
def rotate_encryption_key(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RotateEncryptionKey", input, options)
end
end
|
lib/aws/generated/redshift.ex
| 0.904598 | 0.636099 |
redshift.ex
|
starcoder
|
if Code.ensure_loaded?(Plug) do
defmodule Versioning.Controller do
@moduledoc """
A set of functions used with `Phoenix` controllers.
Typically, this module should be imported into your controller modules. In a normal
phoenix application, this can usually be done with the following:
defmodule YourAppWeb do
# ...
def controller do
quote do
use Phoenix.Controller, namespace: MyAppWeb
# ...
import Versioning.Controller
# ...
end
end
end
Please see the documentation at `Phoenix.Controller` for details on how to
set up a typical controller.
This module is mainly used to convert raw params from the version that the
request data represents to the latest version that your "context" application
expects.
## Example
Below is an example of how to use versioning in a typical controller:
defmodule MyController do
use MyAppWeb, :controller
plug Versioning.Plug, schema: MyVersioningSchema
def update(conn, %{"id" => id, "user" => params}) do
with {:ok, params} <- params_version(conn, params, "User"),
{:ok, user} <- Blog.fetch_user(id),
{:ok, user} <- Blog.update_user(user) do
render(conn, "show.json", user: user)
end
end
end
The `params_version/3` function accepts a conn, a set of params representing
whatever version of the data the client is using, and the type of the data.
It runs the params through your schema and returns the result.
"""
@doc """
Stores the schema for versioning.
## Examples
Versioning.Controller.put_schema(conn, MySchema)
"""
@spec put_schema(Plug.Conn.t(), Versioning.Schema.t()) :: Plug.Conn.t()
def put_schema(conn, schema) when is_atom(schema) do
Plug.Conn.put_private(conn, :versioning_schema, schema)
end
@doc """
Fetches the current schema.
Returns `{:ok, schema}` on success, or `:error` if no schema exists.
## Examples
iex> conn = Versioning.Controller.put_schema(conn, MySchema)
iex> Versioning.Controller.fetch_schema(conn)
{:ok, MySchema}
"""
@spec fetch_schema(Plug.Conn.t()) :: {:ok, Versioning.Schema.t()} | :error
def fetch_schema(conn) do
Map.fetch(conn.private, :versioning_schema)
end
@doc """
Fetches the current schema or errors if empty.
Returns `schema` or raises a `Versioning.MissingSchemaError`.
## Examples
iex> conn = Versioning.Controller.put_schema(conn, MySchema)
iex> Versioning.Controller.fetch_schema!(conn)
MySchema
"""
@spec fetch_schema!(Plug.Conn.t()) :: Versioning.Schema.t()
def fetch_schema!(conn) do
Map.get(conn.private, :versioning_schema) || raise Versioning.MissingSchemaError
end
@doc """
Stores the request version.
## Examples
Versioning.Controller.put_version(conn, "1.0.0")
"""
@spec put_version(Plug.Conn.t(), binary()) :: Plug.Conn.t()
def put_version(conn, version) do
Plug.Conn.put_private(conn, :versioning_version, version)
end
@doc """
Fetches the current request version.
Returns `{:ok, version}` on success, or `:error` if no version exists.
## Examples
iex> conn = Versioning.Controller.put_version(conn, "1.0.0")
iex> Versioning.Controller.fetch_version(conn)
{:ok, "1.0.0"}
"""
@spec fetch_version(Plug.Conn.t()) :: {:ok, binary()} | :error
def fetch_version(conn) do
Map.fetch(conn.private, :versioning_version)
end
@doc """
Fetches the current request version or errors if empty.
Returns `version` or raises a `Versioning.MissingVersionError`.
## Examples
iex> conn = Versioning.Controller.put_version(conn, "1.0.0")
iex> Versioning.Controller.fetch_version!(conn)
"1.0.0"
"""
@spec fetch_version!(Plug.Conn.t()) :: binary()
def fetch_version!(conn) do
Map.get(conn.private, :versioning_version) || raise Versioning.MissingVersionError
end
@doc """
Applies a version using the header or fallback.
The schema must already be stored on the conn to use this function.
The fallback is used if the header is not present. Its value can be `:latest`,
representing the latest version on the schema, or a `{module, function}`.
This module and function will be called with the conn.
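## Examples

    # Default header ("x-api-version") with the latest schema version as fallback:
    conn = Versioning.Controller.apply_version(conn)

    # Custom header with a {module, function} fallback. `MyApp.Versions.default/1`
    # is a hypothetical helper that receives the conn and returns a version:
    conn = Versioning.Controller.apply_version(conn, "x-my-version", {MyApp.Versions, :default})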
"""
@spec apply_version(Plug.Conn.t(), binary(), :latest | {module(), atom()}) :: Plug.Conn.t()
def apply_version(conn, header \\ "x-api-version", fallback \\ :latest) do
version = get_version(conn, header, fallback)
put_version(conn, version)
end
defp get_version(conn, header, fallback) do
case Plug.Conn.get_req_header(conn, header) do
[version] ->
version
_ ->
case fallback do
:latest ->
schema = fetch_schema!(conn)
schema.__schema__(:latest, :string)
{mod, fun} ->
apply(mod, fun, [conn])
end
end
end
@doc """
Performs versioning on the `params` using the given `type`.
The schema and request version must already be stored on the conn to use
this function.
Returns `{:ok, params}` with the new versioned params, or `{:error, :bad_version}`
if the schema does not contain the version requested.
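## Examples

    # Assuming a schema and request version were already put on the conn:
    {:ok, params} = Versioning.Controller.params_version(conn, %{"name" => "Jane"}, "User")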
"""
@spec params_version(Plug.Conn.t(), map(), binary()) :: {:ok, map()} | {:error, :bad_version}
def params_version(conn, params, type) do
schema = fetch_schema!(conn)
current = fetch_version!(conn)
target = schema.__schema__(:latest, :string)
versioning = Versioning.new(params, current, target, type)
case schema.run(versioning) do
{:ok, versioning} -> {:ok, versioning.data}
{:error, _error} -> {:error, :bad_version}
end
end
end
end
|
lib/versioning/controller.ex
| 0.842944 | 0.524577 |
controller.ex
|
starcoder
|
defmodule EEx.Engine do
@moduledoc ~S"""
Basic EEx engine that ships with Elixir.
An engine needs to implement all callbacks below.
This module also ships with a default engine implementation
you can delegate to. See `EEx.SmartEngine` as an example.
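For example, a minimal custom engine can delegate to the default implementation
and override a single callback (a sketch, not `EEx.SmartEngine` itself):

    defmodule MyEngine do
      @behaviour EEx.Engine

      defdelegate init(opts), to: EEx.Engine
      defdelegate handle_body(state), to: EEx.Engine
      defdelegate handle_begin(state), to: EEx.Engine
      defdelegate handle_end(state), to: EEx.Engine
      defdelegate handle_text(state, meta, text), to: EEx.Engine

      def handle_expr(state, marker, expr) do
        # Rewrite @assign references before falling back to the default engine.
        expr = Macro.prewalk(expr, &EEx.Engine.handle_assign/1)
        EEx.Engine.handle_expr(state, marker, expr)
      end
    end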
"""
@type state :: term
@doc """
Called at the beginning of every template.
It must return the initial state.
"""
@callback init(opts :: keyword) :: state
@doc """
Called at the end of every template.
It must return Elixir's quoted expressions for the template.
"""
@callback handle_body(state) :: Macro.t()
@doc """
Called for the text/static parts of a template.
It must return the updated state.
"""
@callback handle_text(state, [line: pos_integer, column: pos_integer], text :: String.t()) ::
state
@doc """
Called for the dynamic/code parts of a template.
The marker is what follows exactly after `<%`. For example,
`<% foo %>` has an empty marker, but `<%= foo %>` has `"="`
as marker. The allowed markers so far are:
* `""`
* `"="`
* `"/"`
* `"|"`
Markers `"/"` and `"|"` are only for use in custom EEx engines
and are not implemented by default. Using them without an
appropriate implementation raises `EEx.SyntaxError`.
It must return the updated state.
"""
@callback handle_expr(state, marker :: String.t(), expr :: Macro.t()) :: state
@doc """
Invoked at the beginning of every nesting.
It must return a new state that is used only inside the nesting.
Once the nesting terminates, the current `state` is resumed.
"""
@callback handle_begin(state) :: state
@doc """
Invoked at the end of every nesting.
It must return Elixir's quoted expressions for the nesting.
"""
@callback handle_end(state) :: Macro.t()
@doc false
@deprecated "Use explicit delegation to EEx.Engine instead"
defmacro __using__(_) do
quote do
@behaviour EEx.Engine
def init(opts) do
EEx.Engine.init(opts)
end
def handle_body(state) do
EEx.Engine.handle_body(state)
end
def handle_begin(state) do
EEx.Engine.handle_begin(state)
end
def handle_end(state) do
EEx.Engine.handle_end(state)
end
def handle_text(state, text) do
EEx.Engine.handle_text(state, [], text)
end
def handle_expr(state, marker, expr) do
EEx.Engine.handle_expr(state, marker, expr)
end
defoverridable EEx.Engine
end
end
@doc """
Handles assigns in quoted expressions.
A warning will be printed on missing assigns.
Future versions will raise.
This can be added to any custom engine by invoking
`handle_assign/1` with `Macro.prewalk/2`:
def handle_expr(state, token, expr) do
expr = Macro.prewalk(expr, &EEx.Engine.handle_assign/1)
super(state, token, expr)
end
"""
@spec handle_assign(Macro.t()) :: Macro.t()
def handle_assign({:@, meta, [{name, _, atom}]}) when is_atom(name) and is_atom(atom) do
line = meta[:line] || 0
quote(line: line, do: EEx.Engine.fetch_assign!(var!(assigns), unquote(name)))
end
def handle_assign(arg) do
arg
end
@doc false
# TODO: Raise on v2.0
@spec fetch_assign!(Access.t(), Access.key()) :: term | nil
def fetch_assign!(assigns, key) do
case Access.fetch(assigns, key) do
{:ok, val} ->
val
:error ->
keys = Enum.map(assigns, &elem(&1, 0))
IO.warn(
"assign @#{key} not available in EEx template. " <>
"Please ensure all assigns are given as options. " <>
"Available assigns: #{inspect(keys)}"
)
nil
end
end
@doc "Default implementation for `c:init/1`."
def init(_opts) do
%{
binary: [],
dynamic: [],
vars_count: 0
}
end
@doc "Default implementation for `c:handle_begin/1`."
def handle_begin(state) do
check_state!(state)
%{state | binary: [], dynamic: []}
end
@doc "Default implementation for `c:handle_end/1`."
def handle_end(quoted) do
handle_body(quoted)
end
@doc "Default implementation for `c:handle_body/1`."
def handle_body(state) do
check_state!(state)
%{binary: binary, dynamic: dynamic} = state
binary = {:<<>>, [], Enum.reverse(binary)}
dynamic = [binary | dynamic]
{:__block__, [], Enum.reverse(dynamic)}
end
@doc "Default implementation for `c:handle_text/3`."
def handle_text(state, _meta, text) do
check_state!(state)
%{binary: binary} = state
%{state | binary: [text | binary]}
end
@doc "Default implementation for `c:handle_expr/3`."
def handle_expr(state, "=", ast) do
check_state!(state)
%{binary: binary, dynamic: dynamic, vars_count: vars_count} = state
var = Macro.var(:"arg#{vars_count}", __MODULE__)
ast =
quote do
unquote(var) = String.Chars.to_string(unquote(ast))
end
segment =
quote do
unquote(var) :: binary
end
%{state | dynamic: [ast | dynamic], binary: [segment | binary], vars_count: vars_count + 1}
end
def handle_expr(state, "", ast) do
%{dynamic: dynamic} = state
%{state | dynamic: [ast | dynamic]}
end
def handle_expr(_state, marker, _ast) when marker in ["/", "|"] do
raise EEx.SyntaxError,
"unsupported EEx syntax <%#{marker} %> (the syntax is valid but not supported by the current EEx engine)"
end
defp check_state!(%{binary: _, dynamic: _, vars_count: _}), do: :ok
defp check_state!(state) do
raise "unexpected EEx.Engine state: #{inspect(state)}. " <>
"This typically means a bug or an outdated EEx.Engine or tool"
end
end
|
lib/eex/lib/eex/engine.ex
| 0.782746 | 0.551091 |
engine.ex
|
starcoder
|
defmodule Zaryn.Mining.TransactionContext.NodeDistribution do
@moduledoc false
@doc """
Split the previous storage nodes into groups to distribute the nodes
involved fairly, avoiding node overlap when possible
## Examples
iex> NodeDistribution.split_storage_nodes([
...> %Node{last_public_key: "key1"},
...> %Node{last_public_key: "key2"},
...> %Node{last_public_key: "key3"},
...> %Node{last_public_key: "key4"},
...> %Node{last_public_key: "key5"},
...> %Node{last_public_key: "key6"},
...> %Node{last_public_key: "key7"},
...> %Node{last_public_key: "key8"},
...> %Node{last_public_key: "key9"}
...> ], 3, 3)
[
[%Node{last_public_key: "key1"}, %Node{last_public_key: "key4"}, %Node{last_public_key: "key7"}],
[%Node{last_public_key: "key2"}, %Node{last_public_key: "key5"}, %Node{last_public_key: "key8"}],
[%Node{last_public_key: "key3"}, %Node{last_public_key: "key6"}, %Node{last_public_key: "key9"}]
]
Distribute across sublists if the number of nodes doesn't match the number of sublists and the sample size
iex> NodeDistribution.split_storage_nodes([
...> %Node{last_public_key: "key1"},
...> %Node{last_public_key: "key2"},
...> %Node{last_public_key: "key3"},
...> %Node{last_public_key: "key4"}
...> ], 3, 3)
[
[%Node{last_public_key: "key1"}, %Node{last_public_key: "key4"}, %Node{last_public_key: "key3"}],
[%Node{last_public_key: "key2"}, %Node{last_public_key: "key1"}, %Node{last_public_key: "key4"}],
[%Node{last_public_key: "key3"}, %Node{last_public_key: "key2"}, %Node{last_public_key: "key1"}]
]
iex> NodeDistribution.split_storage_nodes([
...> %Node{last_public_key: "key1"},
...> %Node{last_public_key: "key2"},
...> %Node{last_public_key: "key3"},
...> %Node{last_public_key: "key4"}
...> ], 2, 3)
[
[%Node{last_public_key: "key1"}, %Node{last_public_key: "key3"}, %Node{last_public_key: "key1"}],
[%Node{last_public_key: "key2"}, %Node{last_public_key: "key4"}, %Node{last_public_key: "key2"}]
]
iex> NodeDistribution.split_storage_nodes([
...> %Node{last_public_key: "key1"},
...> %Node{last_public_key: "key2"},
...> %Node{last_public_key: "key3"},
...> %Node{last_public_key: "key4"}
...> ], 5, 3)
[
[%Node{last_public_key: "key1"}, %Node{last_public_key: "key2"}, %Node{last_public_key: "key3"}],
[%Node{last_public_key: "key2"}, %Node{last_public_key: "key3"}, %Node{last_public_key: "key4"}],
[%Node{last_public_key: "key3"}, %Node{last_public_key: "key4"}, %Node{last_public_key: "key1"}],
[%Node{last_public_key: "key4"}, %Node{last_public_key: "key1"}, %Node{last_public_key: "key2"}],
[%Node{last_public_key: "key1"}, %Node{last_public_key: "key2"}, %Node{last_public_key: "key3"}]
]
"""
@spec split_storage_nodes(
storage_nodes :: list(Node.t()),
nb_sub_list :: pos_integer(),
sample_size :: pos_integer()
) :: list(list(Node.t()))
def split_storage_nodes(storage_nodes, nb_sublist, sample_size)
when is_list(storage_nodes) and is_number(nb_sublist) and nb_sublist > 0 and
is_number(sample_size) and sample_size > 0 do
do_split(storage_nodes, nb_sublist, sample_size, Enum.map(1..nb_sublist, fn _ -> [] end))
end
defp do_split(storage_nodes, nb_sublist, sample_size, sub_lists) do
split =
storage_nodes
|> Enum.reduce_while(sub_lists, fn node, acc ->
if length(acc) == nb_sublist and Enum.all?(acc, &(length(&1) == sample_size)) do
{:halt, acc}
else
smallest_sub_list = Enum.min_by(acc, &length/1)
sub_list_index_to_add = Enum.find_index(acc, &(&1 == smallest_sub_list))
{:cont, List.update_at(acc, sub_list_index_to_add, &[node | &1])}
end
end)
if Enum.all?(split, &(length(&1) == sample_size)) do
Enum.map(split, &Enum.reverse/1)
else
do_split(storage_nodes, nb_sublist, sample_size, split)
end
end
end
|
lib/zaryn/mining/transaction_context/node_distribution.ex
| 0.722037 | 0.438184 |
node_distribution.ex
|
starcoder
|
defmodule Benchmarks.GoogleMessage3.Message34390 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field34452: [Benchmarks.GoogleMessage3.Message34387.t()]
}
defstruct [:field34452]
field :field34452, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message34387
end
defmodule Benchmarks.GoogleMessage3.Message34624 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field34683: Benchmarks.GoogleMessage3.Message34621.t() | nil,
field34684: Benchmarks.GoogleMessage3.Message34621.t() | nil
}
defstruct [:field34683, :field34684]
field :field34683, 1, optional: true, type: Benchmarks.GoogleMessage3.Message34621
field :field34684, 2, optional: true, type: Benchmarks.GoogleMessage3.Message34621
end
defmodule Benchmarks.GoogleMessage3.Message34791.Message34792 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field34808: String.t(),
field34809: String.t()
}
defstruct [:field34808, :field34809]
field :field34808, 3, required: true, type: :string
field :field34809, 4, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message34791 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field34793: non_neg_integer,
message34792: [any],
field34795: integer,
field34796: integer,
field34797: integer,
field34798: integer,
field34799: integer,
field34800: integer,
field34801: boolean,
field34802: float | :infinity | :negative_infinity | :nan,
field34803: integer,
field34804: String.t(),
field34805: integer,
field34806: [non_neg_integer]
}
defstruct [
:field34793,
:message34792,
:field34795,
:field34796,
:field34797,
:field34798,
:field34799,
:field34800,
:field34801,
:field34802,
:field34803,
:field34804,
:field34805,
:field34806
]
field :field34793, 1, optional: true, type: :fixed64
field :message34792, 2, repeated: true, type: :group
field :field34795, 5, optional: true, type: :int32
field :field34796, 6, optional: true, type: :int32
field :field34797, 7, optional: true, type: :int32
field :field34798, 8, optional: true, type: :int32
field :field34799, 9, optional: true, type: :int32
field :field34800, 10, optional: true, type: :int32
field :field34801, 11, optional: true, type: :bool
field :field34802, 12, optional: true, type: :float
field :field34803, 13, optional: true, type: :int32
field :field34804, 14, optional: true, type: :string
field :field34805, 15, optional: true, type: :int64
field :field34806, 17, repeated: true, type: :fixed64, packed: true
end
defmodule Benchmarks.GoogleMessage3.Message35483 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35499: integer,
field35500: String.t(),
field35501: String.t(),
field35502: String.t(),
field35503: [Benchmarks.GoogleMessage3.Message35476.t()],
field35504: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil
}
defstruct [:field35499, :field35500, :field35501, :field35502, :field35503, :field35504]
field :field35499, 1, optional: true, type: :int32
field :field35500, 2, optional: true, type: :string
field :field35501, 3, optional: true, type: :string
field :field35502, 4, optional: true, type: :string
field :field35503, 5, repeated: true, type: Benchmarks.GoogleMessage3.Message35476
field :field35504, 6, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message35807 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field35810: integer,
field35811: integer,
field35812: integer,
field35813: integer,
field35814: integer,
field35815: integer,
field35816: integer,
field35817: integer
}
defstruct [
:field35810,
:field35811,
:field35812,
:field35813,
:field35814,
:field35815,
:field35816,
:field35817
]
field :field35810, 1, optional: true, type: :int32
field :field35811, 2, optional: true, type: :int32
field :field35812, 3, optional: true, type: :int32
field :field35813, 4, optional: true, type: :int32
field :field35814, 5, optional: true, type: :int32
field :field35815, 6, optional: true, type: :int32
field :field35816, 7, optional: true, type: :int32
field :field35817, 8, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message37487 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37501: binary,
field37502: boolean
}
defstruct [:field37501, :field37502]
field :field37501, 2, optional: true, type: :bytes
field :field37502, 3, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message13062 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field13075: integer,
field13076: String.t(),
field13077: integer,
field13078: String.t(),
field13079: integer
}
defstruct [:field13075, :field13076, :field13077, :field13078, :field13079]
field :field13075, 1, optional: true, type: :int64
field :field13076, 2, optional: true, type: :string
field :field13077, 3, optional: true, type: :int32
field :field13078, 4, optional: true, type: :string
field :field13079, 5, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message952 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field963: [Benchmarks.GoogleMessage3.Message949.t()]
}
defstruct [:field963]
field :field963, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message949
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36877 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37044: String.t(),
field37045: integer,
field37046: binary,
field37047: integer,
field37048: integer
}
defstruct [:field37044, :field37045, :field37046, :field37047, :field37048]
field :field37044, 112, required: true, type: :string
field :field37045, 113, optional: true, type: :int32
field :field37046, 114, optional: true, type: :bytes
field :field37047, 115, optional: true, type: :int32
field :field37048, 157, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36878 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36879 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37050: String.t(),
field37051: integer
}
defstruct [:field37050, :field37051]
field :field37050, 56, required: true, type: :string
field :field37051, 69, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36880 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36881 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36882 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36883 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36884 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36885 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36886 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36887 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36888 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37089: non_neg_integer,
field37090: boolean,
field37091: non_neg_integer,
field37092: float | :infinity | :negative_infinity | :nan,
field37093: non_neg_integer,
field37094: binary
}
defstruct [:field37089, :field37090, :field37091, :field37092, :field37093, :field37094]
field :field37089, 75, optional: true, type: :uint64
field :field37090, 76, optional: true, type: :bool
field :field37091, 165, optional: true, type: :uint64
field :field37092, 166, optional: true, type: :double
field :field37093, 109, optional: true, type: :uint64
field :field37094, 122, optional: true, type: :bytes
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36889 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37095: integer,
field37096: String.t(),
field37097: integer,
field37098: boolean,
field37099: integer,
field37100: integer,
field37101: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37102: Benchmarks.GoogleMessage3.Message13174.t() | nil,
field37103: Benchmarks.GoogleMessage3.Message13169.t() | nil,
field37104: non_neg_integer,
field37105: [[Benchmarks.GoogleMessage3.Enum36890.t()]],
field37106: boolean,
field37107: boolean,
field37108: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37109: float | :infinity | :negative_infinity | :nan,
field37110: float | :infinity | :negative_infinity | :nan,
field37111: boolean,
field37112: integer,
field37113: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37114: boolean,
field37115: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37116: Benchmarks.GoogleMessage3.UnusedEnum.t(),
field37117: [[Benchmarks.GoogleMessage3.UnusedEnum.t()]],
field37118: integer,
field37119: [String.t()]
}
defstruct [
:field37095,
:field37096,
:field37097,
:field37098,
:field37099,
:field37100,
:field37101,
:field37102,
:field37103,
:field37104,
:field37105,
:field37106,
:field37107,
:field37108,
:field37109,
:field37110,
:field37111,
:field37112,
:field37113,
:field37114,
:field37115,
:field37116,
:field37117,
:field37118,
:field37119
]
field :field37095, 117, optional: true, type: :int64
field :field37096, 145, optional: true, type: :string
field :field37097, 123, optional: true, type: :int32
field :field37098, 163, optional: true, type: :bool
field :field37099, 164, optional: true, type: :int32
field :field37100, 149, optional: true, type: :int32
field :field37101, 129, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37102, 124, optional: true, type: Benchmarks.GoogleMessage3.Message13174
field :field37103, 128, optional: true, type: Benchmarks.GoogleMessage3.Message13169
field :field37104, 132, optional: true, type: :uint64
field :field37105, 131, repeated: true, type: Benchmarks.GoogleMessage3.Enum36890, enum: true
field :field37106, 134, optional: true, type: :bool
field :field37107, 140, optional: true, type: :bool
field :field37108, 135, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37109, 136, optional: true, type: :float
field :field37110, 156, optional: true, type: :float
field :field37111, 142, optional: true, type: :bool
field :field37112, 167, optional: true, type: :int64
field :field37113, 146, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37114, 148, optional: true, type: :bool
field :field37115, 154, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37116, 158, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field37117, 159, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field37118, 160, optional: true, type: :int32
field :field37119, 161, repeated: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36910 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36911 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37121: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37122: Benchmarks.GoogleMessage3.Message35538.t() | nil,
field37123: Benchmarks.GoogleMessage3.Message35540.t() | nil,
field37124: Benchmarks.GoogleMessage3.Message35542.t() | nil
}
defstruct [:field37121, :field37122, :field37123, :field37124]
field :field37121, 127, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37122, 130, optional: true, type: Benchmarks.GoogleMessage3.Message35538
field :field37123, 144, optional: true, type: Benchmarks.GoogleMessage3.Message35540
field :field37124, 150, optional: true, type: Benchmarks.GoogleMessage3.Message35542
end
defmodule Benchmarks.GoogleMessage3.Message36876.Message36912 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field37125: Benchmarks.GoogleMessage3.Message3901.t() | nil,
field37126: Benchmarks.GoogleMessage3.Message3901.t() | nil
}
defstruct [:field37125, :field37126]
field :field37125, 153, optional: true, type: Benchmarks.GoogleMessage3.Message3901
field :field37126, 162, optional: true, type: Benchmarks.GoogleMessage3.Message3901
end
defmodule Benchmarks.GoogleMessage3.Message36876 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field36980: Benchmarks.GoogleMessage3.Message2356.t() | nil,
message36877: [any],
message36878: [any],
message36879: [any],
field36984: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
message36880: any,
field36986: non_neg_integer,
field36987: binary,
field36988: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field36989: Benchmarks.GoogleMessage3.Message7029.t() | nil,
field36990: Benchmarks.GoogleMessage3.Message35573.t() | nil,
field36991: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field36992: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field36993: float | :infinity | :negative_infinity | :nan,
field36994: integer,
field36995: boolean,
field36996: boolean,
field36997: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field36998: integer,
field36999: integer,
field37000: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
message36881: [any],
field37002: Benchmarks.GoogleMessage3.Message4144.t() | nil,
message36882: [any],
field37004: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37005: Benchmarks.GoogleMessage3.Message18921.t() | nil,
field37006: Benchmarks.GoogleMessage3.Message36858.t() | nil,
field37007: Benchmarks.GoogleMessage3.Message18831.t() | nil,
field37008: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37009: Benchmarks.GoogleMessage3.Message18283.t() | nil,
field37010: String.t(),
field37011: String.t(),
field37012: Benchmarks.GoogleMessage3.Message0.t() | nil,
field37013: Benchmarks.GoogleMessage3.Message0.t() | nil,
field37014: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37015: Benchmarks.GoogleMessage3.Message36869.t() | nil,
message36883: any,
message36884: [any],
message36885: [any],
message36886: any,
field37020: [[Benchmarks.GoogleMessage3.UnusedEnum.t()]],
field37021: [integer],
field37022: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37023: Benchmarks.GoogleMessage3.Message13090.t() | nil,
message36887: any,
field37025: [Benchmarks.GoogleMessage3.Message10155.t()],
field37026: [Benchmarks.GoogleMessage3.Message11874.t()],
field37027: String.t(),
field37028: integer,
field37029: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field37030: Benchmarks.GoogleMessage3.Message35546.t() | nil,
message36888: any,
field37032: [Benchmarks.GoogleMessage3.Message19255.t()],
field37033: Benchmarks.GoogleMessage3.Message33968.t() | nil,
field37034: boolean,
field37035: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field37036: Benchmarks.GoogleMessage3.Message6644.t() | nil,
field37037: binary,
message36889: any,
message36910: [any],
message36911: any,
message36912: any,
field37042: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil
}
defstruct [
:field36980,
:message36877,
:message36878,
:message36879,
:field36984,
:message36880,
:field36986,
:field36987,
:field36988,
:field36989,
:field36990,
:field36991,
:field36992,
:field36993,
:field36994,
:field36995,
:field36996,
:field36997,
:field36998,
:field36999,
:field37000,
:message36881,
:field37002,
:message36882,
:field37004,
:field37005,
:field37006,
:field37007,
:field37008,
:field37009,
:field37010,
:field37011,
:field37012,
:field37013,
:field37014,
:field37015,
:message36883,
:message36884,
:message36885,
:message36886,
:field37020,
:field37021,
:field37022,
:field37023,
:message36887,
:field37025,
:field37026,
:field37027,
:field37028,
:field37029,
:field37030,
:message36888,
:field37032,
:field37033,
:field37034,
:field37035,
:field37036,
:field37037,
:message36889,
:message36910,
:message36911,
:message36912,
:field37042
]
field :field36980, 1, optional: true, type: Benchmarks.GoogleMessage3.Message2356
field :message36877, 111, repeated: true, type: :group
field :message36878, 168, repeated: true, type: :group
field :message36879, 55, repeated: true, type: :group
field :field36984, 78, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :message36880, 137, optional: true, type: :group
field :field36986, 59, optional: true, type: :uint64
field :field36987, 121, optional: true, type: :bytes
field :field36988, 2, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field36989, 118, optional: true, type: Benchmarks.GoogleMessage3.Message7029
field :field36990, 11, optional: true, type: Benchmarks.GoogleMessage3.Message35573
field :field36991, 21, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field36992, 22, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field36993, 13, optional: true, type: :float
field :field36994, 20, optional: true, type: :int32
field :field36995, 51, optional: true, type: :bool
field :field36996, 57, optional: true, type: :bool
field :field36997, 100, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field36998, 47, optional: true, type: :int32
field :field36999, 48, optional: true, type: :int32
field :field37000, 68, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :message36881, 23, repeated: true, type: :group
field :field37002, 125, optional: true, type: Benchmarks.GoogleMessage3.Message4144
field :message36882, 35, repeated: true, type: :group
field :field37004, 49, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37005, 52, optional: true, type: Benchmarks.GoogleMessage3.Message18921
field :field37006, 46, optional: true, type: Benchmarks.GoogleMessage3.Message36858
field :field37007, 54, optional: true, type: Benchmarks.GoogleMessage3.Message18831
field :field37008, 58, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37009, 10, optional: true, type: Benchmarks.GoogleMessage3.Message18283
field :field37010, 44, optional: true, type: :string
field :field37011, 103, optional: true, type: :string
field :field37012, 43, optional: true, type: Benchmarks.GoogleMessage3.Message0
field :field37013, 143, optional: true, type: Benchmarks.GoogleMessage3.Message0
field :field37014, 53, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37015, 15, optional: true, type: Benchmarks.GoogleMessage3.Message36869
field :message36883, 3, optional: true, type: :group
field :message36884, 16, repeated: true, type: :group
field :message36885, 27, repeated: true, type: :group
field :message36886, 32, optional: true, type: :group
field :field37020, 71, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field37021, 70, repeated: true, type: :int32
field :field37022, 66, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37023, 67, optional: true, type: Benchmarks.GoogleMessage3.Message13090
field :message36887, 62, optional: true, type: :group
field :field37025, 50, repeated: true, type: Benchmarks.GoogleMessage3.Message10155
field :field37026, 151, repeated: true, type: Benchmarks.GoogleMessage3.Message11874
field :field37027, 12, optional: true, type: :string
field :field37028, 72, optional: true, type: :int64
field :field37029, 73, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37030, 108, optional: true, type: Benchmarks.GoogleMessage3.Message35546
field :message36888, 74, optional: true, type: :group
field :field37032, 104, repeated: true, type: Benchmarks.GoogleMessage3.Message19255
field :field37033, 105, optional: true, type: Benchmarks.GoogleMessage3.Message33968
field :field37034, 106, optional: true, type: :bool
field :field37035, 107, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field37036, 110, optional: true, type: Benchmarks.GoogleMessage3.Message6644
field :field37037, 133, optional: true, type: :bytes
field :message36889, 116, optional: true, type: :group
field :message36910, 119, repeated: true, type: :group
field :message36911, 126, optional: true, type: :group
field :message36912, 152, optional: true, type: :group
field :field37042, 155, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message1328 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message6850 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message6863 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field6931: Benchmarks.GoogleMessage3.Enum6858.t(),
field6932: Benchmarks.GoogleMessage3.Enum6858.t(),
field6933: Benchmarks.GoogleMessage3.UnusedEnum.t(),
field6934: boolean,
field6935: Benchmarks.GoogleMessage3.Message6773.t() | nil,
field6936: integer,
field6937: integer,
field6938: Benchmarks.GoogleMessage3.Enum6815.t(),
field6939: String.t(),
field6940: integer,
field6941: Benchmarks.GoogleMessage3.Enum6822.t(),
field6942: boolean,
field6943: boolean,
field6944: float | :infinity | :negative_infinity | :nan,
field6945: float | :infinity | :negative_infinity | :nan,
field6946: integer,
field6947: integer,
field6948: boolean,
field6949: integer,
field6950: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6951: non_neg_integer,
field6952: String.t(),
field6953: binary,
field6954: integer,
field6955: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6956: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6957: Benchmarks.GoogleMessage3.Message3886.t() | nil,
field6958: String.t(),
field6959: non_neg_integer,
field6960: Benchmarks.GoogleMessage3.Message6743.t() | nil,
field6961: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6962: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field6963: boolean
}
defstruct [
:field6931,
:field6932,
:field6933,
:field6934,
:field6935,
:field6936,
:field6937,
:field6938,
:field6939,
:field6940,
:field6941,
:field6942,
:field6943,
:field6944,
:field6945,
:field6946,
:field6947,
:field6948,
:field6949,
:field6950,
:field6951,
:field6952,
:field6953,
:field6954,
:field6955,
:field6956,
:field6957,
:field6958,
:field6959,
:field6960,
:field6961,
:field6962,
:field6963
]
field :field6931, 1, optional: true, type: Benchmarks.GoogleMessage3.Enum6858, enum: true
field :field6932, 2, optional: true, type: Benchmarks.GoogleMessage3.Enum6858, enum: true
field :field6933, 36, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field6934, 27, optional: true, type: :bool
field :field6935, 26, optional: true, type: Benchmarks.GoogleMessage3.Message6773
field :field6936, 30, optional: true, type: :int32
field :field6937, 37, optional: true, type: :int32
field :field6938, 31, optional: true, type: Benchmarks.GoogleMessage3.Enum6815, enum: true
field :field6939, 3, optional: true, type: :string
field :field6940, 4, optional: true, type: :int32
field :field6941, 15, optional: true, type: Benchmarks.GoogleMessage3.Enum6822, enum: true
field :field6942, 10, optional: true, type: :bool
field :field6943, 17, optional: true, type: :bool
field :field6944, 18, optional: true, type: :float
field :field6945, 19, optional: true, type: :float
field :field6946, 5, optional: true, type: :int32
field :field6947, 6, optional: true, type: :int32
field :field6948, 7, optional: true, type: :bool
field :field6949, 12, optional: true, type: :int32
field :field6950, 8, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6951, 9, optional: true, type: :uint64
field :field6952, 11, optional: true, type: :string
field :field6953, 13, optional: true, type: :bytes
field :field6954, 14, optional: true, type: :int32
field :field6955, 16, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6956, 22, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6957, 38, optional: true, type: Benchmarks.GoogleMessage3.Message3886
field :field6958, 20, optional: true, type: :string
field :field6959, 21, optional: true, type: :uint32
field :field6960, 23, optional: true, type: Benchmarks.GoogleMessage3.Message6743
field :field6961, 29, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6962, 33, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6963, 34, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message6871 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message7547 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field7549: binary,
field7550: integer
}
defstruct [:field7549, :field7550]
field :field7549, 1, required: true, type: :bytes
field :field7550, 2, required: true, type: :int32
end
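# Usage sketch (hypothetical values): modules defined with `use Protobuf` expose
# encode/1 and decode/1, so a round trip through this message looks like:
#
#     msg = %Benchmarks.GoogleMessage3.Message7547{field7549: <<1, 2, 3>>, field7550: 42}
#     bin = Benchmarks.GoogleMessage3.Message7547.encode(msg)
#     ^msg = Benchmarks.GoogleMessage3.Message7547.decode(bin)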
defmodule Benchmarks.GoogleMessage3.Message7648 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field7669: String.t(),
field7670: integer,
field7671: integer,
field7672: integer,
field7673: integer,
field7674: integer,
field7675: float | :infinity | :negative_infinity | :nan,
field7676: boolean,
field7677: boolean,
field7678: boolean,
field7679: boolean,
field7680: boolean
}
defstruct [
:field7669,
:field7670,
:field7671,
:field7672,
:field7673,
:field7674,
:field7675,
:field7676,
:field7677,
:field7678,
:field7679,
:field7680
]
field :field7669, 1, optional: true, type: :string
field :field7670, 2, optional: true, type: :int32
field :field7671, 3, optional: true, type: :int32
field :field7672, 4, optional: true, type: :int32
field :field7673, 5, optional: true, type: :int32
field :field7674, 6, optional: true, type: :int32
field :field7675, 7, optional: true, type: :float
field :field7676, 8, optional: true, type: :bool
field :field7677, 9, optional: true, type: :bool
field :field7678, 10, optional: true, type: :bool
field :field7679, 11, optional: true, type: :bool
field :field7680, 12, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message7865 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{}
defstruct []
end
defmodule Benchmarks.GoogleMessage3.Message7928 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field7940: String.t(),
field7941: integer
}
defstruct [:field7940, :field7941]
field :field7940, 1, optional: true, type: :string
field :field7941, 2, optional: true, type: :int64
end
defmodule Benchmarks.GoogleMessage3.Message7919 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field7931: non_neg_integer,
field7932: integer,
field7933: binary
}
defstruct [:field7931, :field7932, :field7933]
field :field7931, 1, optional: true, type: :fixed64
field :field7932, 2, optional: true, type: :int64
field :field7933, 3, optional: true, type: :bytes
end
defmodule Benchmarks.GoogleMessage3.Message7920 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field7934: integer,
field7935: integer
}
defstruct [:field7934, :field7935]
field :field7934, 1, optional: true, type: :int64
field :field7935, 2, optional: true, type: :int64
end
defmodule Benchmarks.GoogleMessage3.Message7921 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field7936: integer,
field7937: integer,
field7938: float | :infinity | :negative_infinity | :nan,
field7939: Benchmarks.GoogleMessage3.UnusedEnum.t()
}
defstruct [:field7936, :field7937, :field7938, :field7939]
field :field7936, 1, optional: true, type: :int32
field :field7937, 2, optional: true, type: :int64
field :field7938, 3, optional: true, type: :float
field :field7939, 4, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
end
defmodule Benchmarks.GoogleMessage3.Message8511 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field8539: Benchmarks.GoogleMessage3.Message8224.t() | nil,
field8540: String.t(),
field8541: boolean,
field8542: integer,
field8543: String.t()
}
defstruct [:field8539, :field8540, :field8541, :field8542, :field8543]
field :field8539, 1, optional: true, type: Benchmarks.GoogleMessage3.Message8224
field :field8540, 2, optional: true, type: :string
field :field8541, 3, optional: true, type: :bool
field :field8542, 4, optional: true, type: :int64
field :field8543, 5, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message8512 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field8544: Benchmarks.GoogleMessage3.Message8301.t() | nil,
field8545: Benchmarks.GoogleMessage3.Message8302.t() | nil,
field8546: String.t(),
field8547: boolean,
field8548: integer,
field8549: String.t()
}
defstruct [:field8544, :field8545, :field8546, :field8547, :field8548, :field8549]
field :field8544, 1, optional: true, type: Benchmarks.GoogleMessage3.Message8301
field :field8545, 2, optional: true, type: Benchmarks.GoogleMessage3.Message8302
field :field8546, 3, optional: true, type: :string
field :field8547, 4, optional: true, type: :bool
field :field8548, 5, optional: true, type: :int64
field :field8549, 6, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message8513 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field8550: [Benchmarks.GoogleMessage3.Message8392.t()],
field8551: String.t(),
field8552: boolean,
field8553: String.t()
}
defstruct [:field8550, :field8551, :field8552, :field8553]
field :field8550, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message8392
field :field8551, 2, optional: true, type: :string
field :field8552, 3, optional: true, type: :bool
field :field8553, 4, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message8514 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field8554: String.t(),
field8555: integer,
field8556: boolean,
field8557: [Benchmarks.GoogleMessage3.Message8130.t()],
field8558: String.t()
}
defstruct [:field8554, :field8555, :field8556, :field8557, :field8558]
field :field8554, 1, optional: true, type: :string
field :field8555, 2, optional: true, type: :int64
field :field8556, 3, optional: true, type: :bool
field :field8557, 4, repeated: true, type: Benchmarks.GoogleMessage3.Message8130
field :field8558, 5, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message8515 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field8559: Benchmarks.GoogleMessage3.Message8479.t() | nil,
field8560: Benchmarks.GoogleMessage3.Message8478.t() | nil,
field8561: String.t()
}
defstruct [:field8559, :field8560, :field8561]
field :field8559, 1, optional: true, type: Benchmarks.GoogleMessage3.Message8479
field :field8560, 2, optional: true, type: Benchmarks.GoogleMessage3.Message8478
field :field8561, 3, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message10320 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field10347: Benchmarks.GoogleMessage3.Enum10335.t(),
field10348: [Benchmarks.GoogleMessage3.Message10319.t()],
field10349: integer,
field10350: integer,
field10351: integer,
field10352: integer,
field10353: Benchmarks.GoogleMessage3.Enum10337.t()
}
defstruct [
:field10347,
:field10348,
:field10349,
:field10350,
:field10351,
:field10352,
:field10353
]
field :field10347, 1, optional: true, type: Benchmarks.GoogleMessage3.Enum10335, enum: true
field :field10348, 2, repeated: true, type: Benchmarks.GoogleMessage3.Message10319
field :field10349, 3, optional: true, type: :int32
field :field10350, 4, optional: true, type: :int32
field :field10351, 5, optional: true, type: :int32
field :field10352, 6, optional: true, type: :int32
field :field10353, 7, optional: true, type: Benchmarks.GoogleMessage3.Enum10337, enum: true
end
defmodule Benchmarks.GoogleMessage3.Message10321 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field10354: integer,
field10355: integer,
field10356: non_neg_integer
}
defstruct [:field10354, :field10355, :field10356]
field :field10354, 1, optional: true, type: :int32
field :field10355, 2, optional: true, type: :int32
field :field10356, 3, optional: true, type: :uint64
end
defmodule Benchmarks.GoogleMessage3.Message10322 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field10357: Benchmarks.GoogleMessage3.Message4016.t() | nil,
field10358: boolean,
field10359: boolean
}
defstruct [:field10357, :field10358, :field10359]
field :field10357, 1, optional: true, type: Benchmarks.GoogleMessage3.Message4016
field :field10358, 2, optional: true, type: :bool
field :field10359, 3, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message11988 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field12021: String.t(),
field12022: String.t(),
field12023: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field12024: Benchmarks.GoogleMessage3.Message10155.t() | nil
}
defstruct [:field12021, :field12022, :field12023, :field12024]
field :field12021, 1, optional: true, type: :string
field :field12022, 2, optional: true, type: :string
field :field12023, 3, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field12024, 4, optional: true, type: Benchmarks.GoogleMessage3.Message10155
end
defmodule Benchmarks.GoogleMessage3.Message12668 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field12677: [Benchmarks.GoogleMessage3.Message12669.t()],
field12678: integer,
field12679: integer,
field12680: integer
}
defstruct [:field12677, :field12678, :field12679, :field12680]
field :field12677, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message12669
field :field12678, 2, optional: true, type: :int32
field :field12679, 3, optional: true, type: :int32
field :field12680, 4, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message12825 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field12862: [Benchmarks.GoogleMessage3.Message12818.t()],
field12863: integer,
field12864: Benchmarks.GoogleMessage3.Message12819.t() | nil,
field12865: Benchmarks.GoogleMessage3.Message12820.t() | nil,
field12866: integer,
field12867: [Benchmarks.GoogleMessage3.Message12821.t()],
field12868: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()]
}
defstruct [
:field12862,
:field12863,
:field12864,
:field12865,
:field12866,
:field12867,
:field12868
]
field :field12862, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message12818
field :field12863, 2, optional: true, type: :int32
field :field12864, 3, optional: true, type: Benchmarks.GoogleMessage3.Message12819
field :field12865, 4, optional: true, type: Benchmarks.GoogleMessage3.Message12820
field :field12866, 5, optional: true, type: :int32
field :field12867, 6, repeated: true, type: Benchmarks.GoogleMessage3.Message12821
field :field12868, 7, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message16478 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field16481: [Benchmarks.GoogleMessage3.Message16479.t()],
field16482: boolean,
field16483: integer
}
defstruct [:field16481, :field16482, :field16483]
field :field16481, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message16479
field :field16482, 3, optional: true, type: :bool
field :field16483, 2, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message16552 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field16565: non_neg_integer,
field16566: integer,
field16567: Benchmarks.GoogleMessage3.Enum16553.t()
}
defstruct [:field16565, :field16566, :field16567]
field :field16565, 1, optional: true, type: :fixed64
field :field16566, 2, optional: true, type: :int32
field :field16567, 3, optional: true, type: Benchmarks.GoogleMessage3.Enum16553, enum: true
end
defmodule Benchmarks.GoogleMessage3.Message16660 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field16668: String.t(),
field16669: String.t(),
field16670: integer
}
defstruct [:field16668, :field16669, :field16670]
field :field16668, 1, optional: true, type: :string
field :field16669, 2, optional: true, type: :string
field :field16670, 3, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message16727 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field16782: Benchmarks.GoogleMessage3.Enum16728.t(),
field16783: String.t(),
field16784: String.t(),
field16785: integer,
field16786: String.t(),
field16787: String.t(),
field16788: String.t(),
field16789: Benchmarks.GoogleMessage3.Enum16732.t(),
field16790: String.t(),
field16791: String.t(),
field16792: String.t(),
field16793: Benchmarks.GoogleMessage3.Enum16738.t(),
field16794: integer,
field16795: [Benchmarks.GoogleMessage3.Message16722.t()],
field16796: boolean,
field16797: boolean,
field16798: String.t(),
field16799: integer,
field16800: boolean,
field16801: String.t(),
field16802: Benchmarks.GoogleMessage3.Enum16698.t(),
field16803: Benchmarks.GoogleMessage3.Message16724.t() | nil,
field16804: boolean,
field16805: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
__pb_extensions__: map
}
defstruct [
:field16782,
:field16783,
:field16784,
:field16785,
:field16786,
:field16787,
:field16788,
:field16789,
:field16790,
:field16791,
:field16792,
:field16793,
:field16794,
:field16795,
:field16796,
:field16797,
:field16798,
:field16799,
:field16800,
:field16801,
:field16802,
:field16803,
:field16804,
:field16805,
:__pb_extensions__
]
field :field16782, 1, required: true, type: Benchmarks.GoogleMessage3.Enum16728, enum: true
field :field16783, 2, required: true, type: :string
field :field16784, 3, optional: true, type: :string
field :field16785, 23, optional: true, type: :int32
field :field16786, 4, required: true, type: :string
field :field16787, 5, optional: true, type: :string
field :field16788, 6, optional: true, type: :string
field :field16789, 7, required: true, type: Benchmarks.GoogleMessage3.Enum16732, enum: true
field :field16790, 8, optional: true, type: :string
field :field16791, 9, optional: true, type: :string
field :field16792, 10, optional: true, type: :string
field :field16793, 11, optional: true, type: Benchmarks.GoogleMessage3.Enum16738, enum: true
field :field16794, 12, optional: true, type: :int32
field :field16795, 13, repeated: true, type: Benchmarks.GoogleMessage3.Message16722
field :field16796, 19, optional: true, type: :bool
field :field16797, 24, optional: true, type: :bool
field :field16798, 14, optional: true, type: :string
field :field16799, 15, optional: true, type: :int64
field :field16800, 16, optional: true, type: :bool
field :field16801, 17, optional: true, type: :string
field :field16802, 18, optional: true, type: Benchmarks.GoogleMessage3.Enum16698, enum: true
field :field16803, 20, optional: true, type: Benchmarks.GoogleMessage3.Message16724
field :field16804, 22, optional: true, type: :bool
field :field16805, 25, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
extensions [{1000, 536_870_912}]
end
defmodule Benchmarks.GoogleMessage3.Message16725 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field16774: Benchmarks.GoogleMessage3.Enum16728.t(),
field16775: [String.t()]
}
defstruct [:field16774, :field16775]
field :field16774, 1, optional: true, type: Benchmarks.GoogleMessage3.Enum16728, enum: true
field :field16775, 2, repeated: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message17726 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field17801: String.t(),
field17802: [String.t()],
field17803: String.t(),
field17804: [String.t()],
field17805: String.t(),
field17806: [String.t()],
field17807: String.t(),
field17808: String.t(),
field17809: [String.t()],
field17810: [String.t()],
field17811: [String.t()],
field17812: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17813: String.t(),
field17814: String.t(),
field17815: String.t(),
field17816: String.t(),
field17817: String.t(),
field17818: String.t(),
field17819: String.t(),
field17820: [Benchmarks.GoogleMessage3.Message17728.t()],
field17821: [Benchmarks.GoogleMessage3.Message17728.t()],
field17822: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()]
}
defstruct [
:field17801,
:field17802,
:field17803,
:field17804,
:field17805,
:field17806,
:field17807,
:field17808,
:field17809,
:field17810,
:field17811,
:field17812,
:field17813,
:field17814,
:field17815,
:field17816,
:field17817,
:field17818,
:field17819,
:field17820,
:field17821,
:field17822
]
field :field17801, 1, optional: true, type: :string
field :field17802, 2, repeated: true, type: :string
field :field17803, 3, optional: true, type: :string
field :field17804, 4, repeated: true, type: :string
field :field17805, 5, optional: true, type: :string
field :field17806, 6, repeated: true, type: :string
field :field17807, 7, optional: true, type: :string
field :field17808, 8, optional: true, type: :string
field :field17809, 15, repeated: true, type: :string
field :field17810, 16, repeated: true, type: :string
field :field17811, 17, repeated: true, type: :string
field :field17812, 18, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17813, 9, optional: true, type: :string
field :field17814, 10, optional: true, type: :string
field :field17815, 11, optional: true, type: :string
field :field17816, 12, optional: true, type: :string
field :field17817, 13, optional: true, type: :string
field :field17818, 14, optional: true, type: :string
field :field17819, 19, optional: true, type: :string
field :field17820, 20, repeated: true, type: Benchmarks.GoogleMessage3.Message17728
field :field17821, 21, repeated: true, type: Benchmarks.GoogleMessage3.Message17728
field :field17822, 30, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message17782 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18153: String.t(),
field18154: String.t()
}
defstruct [:field18153, :field18154]
field :field18153, 1, optional: true, type: :string
field :field18154, 2, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message17783.Message17784 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18162: String.t(),
field18163: String.t(),
field18164: String.t(),
field18165: [String.t()],
field18166: String.t(),
field18167: String.t()
}
defstruct [:field18162, :field18163, :field18164, :field18165, :field18166, :field18167]
field :field18162, 5, optional: true, type: :string
field :field18163, 6, optional: true, type: :string
field :field18164, 7, optional: true, type: :string
field :field18165, 8, repeated: true, type: :string
field :field18166, 17, optional: true, type: :string
field :field18167, 18, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message17783.Message17785 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18168: String.t(),
field18169: String.t(),
field18170: Benchmarks.GoogleMessage3.Message17783.t() | nil,
field18171: String.t(),
field18172: String.t(),
field18173: [String.t()]
}
defstruct [:field18168, :field18169, :field18170, :field18171, :field18172, :field18173]
field :field18168, 10, optional: true, type: :string
field :field18169, 11, optional: true, type: :string
field :field18170, 12, optional: true, type: Benchmarks.GoogleMessage3.Message17783
field :field18171, 13, optional: true, type: :string
field :field18172, 14, optional: true, type: :string
field :field18173, 15, repeated: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message17783 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field18155: String.t(),
field18156: String.t(),
field18157: String.t(),
message17784: [any],
message17785: [any],
field18160: [String.t()]
}
defstruct [:field18155, :field18156, :field18157, :message17784, :message17785, :field18160]
field :field18155, 1, optional: true, type: :string
field :field18156, 2, optional: true, type: :string
field :field18157, 3, optional: true, type: :string
field :message17784, 4, repeated: true, type: :group
field :message17785, 9, repeated: true, type: :group
field :field18160, 16, repeated: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message16945 do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
field16946: String.t(),
field16947: String.t(),
field16948: String.t(),
field16949: String.t(),
field16950: String.t(),
field16951: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field16952: [Benchmarks.GoogleMessage3.Message0.t()],
field16953: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16954: [Benchmarks.GoogleMessage3.Message0.t()],
field16955: [String.t()],
field16956: [String.t()],
field16957: [String.t()],
field16958: [String.t()],
field16959: [String.t()],
field16960: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16961: [Benchmarks.GoogleMessage3.Message0.t()],
field16962: [Benchmarks.GoogleMessage3.Message0.t()],
field16963: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16964: [String.t()],
field16965: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16966: [String.t()],
field16967: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16968: [String.t()],
field16969: [Benchmarks.GoogleMessage3.Message0.t()],
field16970: [String.t()],
field16971: [String.t()],
field16972: [String.t()],
field16973: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16974: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16975: [String.t()],
field16976: [String.t()],
field16977: [Benchmarks.GoogleMessage3.Message0.t()],
field16978: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16979: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16980: [integer],
field16981: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16982: [String.t()],
field16983: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16984: [String.t()],
field16985: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16986: [String.t()],
field16987: [String.t()],
field16988: [String.t()],
field16989: String.t(),
field16990: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field16991: [String.t()],
field16992: [String.t()],
field16993: [String.t()],
field16994: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field16995: integer,
field16996: integer,
field16997: String.t(),
field16998: [String.t()],
field16999: [String.t()],
field17000: String.t(),
field17001: [String.t()],
field17002: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17003: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17004: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17005: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17006: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17007: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17008: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17009: Benchmarks.GoogleMessage3.UnusedEmptyMessage.t() | nil,
field17010: [Benchmarks.GoogleMessage3.Message0.t()],
field17011: [String.t()],
field17012: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17013: [String.t()],
field17014: [Benchmarks.GoogleMessage3.UnusedEmptyMessage.t()],
field17015: [Benchmarks.GoogleMessage3.Message0.t()],
field17016: [String.t()],
field17017: [String.t()],
field17018: [String.t()],
field17019: [String.t()],
field17020: [String.t()],
field17021: [String.t()],
field17022: [String.t()],
field17023: [Benchmarks.GoogleMessage3.Message0.t()],
field17024: [String.t()],
__pb_extensions__: map
}
defstruct [
:field16946,
:field16947,
:field16948,
:field16949,
:field16950,
:field16951,
:field16952,
:field16953,
:field16954,
:field16955,
:field16956,
:field16957,
:field16958,
:field16959,
:field16960,
:field16961,
:field16962,
:field16963,
:field16964,
:field16965,
:field16966,
:field16967,
:field16968,
:field16969,
:field16970,
:field16971,
:field16972,
:field16973,
:field16974,
:field16975,
:field16976,
:field16977,
:field16978,
:field16979,
:field16980,
:field16981,
:field16982,
:field16983,
:field16984,
:field16985,
:field16986,
:field16987,
:field16988,
:field16989,
:field16990,
:field16991,
:field16992,
:field16993,
:field16994,
:field16995,
:field16996,
:field16997,
:field16998,
:field16999,
:field17000,
:field17001,
:field17002,
:field17003,
:field17004,
:field17005,
:field17006,
:field17007,
:field17008,
:field17009,
:field17010,
:field17011,
:field17012,
:field17013,
:field17014,
:field17015,
:field17016,
:field17017,
:field17018,
:field17019,
:field17020,
:field17021,
:field17022,
:field17023,
:field17024,
:__pb_extensions__
]
field :field16946, 1, optional: true, type: :string
field :field16947, 2, optional: true, type: :string
field :field16948, 3, optional: true, type: :string
field :field16949, 4, optional: true, type: :string
field :field16950, 5, optional: true, type: :string
field :field16951, 872, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16952, 16, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field16953, 54, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16954, 55, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field16955, 58, repeated: true, type: :string
field :field16956, 59, repeated: true, type: :string
field :field16957, 62, repeated: true, type: :string
field :field16958, 37, repeated: true, type: :string
field :field16959, 18, repeated: true, type: :string
field :field16960, 38, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16961, 67, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field16962, 130, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field16963, 136, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16964, 138, repeated: true, type: :string
field :field16965, 156, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16966, 139, repeated: true, type: :string
field :field16967, 126, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16968, 152, repeated: true, type: :string
field :field16969, 183, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field16970, 168, repeated: true, type: :string
field :field16971, 212, repeated: true, type: :string
field :field16972, 213, repeated: true, type: :string
field :field16973, 189, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16974, 190, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16975, 191, repeated: true, type: :string
field :field16976, 192, repeated: true, type: :string
field :field16977, 193, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field16978, 194, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16979, 195, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16980, 196, repeated: true, type: :int32
field :field16981, 95, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16982, 96, repeated: true, type: :string
field :field16983, 97, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16984, 1086, repeated: true, type: :string
field :field16985, 98, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16986, 99, repeated: true, type: :string
field :field16987, 100, repeated: true, type: :string
field :field16988, 48, repeated: true, type: :string
field :field16989, 22, optional: true, type: :string
field :field16990, 51, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16991, 81, repeated: true, type: :string
field :field16992, 85, repeated: true, type: :string
field :field16993, 169, repeated: true, type: :string
field :field16994, 260, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field16995, 198, optional: true, type: :int32
field :field16996, 204, optional: true, type: :int32
field :field16997, 1087, optional: true, type: :string
field :field16998, 197, repeated: true, type: :string
field :field16999, 206, repeated: true, type: :string
field :field17000, 211, optional: true, type: :string
field :field17001, 205, repeated: true, type: :string
field :field17002, 68, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17003, 69, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17004, 70, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17005, 71, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17006, 72, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17007, 19, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17008, 24, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17009, 23, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17010, 131, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field17011, 133, repeated: true, type: :string
field :field17012, 142, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17013, 143, repeated: true, type: :string
field :field17014, 153, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field17015, 170, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field17016, 171, repeated: true, type: :string
field :field17017, 172, repeated: true, type: :string
field :field17018, 173, repeated: true, type: :string
field :field17019, 174, repeated: true, type: :string
field :field17020, 175, repeated: true, type: :string
field :field17021, 186, repeated: true, type: :string
field :field17022, 101, repeated: true, type: :string
field :field17023, 102, repeated: true, type: Benchmarks.GoogleMessage3.Message0
field :field17024, 274, repeated: true, type: :string
extensions [
{17, 18},
{21, 22},
{25, 26},
{27, 28},
{29, 30},
{30, 31},
{31, 32},
{32, 33},
{33, 34},
{34, 35},
{35, 36},
{36, 37},
{39, 40},
{40, 41},
{41, 42},
{42, 43},
{43, 44},
{44, 45},
{45, 46},
{46, 47},
{47, 48},
{49, 50},
{50, 51},
{52, 53},
{53, 54},
{56, 57},
{57, 58},
{60, 61},
{61, 62},
{63, 64},
{64, 65},
{65, 66},
{66, 67},
{73, 74},
{74, 75},
{75, 76},
{76, 77},
{77, 78},
{78, 79},
{79, 80},
{80, 81},
{82, 83},
{83, 84},
{84, 85},
{86, 87},
{87, 88},
{88, 89},
{89, 90},
{90, 91},
{91, 92},
{92, 93},
{93, 94},
{94, 95},
{103, 104},
{104, 105},
{105, 106},
{106, 107},
{107, 108},
{108, 109},
{109, 110},
{110, 111},
{111, 112},
{112, 113},
{113, 114},
{114, 115},
{115, 116},
{116, 117},
{117, 118},
{118, 119},
{119, 120},
{120, 121},
{121, 122},
{122, 123},
{123, 124},
{124, 125},
{125, 126},
{127, 128},
{128, 129},
{129, 130},
{132, 133},
{134, 135},
{135, 136},
{137, 138},
{140, 141},
{141, 142},
{144, 145},
{145, 146},
{146, 147},
{147, 148},
{148, 149},
{149, 150},
{150, 151},
{151, 152},
{154, 155},
{155, 156},
{157, 158},
{158, 159},
{159, 160},
{160, 161},
{161, 162},
{162, 163},
{163, 164},
{164, 165},
{165, 166},
{166, 167},
{167, 168},
{176, 177},
{177, 178},
{178, 179},
{179, 180},
{180, 181},
{181, 182},
{182, 183},
{184, 185},
{185, 186},
{187, 188},
{188, 189},
{199, 200},
{200, 201},
{201, 202},
{202, 203},
{203, 204},
{207, 208},
{208, 209},
{209, 210},
{210, 211},
{214, 215},
{215, 216},
{216, 217},
{217, 218},
{218, 219},
{219, 220},
{220, 221},
{221, 222},
{222, 223},
{223, 224},
{224, 225},
{225, 226},
{226, 227},
{227, 228},
{228, 229},
{229, 230},
{230, 231},
{231, 232},
{232, 233},
{233, 234},
{234, 235},
{235, 236},
{236, 237},
{237, 238},
{238, 239},
{239, 240},
{240, 241},
{241, 242},
{242, 243},
{243, 244},
{244, 245},
{245, 246},
{246, 247},
{247, 248},
{248, 249},
{249, 250},
{250, 251},
{251, 252},
{252, 253},
{253, 254},
{254, 255},
{255, 256},
{256, 257},
{257, 258},
{258, 259},
{259, 260},
{261, 262},
{262, 263},
{263, 264},
{264, 265},
{265, 266},
{266, 267},
{267, 268},
{268, 269},
{269, 270},
{270, 271},
{271, 272},
{272, 273},
{273, 274},
{275, 276},
{276, 277},
{277, 278},
{278, 279},
{279, 280},
{280, 281},
{281, 282},
{282, 283},
{283, 284},
{284, 285},
{285, 286},
{286, 287},
{290, 291},
{291, 292},
{292, 293},
{293, 294},
{294, 295},
{295, 296},
{296, 297},
{297, 298},
{298, 299},
{299, 300},
{300, 301},
{301, 302},
{302, 303},
{303, 304},
{304, 305},
{305, 306},
{306, 307},
{307, 308},
{308, 309},
{309, 310},
{310, 311},
{311, 312},
{312, 313},
{313, 314},
{314, 315},
{315, 316},
{316, 317},
{317, 318},
{318, 319},
{319, 320},
{320, 321},
{321, 322},
{322, 323},
{323, 324},
{324, 325},
{325, 326},
{326, 327},
{327, 328},
{328, 329},
{329, 330},
{330, 331},
{331, 332},
{332, 333},
{333, 334},
{334, 335},
{335, 336},
{336, 337},
{337, 338},
{338, 339},
{339, 340},
{340, 341},
{341, 342},
{342, 343},
{343, 344},
{344, 345},
{345, 346},
{346, 347},
{347, 348},
{348, 349},
{349, 350},
{350, 351},
{351, 352},
{352, 353},
{353, 354},
{354, 355},
{355, 356},
{356, 357},
{357, 358},
{358, 359},
{359, 360},
{360, 361},
{361, 362},
{362, 363},
{363, 364},
{364, 365},
{365, 366},
{366, 367},
{367, 368},
{368, 369},
{369, 370},
{370, 371},
{371, 372},
{372, 373},
{373, 374},
{374, 375},
{375, 376},
{376, 377},
{377, 378},
{378, 379},
{379, 380},
{380, 381},
{381, 382},
{382, 383},
{383, 384},
{384, 385},
{385, 386},
{386, 387},
{387, 388},
{388, 389},
{389, 390},
{390, 391},
{391, 392},
{392, 393},
{393, 394},
{394, 395},
{395, 396},
{396, 397},
{397, 398},
{398, 399},
{399, 400},
{400, 401},
{401, 402},
{402, 403},
{403, 404},
{404, 405},
{405, 406},
{406, 407},
{407, 408},
{408, 409},
{409, 410},
{410, 411},
{411, 412},
{412, 413},
{413, 414},
{414, 415},
{415, 416},
{416, 417},
{417, 418},
{418, 419},
{419, 420},
{420, 421},
{421, 422},
{422, 423},
{423, 424},
{424, 425},
{425, 426},
{426, 427},
{427, 428},
{428, 429},
{429, 430},
{430, 431},
{431, 432},
{432, 433},
{433, 434},
{434, 435},
{435, 436},
{436, 437},
{437, 438},
{438, 439},
{439, 440},
{440, 441},
{441, 442},
{442, 443},
{443, 444},
{444, 445},
{445, 446},
{446, 447},
{447, 448},
{448, 449},
{449, 450},
{450, 451},
{451, 452},
{452, 453},
{453, 454},
{454, 455},
{455, 456},
{456, 457},
{457, 458},
{458, 459},
{459, 460},
{460, 461},
{461, 462},
{462, 463},
{463, 464},
{464, 465},
{465, 466},
{466, 467},
{467, 468},
{468, 469},
{469, 470},
{470, 471},
{471, 472},
{472, 473},
{473, 474},
{474, 475},
{509, 510},
{511, 512},
{512, 513},
{513, 514},
{514, 515},
{515, 516},
{516, 517},
{517, 518},
{518, 519},
{519, 520},
{520, 521},
{521, 522},
{522, 523},
{523, 524},
{524, 525},
{525, 526},
{526, 527},
{527, 528},
{528, 529},
{529, 530},
{530, 531},
{531, 532},
{532, 533},
{533, 534},
{534, 535},
{535, 536},
{536, 537},
{537, 538},
{538, 539},
{539, 540},
{540, 541},
{541, 542},
{542, 543},
{543, 544},
{544, 545},
{545, 546},
{546, 547},
{547, 548},
{548, 549},
{549, 550},
{550, 551},
{551, 552},
{552, 553},
{553, 554},
{554, 555},
{555, 556},
{556, 557},
{557, 558},
{558, 559},
{559, 560},
{560, 561},
{561, 562},
{562, 563},
{563, 564},
{564, 565},
{565, 566},
{566, 567},
{567, 568},
{568, 569},
{569, 570},
{570, 571},
{571, 572},
{572, 573},
{573, 574},
{574, 575},
{575, 576},
{576, 577},
{577, 578},
{578, 579},
{579, 580},
{580, 581},
{581, 582},
{582, 583},
{583, 584},
{584, 585},
{585, 586},
{586, 587},
{587, 588},
{588, 589},
{589, 590},
{590, 591},
{604, 605},
{605, 606},
{606, 607},
{607, 608},
{608, 609},
{609, 610},
{610, 611},
{611, 612},
{612, 613},
{613, 614},
{614, 615},
{615, 616},
{616, 617},
{617, 618},
{618, 619},
{619, 620},
{620, 621},
{621, 622},
{622, 623},
{623, 624},
{624, 625},
{625, 626},
{626, 627},
{627, 628},
{628, 629},
{629, 630},
{813, 814},
{814, 815},
{815, 816},
{816, 817},
{817, 818},
{818, 819},
{819, 820},
{820, 821},
{821, 822},
{822, 823},
{823, 824},
{824, 825},
{827, 828},
{828, 829},
{829, 830},
{830, 831},
{831, 832},
{832, 833},
{833, 834},
{834, 835},
{835, 836},
{836, 837},
{837, 838},
{838, 839},
{839, 840},
{840, 841},
{841, 842},
{842, 843},
{843, 844},
{844, 845},
{845, 846},
{846, 847},
{847, 848},
{848, 849},
{849, 850},
{850, 851},
{851, 852},
{852, 853},
{853, 854},
{854, 855},
{855, 856},
{856, 857},
{857, 858},
{858, 859},
{859, 860},
{860, 861},
{861, 862},
{862, 863},
{863, 864},
{864, 865},
{865, 866},
{866, 867},
{867, 868},
{868, 869},
{869, 870},
{870, 871},
{871, 872},
{880, 881},
{881, 882},
{882, 883},
{883, 884},
{884, 885},
{885, 886},
{886, 887},
{887, 888},
{888, 889},
{890, 891},
{891, 892},
{892, 893},
{912, 913},
{914, 915},
{915, 916},
{916, 917},
{917, 918},
{918, 919},
{919, 920},
{920, 921},
{921, 922},
{922, 923},
{923, 924},
{924, 925},
{925, 926},
{926, 927},
{927, 928},
{928, 929},
{929, 930},
{930, 931},
{931, 932},
{932, 933},
{933, 934},
{934, 935},
{935, 936},
{936, 937},
{937, 938},
{938, 939},
{939, 940},
{940, 941},
{941, 942},
{942, 943},
{943, 944},
{944, 945},
{945, 946},
{946, 947},
{947, 948},
{949, 950},
{950, 951},
{951, 952},
{952, 953},
{954, 955},
{955, 956},
{956, 957},
{957, 958},
{958, 959},
{959, 960},
{960, 961},
{961, 962},
{962, 963},
{963, 964},
{964, 965},
{965, 966},
{966, 967},
{967, 968},
{968, 969},
{969, 970},
{970, 971},
{971, 972},
{972, 973},
{973, 974},
{974, 975},
{975, 976},
{976, 977},
{977, 978},
{978, 979},
{979, 980},
{980, 981},
{981, 982},
{982, 983},
{983, 984},
{984, 985},
{985, 986},
{987, 988},
{988, 989},
{1000, 1001},
{1001, 1002},
{1002, 1003},
{1003, 1004},
{1004, 1005},
{1005, 1006},
{1006, 1007},
{1007, 1008},
{1008, 1009},
{1009, 1010},
{1010, 1011},
{1011, 1012},
{1012, 1013},
{1013, 1014},
{1014, 1015},
{1015, 1016},
{1016, 1017},
{1017, 1018},
{1018, 1019},
{1019, 1020},
{1020, 1021},
{1021, 1022},
{1022, 1023},
{1023, 1024},
{1024, 1025},
{1025, 1026},
{1026, 1027},
{1027, 1028},
{1028, 1029},
{1029, 1030},
{1030, 1031},
{1031, 1032},
{1032, 1033},
{1033, 1034},
{1034, 1035},
{1035, 1036},
{1036, 1037},
{1037, 1038},
{1038, 1039},
{1039, 1040},
{1040, 1041},
{1041, 1042},
{1042, 1043},
{1043, 1044},
{1044, 1045},
{1045, 1046},
{1046, 1047},
{1047, 1048},
{1048, 1049},
{1049, 1050},
{1050, 1051},
{1051, 1052},
{1052, 1053},
{1053, 1054},
{1054, 1055},
{1055, 1056},
{1056, 1057},
{1057, 1058},
{1058, 1059},
{1079, 1080},
{1080, 1081},
{1081, 1082},
{1082, 1083},
{1083, 1084},
{1084, 1085},
{1085, 1086}
]
end
defmodule Benchmarks.GoogleMessage3.PbExtension do
@moduledoc false
use Protobuf, syntax: :proto2
extend Benchmarks.GoogleMessage3.Message0, :"Message34390.field34453", 92_144_610,
optional: true,
type: Benchmarks.GoogleMessage3.Message34390
extend Benchmarks.GoogleMessage3.Message0, :"Message34624.field34685", 18_178_548,
optional: true,
type: Benchmarks.GoogleMessage3.Message34624
extend Benchmarks.GoogleMessage3.Message0, :"Message34791.field34807", 6_330_340,
optional: true,
type: Benchmarks.GoogleMessage3.Message34791
extend Benchmarks.GoogleMessage3.Message0, :"Message35483.field35505", 7_913_554,
optional: true,
type: Benchmarks.GoogleMessage3.Message35483
extend Benchmarks.GoogleMessage3.Message0, :"Message35807.field35818", 3_803_299,
optional: true,
type: Benchmarks.GoogleMessage3.Message35807
extend Benchmarks.GoogleMessage3.Message0, :"Message16945.field17025", 22_068_132,
optional: true,
type: Benchmarks.GoogleMessage3.Message16945
end
|
bench/lib/datasets/google_message3/benchmark_message3_1.pb.ex
| 0.879121 | 0.458894 |
benchmark_message3_1.pb.ex
|
starcoder
|
defmodule Eml.Element do
@moduledoc """
`Eml.Element` defines the struct that represents an element in Eml.
In practice, you will mostly use the element macros instead of
directly creating `Eml.Element` structs, but the functions in this
module can be valuable when querying, manipulating or transforming
`eml`.
"""
alias __MODULE__, as: El
defstruct tag: :div, attrs: %{}, content: nil, template: nil, type: :primitive
@type attr_name :: atom
@type attr_value :: Eml.t
@type attrs :: %{ attr_name => attr_value }
@type template_fn :: ((Dict.t) -> { :safe, String.t } | Macro.t)
@type element_type :: :primitive | :fragment | :component
@type t :: %El{tag: atom, content: Eml.t, attrs: attrs, template: template_fn, type: element_type}
@doc """
Assign a template function to an element
Setting the element type is purely informative and has no effect on
compilation.
"""
@spec put_template(t, template_fn, element_type) :: t
def put_template(%El{} = el, fun, type \\ :fragment) do
%El{el| template: fun, type: type}
end
@doc """
Removes a template function from an element
"""
@spec remove_template(t) :: t
def remove_template(%El{} = el) do
%El{el| template: nil, type: :primitive}
end
@doc """
Calls the template function of an element with its attributes and
content as argument.
Raises an `Eml.CompileError` when no template function is present.
### Example
iex> use Eml
nil
iex> use Eml.HTML
nil
iex> defmodule ElTest do
...>
...> fragment my_list do
...> ul class: @class do
...> quote do
...> for item <- @__CONTENT__ do
...> li do
...> span "* "
...> span item
...> span " *"
...> end
...> end
...> end
...> end
...> end
...>
...> end
{:module, ElTest, ...}
iex> import ElTest
nil
iex> el = my_list class: "some-class" do
...> span 1
...> span 2
...> end
#my_list<%{class: "some-class"} [#span<[1]>, #span<[2]>]>
iex> Eml.Element.apply_template(el)
[{:safe, "<ul class='some-class'><li><span>* </span><span>1</span><span> *</span></li><li><span>* </span><span>2</span><span> *</span></li></ul>"}]
"""
@spec apply_template(t) :: { :safe, String.t } | Macro.t
def apply_template(%El{attrs: attrs, content: content, template: fun}) when is_function(fun) do
assigns = Map.put(attrs, :__CONTENT__, content)
fun.(assigns)
end
def apply_template(badarg) do
raise Eml.CompileError, message: "Bad template element: #{inspect badarg}"
end
end
# Enumerable protocol implementation
defimpl Enumerable, for: Eml.Element do
def count(_el), do: { :error, __MODULE__ }
def member?(_el, _), do: { :error, __MODULE__ }
def slice(_el), do: { :error, __MODULE__ }
def reduce(el, acc, fun) do
case reduce_content([el], acc, fun) do
{ :cont, acc } -> { :done, acc }
{ :suspend, acc } -> { :suspended, acc }
{ :halt, acc } -> { :halted, acc }
end
end
defp reduce_content(_, { :halt, acc }, _fun) do
{ :halt, acc }
end
defp reduce_content(content, { :suspend, acc }, fun) do
{ :suspend, acc, &reduce_content(content, &1, fun) }
end
defp reduce_content([%Eml.Element{content: content} = el | rest], { :cont, acc }, fun) do
reduce_content(rest, reduce_content(content, fun.(el, acc), fun), fun)
end
defp reduce_content([node | rest], { :cont, acc }, fun) do
reduce_content(rest, fun.(node, acc), fun)
end
defp reduce_content(nil, acc, _fun) do
acc
end
defp reduce_content([], acc, _fun) do
acc
end
defp reduce_content(node, { :cont, acc }, fun) do
fun.(node, acc)
end
end
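# With the reduce implementation above, an element tree can be walked with
# the Enum module. A minimal sketch (assumes elements built with the
# Eml.HTML macros, as in the apply_template doctest):
#
#     el = div do
#       span 1
#       span 2
#     end
#     Enum.count(el, &match?(%Eml.Element{tag: :span}, &1))
#     # => 2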
# Inspect protocol implementation
defimpl Inspect, for: Eml.Element do
import Inspect.Algebra
def inspect(%Eml.Element{tag: tag, attrs: attrs, content: content}, opts) do
opts = if is_list(opts), do: Keyword.put(opts, :hide_content_type, true), else: opts
tag = Atom.to_string(tag)
attrs = if attrs == %{}, do: "", else: to_doc(attrs, opts)
content = if content in [nil, "", []], do: "", else: to_doc(content, opts)
fields = case { attrs, content } do
{ "", "" } -> ""
{ "", _ } -> content
{ _, "" } -> attrs
{ _, _ } -> glue(attrs, " ", content)
end
concat ["#", tag, "<", fields, ">"]
end
end
|
lib/eml/element.ex
| 0.778691 | 0.542136 |
element.ex
|
starcoder
|
defmodule Ymlr do
@moduledoc """
Encodes data into YAML documents using the `Ymlr.Encoder`.
Every document starts with a separator ("---") and can be enhanced with comments.
"""
alias Ymlr.Encoder
@type document :: Encoder.data | {binary(), Encoder.data} | {[binary()], Encoder.data}
@doc """
Encodes the given data as a YAML document with a separator ("---") at the beginning. Raises if it cannot be encoded.
Optionally, you can pass a tuple with comment(s) and data as the first argument.
## Examples
iex> Ymlr.document!(%{a: 1})
"---\\na: 1\\n"
iex> Ymlr.document!({"comment", %{a: 1}})
"---\\n# comment\\na: 1\\n"
iex> Ymlr.document!({["comment 1", "comment 2"], %{a: 1}})
"---\\n# comment 1\\n# comment 2\\na: 1\\n"
iex> Ymlr.document!({[], {"a", "b"}})
** (ArgumentError) The given data {\"a\", \"b\"} cannot be converted to YAML.
"""
@spec document!(document) :: binary()
def document!(document)
def document!({lines, data}) when is_list(lines) do
comments = lines
|> Enum.map(&("# #{&1}\n"))
|> Enum.join("")
"---\n" <> comments <> Encoder.to_s!(data) <> "\n"
end
def document!({comment, data}), do: document!({[comment], data})
def document!(data) do
document!({[], data})
end
@doc """
Encodes the given data as a YAML document with a separator ("---") at the beginning.
Optionally, you can pass a tuple with comment(s) and data as the first argument.
## Examples
iex> Ymlr.document(%{a: 1})
{:ok, "---\\na: 1\\n"}
iex> Ymlr.document({"comment", %{a: 1}})
{:ok, "---\\n# comment\\na: 1\\n"}
iex> Ymlr.document({["comment 1", "comment 2"], %{a: 1}})
{:ok, "---\\n# comment 1\\n# comment 2\\na: 1\\n"}
iex> Ymlr.document({[], {"a", "b"}})
{:error, "The given data {\\"a\\", \\"b\\"} cannot be converted to YAML."}
"""
@spec document(document) :: {:ok, binary()} | {:error, binary()}
def document(document) do
yml = document!(document)
{:ok, yml}
rescue
e in ArgumentError -> {:error, e.message}
end
@doc """
Encodes the given list of data as "---"-separated YAML documents. Raises if it cannot be encoded.
## Examples
iex> Ymlr.documents!([%{a: 1}])
"---\\na: 1\\n"
iex> Ymlr.documents!([%{a: 1}, %{b: 2}])
"---\\na: 1\\n\\n---\\nb: 2\\n"
iex> Ymlr.documents!([{[], {"a", "b"}}])
** (ArgumentError) The given data {\"a\", \"b\"} cannot be converted to YAML.
iex> Ymlr.documents!(%{a: "a"})
** (ArgumentError) The given argument is not a list of documents. Use document/1, document/2, document!/1 or document!/2 for a single document.
"""
def documents!(documents) when is_list(documents) do
documents
|> Enum.map(&document!/1)
|> Enum.join("\n")
end
def documents!(_documents), do:
raise(ArgumentError, "The given argument is not a list of documents. Use document/1, document/2, document!/1 or document!/2 for a single document.")
@doc """
Encodes the given list of data as "---"-separated YAML documents.
## Examples
iex> Ymlr.documents([%{a: 1}])
{:ok, "---\\na: 1\\n"}
iex> Ymlr.documents([%{a: 1}, %{b: 2}])
{:ok, "---\\na: 1\\n\\n---\\nb: 2\\n"}
iex> Ymlr.documents([{[], {"a", "b"}}])
{:error, "The given data {\\"a\\", \\"b\\"} cannot be converted to YAML."}
iex> Ymlr.documents(%{a: "a"})
{:error, "The given argument is not a list of documents. Use document/1, document/2, document!/1 or document!/2 for a single document."}
"""
@spec documents([document]) :: {:ok, binary()} | {:error, binary()}
def documents(documents) do
yml = documents!(documents)
{:ok, yml}
rescue
e in ArgumentError ->
{:error, e.message}
end
end
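# Comments and multiple documents compose; a sketch built only from the
# functions above:
#
#     Ymlr.documents!([{"first", %{a: 1}}, {"second", %{b: 2}}])
#     # => "---\n# first\na: 1\n\n---\n# second\nb: 2\n"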
|
lib/ymlr.ex
| 0.865622 | 0.573977 |
ymlr.ex
|
starcoder
|
defmodule OMG.JSONRPC.ExposeSpec do
@moduledoc """
`use OMG.JSONRPC.ExposeSpec` to expose all @spec at runtime via YourModule.get_specs()
NOTE: this is a stripped-down version of ExposeSpec. The original one parsed `@spec` annotations automatically.
This version requires you to provide and maintain `@expose_spec` annotations for every exposed function.
`@expose_spec` annotations follow this convention:
```
@spec get_block(hash :: bitstring) ::
{:ok, %{hash: bitstring, transactions: list, number: integer}} | {:error, :not_found | :internal_error}
@expose_spec {:get_block,
%{
args: [hash: :bitstring],
arity: 1,
name: :get_block,
returns:
{:alternative,
[
ok: {:map, [hash: :bitstring, transactions: :list, number: :integer]},
error: {:alternative, [:not_found, :internal_error]}
]}
}}
```
The module was stripped down to quickly regain compatibility with Elixir 1.7, where `Module.get_attribute(module, :spec)`
[no longer works](https://elixirforum.com/t/since-elixir-1-7-module-get-attributes-module-spec-returns-nil/15808);
see git blame for the original version.
"""
@typedoc """
Describes a function: its name, arity, list of arguments and their types, and return type.
"""
@type spec :: %{name: atom(), arity: arity(), args: [{atom(), type()}], returns: type()}
@typedoc """
Describes Elixir type. For details see https://hexdocs.pm/elixir/typespecs.html
Note that tuple() denotes a tuple of any size where all elements are of type type()
"""
@type type() :: atom | tuple() | {:alternatives, [type()]}
# Sanity check since functions of the same name
# but different arity are not yet handled.
defp arity_sanity_check(list) do
names = for {name, _} <- list, do: name
testresult = length(Enum.uniq(names)) != length(names)
if testresult, do: :problem_with_arity, else: :ok
end
defmacro __using__(_opts) do
quote do
import OMG.JSONRPC.ExposeSpec
Module.register_attribute(__MODULE__, :expose_spec, accumulate: true)
@before_compile OMG.JSONRPC.ExposeSpec
end
end
defmacro __before_compile__(env) do
module = env.module
nice_spec =
module
|> Module.get_attribute(:expose_spec)
:ok = arity_sanity_check(nice_spec)
escaped = Macro.escape(Map.new(nice_spec))
quote do
def get_specs, do: unquote(escaped)
end
end
end
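# A minimal usage sketch (the module, function and types are hypothetical):
#
#     defmodule MyAPI do
#       use OMG.JSONRPC.ExposeSpec
#
#       @spec echo(msg :: bitstring) :: {:ok, bitstring}
#       @expose_spec {:echo, %{args: [msg: :bitstring], arity: 1,
#                              name: :echo, returns: {:ok, :bitstring}}}
#       def echo(msg), do: {:ok, msg}
#     end
#
#     MyAPI.get_specs()
#     # => %{echo: %{args: [msg: :bitstring], arity: 1, name: :echo,
#     #              returns: {:ok, :bitstring}}}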
|
apps/omg_jsonrpc/lib/expose_spec.ex
| 0.849472 | 0.803829 |
expose_spec.ex
|
starcoder
|
defmodule Ecto.Query.SelectBuilder do
@moduledoc false
alias Ecto.Query.BuilderUtil
@doc """
Escapes a select.
It allows tuples, lists and variables at the top level or a
single `assoc(x, y)` expression.
## Examples
iex> escape({ 1, 2 }, [])
{ :{}, [], [ :{}, [], [1, 2] ] }
iex> escape([ 1, 2 ], [])
[1, 2]
iex> escape(quote(do: x), [x: 0])
{ :{}, [], [:&, [], [0]] }
"""
@spec escape(Macro.t, Keyword.t) :: Macro.t
def escape({ :assoc, _, args } = assoc, vars) when is_list(args) do
escape_assoc(assoc, vars)
end
def escape(other, vars), do: do_escape(other, vars)
# Tuple
defp do_escape({ left, right }, vars) do
do_escape({ :{}, [], [left, right] }, vars)
end
# Tuple
defp do_escape({ :{}, _, list }, vars) do
list = Enum.map(list, &do_escape(&1, vars))
{ :{}, [], [:{}, [], list] }
end
# List
defp do_escape(list, vars) when is_list(list) do
Enum.map(list, &do_escape(&1, vars))
end
# var - where var is bound
defp do_escape({ var, _, context}, vars) when is_atom(var) and is_atom(context) do
BuilderUtil.escape_var(var, vars)
end
defp do_escape(other, vars) do
BuilderUtil.escape(other, vars)
end
# assoc/2
defp escape_assoc({ :assoc, _, [{ var, _, context }, list] }, vars)
when is_atom(var) and is_atom(context) and is_list(list) do
var = BuilderUtil.escape_var(var, vars)
list = Enum.map(list, fn
{ field, { assoc_var, _, assoc_ctxt } }
when is_atom(field) and is_atom(assoc_var) and is_atom(assoc_ctxt) ->
{ field, BuilderUtil.escape_var(assoc_var, vars) }
{ field, other } when is_atom(field) ->
{ field, escape_assoc(other, vars) }
other ->
escape_assoc(other, vars)
end)
{ :{}, [], [:assoc, [], [var, list]] }
end
defp escape_assoc(other, _vars) do
raise Ecto.QueryError,
reason: "`#{Macro.to_string(other)}` is not a valid expression inside `assoc/2` selector"
end
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
def build(query, binding, expr, env) do
binding = BuilderUtil.escape_binding(binding)
expr = escape(expr, binding)
select = quote do: Ecto.Query.QueryExpr[expr: unquote(expr),
file: unquote(env.file), line: unquote(env.line)]
BuilderUtil.apply_query(query, __MODULE__, [select], env)
end
@doc """
The callback applied by `build/4` to build the query.
"""
@spec apply(Ecto.Queryable.t, term) :: Ecto.Query.Query.t
def apply(query, select) do
Ecto.Query.Query[] = query = Ecto.Queryable.to_query(query)
if query.select do
raise Ecto.QueryError, reason: "only one select expression is allowed in query"
else
query.select(select)
end
end
end
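# A sketch of how escape/2 renders an assoc/2 selector, extrapolated from
# the doctests above (bindings p and c at positions 0 and 1):
#
#     iex> escape(quote(do: assoc(p, comments: c)), [p: 0, c: 1])
#     { :{}, [], [:assoc, [], [{ :{}, [], [:&, [], [0]] },
#                              [comments: { :{}, [], [:&, [], [1]] }]]] }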
|
lib/ecto/query/select_builder.ex
| 0.831383 | 0.458046 |
select_builder.ex
|
starcoder
|
defmodule HelloFinance.Currency do
@required_keys [:code, :value]
@enforce_keys @required_keys
defstruct @required_keys
@codes [
:AED,
:AFN,
:ALL,
:AMD,
:ANG,
:AOA,
:ARS,
:AUD,
:AWG,
:AZN,
:BAM,
:BBD,
:BDT,
:BGN,
:BHD,
:BIF,
:BMD,
:BND,
:BOB,
:BOV,
:BRL,
:BSD,
:BTN,
:BWP,
:BYR,
:BZD,
:CAD,
:CDF,
:CHE,
:CHF,
:CHW,
:CLF,
:CLP,
:CNY,
:COP,
:COU,
:CRC,
:CUC,
:CUP,
:CVE,
:CZK,
:DJF,
:DKK,
:DOP,
:DZD,
:ECS,
:EGP,
:ERN,
:ETB,
:EUR,
:FJD,
:FKP,
:GBP,
:GEL,
:GHS,
:GIP,
:GMD,
:GNF,
:GTQ,
:GYD,
:HKD,
:HNL,
:HRK,
:HTG,
:HUF,
:IDR,
:ILS,
:IMP,
:INR,
:IQD,
:IRR,
:ISK,
:JMD,
:JOD,
:JPY,
:KES,
:KGS,
:KHR,
:KMF,
:KPW,
:KRW,
:KWD,
:KYD,
:KZT,
:LAK,
:LBP,
:LKR,
:LRD,
:LSL,
:LTL,
:LVL,
:LYD,
:MAD,
:MDL,
# :MGA,
:MKD,
:MMK,
:MNT,
:MOP,
# :MRO,
:MUR,
:MVR,
:MWK,
:MXN,
:MXV,
:MYR,
:MZN,
:NAD,
:NGN,
:NIO,
:NOK,
:NPR,
:NZD,
:OMR,
:PAB,
:PEN,
:PGK,
:PHP,
:PKR,
:PLN,
:PYG,
:QAR,
:RON,
:RSD,
:RUB,
:RWF,
:SAR,
:SBD,
:SCR,
:SDG,
:SEK,
:SGD,
:SHP,
:SLL,
:SOS,
:SRD,
:STN,
:SVC,
:SYP,
:SZL,
:THB,
:TJS,
:TMT,
:TND,
:TOP,
:TRY,
:TTD,
:TWD,
:TZS,
:UAH,
:UGX,
:USD,
:USN,
:USS,
# :UYI,
:UYU,
:UZS,
:VES,
:VND,
:VUV,
:WST,
:XAF,
# :XAG,
# :XAU,
# :XBA,
# :XBB,
# :XBC,
# :XBD,
:XCD,
# :XDR,
# :XFU,
:XOF,
# :XPD,
:XPF,
# :XPT,
# :XTS,
# :XXX,
:YER,
:ZAR,
:ZMW,
:ZWL
]
def build(code, value) do
%__MODULE__{
code: code,
value: value
}
|> validate_code()
|> validate_value()
|> apply_status()
end
defp validate_code(%{code: code} = currency) when is_atom(code), do: code_exists(currency, code)
defp validate_code(%{code: code, value: value}) when is_binary(code) do
atom_code = String.to_atom(code)
currency = %__MODULE__{code: atom_code, value: value}
code_exists(currency, atom_code)
end
defp code_exists(currency, code) do
case Enum.member?(@codes, code) do
true -> currency
false -> {:error, [:code, "not found"]}
end
end
defp validate_value({:error, _message} = error), do: error
defp validate_value(%{value: value}) when not is_integer(value),
do: {:error, [:value, "should be an integer"]}
defp validate_value(%{value: value}) when value < 0,
do: {:error, [:value, "should be positive"]}
defp validate_value(currency), do: currency
defp apply_status({:error, _message} = error), do: error
defp apply_status(struct), do: {:ok, struct}
end
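# Usage sketch exercising the validations above:
#
#     HelloFinance.Currency.build(:BRL, 100)
#     # => {:ok, %HelloFinance.Currency{code: :BRL, value: 100}}
#
#     HelloFinance.Currency.build("XYZ", 100)
#     # => {:error, [:code, "not found"]}
#
#     HelloFinance.Currency.build(:USD, -5)
#     # => {:error, [:value, "should be positive"]}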
|
lib/hello_finance/currency.ex
| 0.617743 | 0.585871 |
currency.ex
|
starcoder
|
if Code.ensure_loaded?(Ecto) do
defmodule LoggerJSON.Ecto do
@moduledoc """
Implements the behaviour of `Ecto.LogEntry` and sends the query as a string
to Logger with additional metadata:
* query.execution_time_μs - the time spent executing the query in microseconds;
* query.decode_time_μs - the time spent decoding the result in microseconds (it may be 0);
* query.queue_time_μs - the time spent to check the connection out in microseconds (it may be 0);
* query.latency_μs - the total time the query took (sum of `query_time`, `decode_time` and `queue_time`);
* connection_pid - the connection process that executed the query;
* ansi_color - the color that should be used when logging the entry.
For more information see [LogEntry](https://github.com/elixir-ecto/ecto/blob/master/lib/ecto/log_entry.ex)
source code.
"""
require Logger
@doc """
Logs the query string with metadata from `Ecto.LogEntry` at debug level.
"""
@spec log(entry :: Ecto.LogEntry.t()) :: Ecto.LogEntry.t()
def log(entry) do
{query, metadata} = query_and_metadata(entry)
# The logger call will be removed at compile time if
# `compile_time_purge_level` is set to higher than debug.
Logger.debug(query, metadata)
entry
end
@doc """
Overwritten to use JSON.
Logs the given entry in the given level.
"""
@spec log(entry :: Ecto.LogEntry.t(), level :: Logger.level()) :: Ecto.LogEntry.t()
def log(entry, level) do
{query, metadata} = query_and_metadata(entry)
# The logger call will not be removed at compile time,
# because we use level as a variable
Logger.log(level, query, metadata)
entry
end
defp query_and_metadata(%{
query: query,
query_time: query_time,
decode_time: decode_time,
queue_time: queue_time
}) do
query_time = format_time(query_time)
decode_time = format_time(decode_time)
queue_time = format_time(queue_time)
metadata = [
query: %{
execution_time_μs: query_time,
decode_time_μs: decode_time,
queue_time_μs: queue_time,
latency_μs: query_time + decode_time + queue_time
}
]
{query, metadata}
end
defp format_time(nil), do: 0
defp format_time(time), do: System.convert_time_unit(time, :native, :microsecond)
end
end
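# A minimal configuration sketch (the app and repo names are hypothetical)
# that routes Ecto 2.x query logging through this module:
#
#     config :my_app, MyApp.Repo,
#       loggers: [{LoggerJSON.Ecto, :log, [:info]}]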
|
lib/logger_json/ecto.ex
| 0.845942 | 0.466967 |
ecto.ex
|
starcoder
|
defmodule AWS.SESv2 do
@moduledoc """
Amazon SES API v2
Welcome to the Amazon SES API v2 Reference.
This guide provides information about the Amazon SES API v2, including supported
operations, data types, parameters, and schemas.
[Amazon SES](https://aws.amazon.com/pinpoint) is an AWS service that you can use to send email messages to your customers.
If you're new to Amazon SES API v2, you might find it helpful to also review the
[Amazon Simple Email Service Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/). The *Amazon SES
Developer Guide* provides information and code samples that demonstrate how to
use Amazon SES API v2 features programmatically.
The Amazon SES API v2 is available in several AWS Regions and it provides an
endpoint for each of these Regions. For a list of all the Regions and endpoints
where the API is currently available, see [AWS Service Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region)
in the *Amazon Web Services General Reference*. To learn more about AWS Regions,
see [Managing AWS Regions](https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) in the
*Amazon Web Services General Reference*.
In each Region, AWS maintains multiple Availability Zones. These Availability
Zones are physically isolated from each other, but are united by private,
low-latency, high-throughput, and highly redundant network connections. These
Availability Zones enable us to provide very high levels of availability and
redundancy, while also minimizing latency. To learn more about the number of
Availability Zones that are available in each Region, see [AWS Global Infrastructure](http://aws.amazon.com/about-aws/global-infrastructure/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2019-09-27",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "email",
global?: false,
protocol: "rest-json",
service_id: "SESv2",
signature_version: "v4",
signing_name: "ses",
target_prefix: nil
}
end
@doc """
Create a configuration set.
*Configuration sets* are groups of rules that you can apply to the emails that
you send. You apply a configuration set to an email by specifying the name of
the configuration set when you call the Amazon SES API v2. When you apply a
configuration set to an email, all of the rules in that configuration set are
applied to the email.
"""
def create_configuration_set(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/configuration-sets"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
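  # A minimal call sketch (the credentials, region and set name are
  # assumptions, not part of this module):
  #
  #     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
  #     AWS.SESv2.create_configuration_set(client, %{"ConfigurationSetName" => "my-set"})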
@doc """
Create an event destination.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information about
these events to. For example, you can send event data to Amazon SNS to receive
notifications when you receive bounces or complaints, or you can use Amazon
Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
A single configuration set can include more than one event destination.
"""
def create_configuration_set_event_destination(
%Client{} = client,
configuration_set_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/event-destinations"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a contact, which is an end-user who is receiving the email, and adds
them to a contact list.
"""
def create_contact(%Client{} = client, contact_list_name, input, options \\ []) do
url_path = "/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}/contacts"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a contact list.
"""
def create_contact_list(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/contact-lists"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a new custom verification email template.
For more information about custom verification email templates, see [Using Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def create_custom_verification_email_template(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/custom-verification-email-templates"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Create a new pool of dedicated IP addresses.
A pool can include one or more dedicated IP addresses that are associated with
your AWS account. You can associate a pool with a configuration set. When you
send an email that uses that configuration set, the message is sent from one of
the addresses in the associated pool.
"""
def create_dedicated_ip_pool(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/dedicated-ip-pools"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Create a new predictive inbox placement test.
Predictive inbox placement tests can help you predict how your messages will be
handled by various email providers around the world. When you perform a
predictive inbox placement test, you provide a sample message that contains the
content that you plan to send to your customers. Amazon SES then sends that
message to special email addresses spread across several major email providers.
After about 24 hours, the test is complete, and you can use the
`GetDeliverabilityTestReport` operation to view the results of the test.
"""
def create_deliverability_test_report(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/deliverability-dashboard/test"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Starts the process of verifying an email identity.
An *identity* is an email address or domain that you use when you send email.
Before you can use an identity to send email, you first have to verify it. By
verifying an identity, you demonstrate that you're the owner of the identity,
and that you've given Amazon SES API v2 permission to send email from the
identity.
When you verify an email address, Amazon SES sends an email to the address. Your
email address is verified as soon as you follow the link in the verification
email.
When you verify a domain without specifying the `DkimSigningAttributes` object,
this operation provides a set of DKIM tokens. You can convert these tokens into
CNAME records, which you then add to the DNS configuration for your domain. Your
domain is verified when Amazon SES detects these records in the DNS
configuration for your domain. This verification method is known as [Easy DKIM](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html).
Alternatively, you can perform the verification process by providing your own
public-private key pair. This verification method is known as Bring Your Own
DKIM (BYODKIM). To use BYODKIM, your call to the `CreateEmailIdentity` operation
has to include the `DkimSigningAttributes` object. When you specify this object,
you provide a selector (a component of the DNS record name that identifies the
public key that you want to use for DKIM authentication) and a private key.
When you verify a domain, this operation provides a set of DKIM tokens, which
you can convert into CNAME tokens. You add these CNAME tokens to the DNS
configuration for your domain. Your domain is verified when Amazon SES detects
these records in the DNS configuration for your domain. For some DNS providers,
it can take 72 hours or more to complete the domain verification process.
Additionally, you can associate an existing configuration set with the email
identity that you're verifying.
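## Example

A hedged sketch of verifying a domain identity; the client setup and the
domain are example values, and the request and response shapes are
assumptions based on the SES v2 `CreateEmailIdentity` API reference:

    {:ok, %{"DkimAttributes" => _dkim_tokens}, _http_response} =
      AWS.SESv2.create_email_identity(client, %{"EmailIdentity" => "example.com"})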
"""
def create_email_identity(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/identities"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates the specified sending authorization policy for the given identity (an
email address or a domain).
This API is for the identity owner only. If you have not verified the identity,
this API will return an error.
Sending authorization is a feature that enables an identity owner to authorize
other senders to use its identities. For information about using sending
authorization, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def create_email_identity_policy(
%Client{} = client,
email_identity,
policy_name,
input,
options \\ []
) do
url_path =
"/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/policies/#{AWS.Util.encode_uri(policy_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates an email template.
Email templates enable you to send personalized email to one or more
destinations in a single API operation. For more information, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html).
You can execute this operation no more than once per second.
"""
def create_email_template(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/templates"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates an import job for a data destination.
"""
def create_import_job(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/import-jobs"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Delete an existing configuration set.
*Configuration sets* are groups of rules that you can apply to the emails you
send. You apply a configuration set to an email by including a reference to the
configuration set in the headers of the email. When you apply a configuration
set to an email, all of the rules in that configuration set are applied to the
email.
"""
def delete_configuration_set(%Client{} = client, configuration_set_name, input, options \\ []) do
url_path = "/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Delete an event destination.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information about
these events to. For example, you can send event data to Amazon SNS to receive
notifications when you receive bounces or complaints, or you can use Amazon
Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
"""
def delete_configuration_set_event_destination(
%Client{} = client,
configuration_set_name,
event_destination_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/event-destinations/#{AWS.Util.encode_uri(event_destination_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes a contact from a contact list.
"""
def delete_contact(%Client{} = client, contact_list_name, email_address, input, options \\ []) do
url_path =
"/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}/contacts/#{AWS.Util.encode_uri(email_address)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes a contact list and all of the contacts on that list.
"""
def delete_contact_list(%Client{} = client, contact_list_name, input, options \\ []) do
url_path = "/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes an existing custom verification email template.
For more information about custom verification email templates, see [Using Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def delete_custom_verification_email_template(
%Client{} = client,
template_name,
input,
options \\ []
) do
url_path =
"/v2/email/custom-verification-email-templates/#{AWS.Util.encode_uri(template_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Delete a dedicated IP pool.
"""
def delete_dedicated_ip_pool(%Client{} = client, pool_name, input, options \\ []) do
url_path = "/v2/email/dedicated-ip-pools/#{AWS.Util.encode_uri(pool_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes an email identity.
An identity can be either an email address or a domain name.
"""
def delete_email_identity(%Client{} = client, email_identity, input, options \\ []) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the specified sending authorization policy for the given identity (an
email address or a domain).
This API returns successfully even if a policy with the specified name does not
exist.
This API is for the identity owner only. If you have not verified the identity,
this API will return an error.
Sending authorization is a feature that enables an identity owner to authorize
other senders to use its identities. For information about using sending
authorization, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def delete_email_identity_policy(
%Client{} = client,
email_identity,
policy_name,
input,
options \\ []
) do
url_path =
"/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/policies/#{AWS.Util.encode_uri(policy_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes an email template.
You can execute this operation no more than once per second.
"""
def delete_email_template(%Client{} = client, template_name, input, options \\ []) do
url_path = "/v2/email/templates/#{AWS.Util.encode_uri(template_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes an email address from the suppression list for your account.
"""
def delete_suppressed_destination(%Client{} = client, email_address, input, options \\ []) do
url_path = "/v2/email/suppression/addresses/#{AWS.Util.encode_uri(email_address)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Obtain information about the email-sending status and capabilities of your
Amazon SES account in the current AWS Region.
"""
def get_account(%Client{} = client, options \\ []) do
url_path = "/v2/email/account"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve a list of the blacklists that your dedicated IP addresses appear on.
"""
def get_blacklist_reports(%Client{} = client, blacklist_item_names, options \\ []) do
url_path = "/v2/email/deliverability-dashboard/blacklist-report"
headers = []
query_params = []
query_params =
if !is_nil(blacklist_item_names) do
[{"BlacklistItemNames", blacklist_item_names} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Get information about an existing configuration set, including the dedicated IP
pool that it's associated with, whether or not it's enabled for sending email,
and more.
*Configuration sets* are groups of rules that you can apply to the emails you
send. You apply a configuration set to an email by including a reference to the
configuration set in the headers of the email. When you apply a configuration
set to an email, all of the rules in that configuration set are applied to the
email.
"""
def get_configuration_set(%Client{} = client, configuration_set_name, options \\ []) do
url_path = "/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve a list of event destinations that are associated with a configuration
set.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information about
these events to. For example, you can send event data to Amazon SNS to receive
notifications when you receive bounces or complaints, or you can use Amazon
Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
"""
def get_configuration_set_event_destinations(
%Client{} = client,
configuration_set_name,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/event-destinations"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a contact from a contact list.
"""
def get_contact(%Client{} = client, contact_list_name, email_address, options \\ []) do
url_path =
"/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}/contacts/#{AWS.Util.encode_uri(email_address)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns contact list metadata.
It does not return any information about the contacts present in the list.
"""
def get_contact_list(%Client{} = client, contact_list_name, options \\ []) do
url_path = "/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns the custom email verification template for the template name you
specify.
For more information about custom verification email templates, see [Using Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def get_custom_verification_email_template(%Client{} = client, template_name, options \\ []) do
url_path =
"/v2/email/custom-verification-email-templates/#{AWS.Util.encode_uri(template_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Get information about a dedicated IP address, including the name of the
dedicated IP pool that it's associated with, as well as information about the
automatic warm-up process for the address.
"""
def get_dedicated_ip(%Client{} = client, ip, options \\ []) do
url_path = "/v2/email/dedicated-ips/#{AWS.Util.encode_uri(ip)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
List the dedicated IP addresses that are associated with your AWS account.
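## Example

A sketch of listing the addresses in a single pool; `"my-pool"` is an
example value, and the positional `next_token` and `page_size` arguments
are left as `nil` to use the service defaults:

    {:ok, _result, _http_response} =
      AWS.SESv2.get_dedicated_ips(client, nil, nil, "my-pool")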
"""
def get_dedicated_ips(
%Client{} = client,
next_token \\ nil,
page_size \\ nil,
pool_name \\ nil,
options \\ []
) do
url_path = "/v2/email/dedicated-ips"
headers = []
query_params = []
query_params =
if !is_nil(pool_name) do
[{"PoolName", pool_name} | query_params]
else
query_params
end
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve information about the status of the Deliverability dashboard for your
account.
When the Deliverability dashboard is enabled, you gain access to reputation,
deliverability, and other metrics for the domains that you use to send email.
You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription
charge, in addition to any other fees that you accrue by using Amazon SES and
other AWS services. For more information about the features and cost of a
Deliverability dashboard subscription, see [Amazon SES Pricing](http://aws.amazon.com/ses/pricing/).
"""
def get_deliverability_dashboard_options(%Client{} = client, options \\ []) do
url_path = "/v2/email/deliverability-dashboard"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve the results of a predictive inbox placement test.
"""
def get_deliverability_test_report(%Client{} = client, report_id, options \\ []) do
url_path = "/v2/email/deliverability-dashboard/test-reports/#{AWS.Util.encode_uri(report_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve all the deliverability data for a specific campaign.
This data is available for a campaign only if the campaign sent email by using a
domain that the Deliverability dashboard is enabled for.
"""
def get_domain_deliverability_campaign(%Client{} = client, campaign_id, options \\ []) do
url_path = "/v2/email/deliverability-dashboard/campaigns/#{AWS.Util.encode_uri(campaign_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve inbox placement and engagement rates for the domains that you use to
send email.
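## Example

A sketch with example values; note that the function takes `end_date`
before `start_date`, mirroring the generated parameter order, and the
ISO 8601 timestamp format is an assumption:

    {:ok, _report, _http_response} =
      AWS.SESv2.get_domain_statistics_report(
        client,
        "example.com",
        "2021-01-31T00:00:00Z",
        "2021-01-01T00:00:00Z"
      )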
"""
def get_domain_statistics_report(
%Client{} = client,
domain,
end_date,
start_date,
options \\ []
) do
url_path =
"/v2/email/deliverability-dashboard/statistics-report/#{AWS.Util.encode_uri(domain)}"
headers = []
query_params = []
query_params =
if !is_nil(start_date) do
[{"StartDate", start_date} | query_params]
else
query_params
end
query_params =
if !is_nil(end_date) do
[{"EndDate", end_date} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Provides information about a specific identity, including the identity's
verification status, sending authorization policies, its DKIM authentication
status, and its custom Mail-From settings.
"""
def get_email_identity(%Client{} = client, email_identity, options \\ []) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns the requested sending authorization policies for the given identity (an
email address or a domain).
The policies are returned as a map of policy names to policy contents. You can
retrieve a maximum of 20 policies at a time.
This API is for the identity owner only. If you have not verified the identity,
this API will return an error.
Sending authorization is a feature that enables an identity owner to authorize
other senders to use its identities. For information about using sending
authorization, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def get_email_identity_policies(%Client{} = client, email_identity, options \\ []) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/policies"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Displays the template object (which includes the subject line, HTML part and
text part) for the template you specify.
You can execute this operation no more than once per second.
"""
def get_email_template(%Client{} = client, template_name, options \\ []) do
url_path = "/v2/email/templates/#{AWS.Util.encode_uri(template_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Provides information about an import job.
"""
def get_import_job(%Client{} = client, job_id, options \\ []) do
url_path = "/v2/email/import-jobs/#{AWS.Util.encode_uri(job_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about a specific email address that's on the suppression
list for your account.
"""
def get_suppressed_destination(%Client{} = client, email_address, options \\ []) do
url_path = "/v2/email/suppression/addresses/#{AWS.Util.encode_uri(email_address)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
List all of the configuration sets associated with your account in the current
region.
*Configuration sets* are groups of rules that you can apply to the emails you
send. You apply a configuration set to an email by including a reference to the
configuration set in the headers of the email. When you apply a configuration
set to an email, all of the rules in that configuration set are applied to the
email.
"""
def list_configuration_sets(
%Client{} = client,
next_token \\ nil,
page_size \\ nil,
options \\ []
) do
url_path = "/v2/email/configuration-sets"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists all of the contact lists available.
"""
def list_contact_lists(%Client{} = client, next_token \\ nil, page_size \\ nil, options \\ []) do
url_path = "/v2/email/contact-lists"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the contacts present in a specific contact list.
"""
def list_contacts(
%Client{} = client,
contact_list_name,
next_token \\ nil,
page_size \\ nil,
options \\ []
) do
url_path = "/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}/contacts"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the existing custom verification email templates for your account in the
current AWS Region.
For more information about custom verification email templates, see [Using Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def list_custom_verification_email_templates(
%Client{} = client,
next_token \\ nil,
page_size \\ nil,
options \\ []
) do
url_path = "/v2/email/custom-verification-email-templates"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
List all of the dedicated IP pools that exist in your AWS account in the current
Region.
"""
def list_dedicated_ip_pools(
%Client{} = client,
next_token \\ nil,
page_size \\ nil,
options \\ []
) do
url_path = "/v2/email/dedicated-ip-pools"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Show a list of the predictive inbox placement tests that you've performed,
regardless of their statuses.
For predictive inbox placement tests that are complete, you can use the
`GetDeliverabilityTestReport` operation to view the results.
"""
def list_deliverability_test_reports(
%Client{} = client,
next_token \\ nil,
page_size \\ nil,
options \\ []
) do
url_path = "/v2/email/deliverability-dashboard/test-reports"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve deliverability data for all the campaigns that used a specific domain
to send email during a specified time range.
This data is available for a domain only if you enabled the Deliverability
dashboard for the domain.
"""
def list_domain_deliverability_campaigns(
%Client{} = client,
subscribed_domain,
end_date,
next_token \\ nil,
page_size \\ nil,
start_date,
options \\ []
) do
url_path =
"/v2/email/deliverability-dashboard/domains/#{AWS.Util.encode_uri(subscribed_domain)}/campaigns"
headers = []
query_params = []
query_params =
if !is_nil(start_date) do
[{"StartDate", start_date} | query_params]
else
query_params
end
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(end_date) do
[{"EndDate", end_date} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of all of the email identities that are associated with your AWS
account.
An identity can be either an email address or a domain. This operation returns
identities that are verified as well as those that aren't. This operation
returns identities that are associated with Amazon SES and Amazon Pinpoint.
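## Example

A pagination sketch; `25` is an arbitrary page size, and the response
keys are assumptions based on the SES v2 `ListEmailIdentities` API
reference:

    {:ok, %{"EmailIdentities" => _identities} = result, _http_response} =
      AWS.SESv2.list_email_identities(client, nil, 25)

    # Pass result["NextToken"] back in as `next_token` to fetch the next page.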
"""
def list_email_identities(
%Client{} = client,
next_token \\ nil,
page_size \\ nil,
options \\ []
) do
url_path = "/v2/email/identities"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the email templates present in your Amazon SES account in the current AWS
Region.
You can execute this operation no more than once per second.
"""
def list_email_templates(%Client{} = client, next_token \\ nil, page_size \\ nil, options \\ []) do
url_path = "/v2/email/templates"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists all of the import jobs.
"""
def list_import_jobs(%Client{} = client, next_token \\ nil, page_size \\ nil, options \\ []) do
url_path = "/v2/email/import-jobs"
headers = []
query_params = []
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a list of email addresses that are on the suppression list for your
account.
"""
def list_suppressed_destinations(
%Client{} = client,
end_date \\ nil,
next_token \\ nil,
page_size \\ nil,
reasons \\ nil,
start_date \\ nil,
options \\ []
) do
url_path = "/v2/email/suppression/addresses"
headers = []
query_params = []
query_params =
if !is_nil(start_date) do
[{"StartDate", start_date} | query_params]
else
query_params
end
query_params =
if !is_nil(reasons) do
[{"Reason", reasons} | query_params]
else
query_params
end
query_params =
if !is_nil(page_size) do
[{"PageSize", page_size} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(end_date) do
[{"EndDate", end_date} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieve a list of the tags (keys and values) that are associated with a
specified resource.
A *tag* is a label that you optionally define and associate with a resource.
Each tag consists of a required *tag key* and an optional associated *tag
value*. A tag key is a general label that acts as a category for more specific
tag values. A tag value acts as a descriptor within a tag key.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/v2/email/tags"
headers = []
query_params = []
query_params =
if !is_nil(resource_arn) do
[{"ResourceArn", resource_arn} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Enable or disable the automatic warm-up feature for dedicated IP addresses.
"""
def put_account_dedicated_ip_warmup_attributes(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/account/dedicated-ips/warmup"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Update your Amazon SES account details.
"""
def put_account_details(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/account/details"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enable or disable the ability of your account to send email.
"""
def put_account_sending_attributes(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/account/sending"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Change the settings for the account-level suppression list.
"""
def put_account_suppression_attributes(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/account/suppression"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Associate a configuration set with a dedicated IP pool.
You can use dedicated IP pools to create groups of dedicated IP addresses for
sending specific types of email.
"""
def put_configuration_set_delivery_options(
%Client{} = client,
configuration_set_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/delivery-options"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enable or disable collection of reputation metrics for emails that you send
using a particular configuration set in a specific AWS Region.
"""
def put_configuration_set_reputation_options(
%Client{} = client,
configuration_set_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/reputation-options"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enable or disable email sending for messages that use a particular configuration
set in a specific AWS Region.
"""
def put_configuration_set_sending_options(
%Client{} = client,
configuration_set_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/sending"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Specify the account suppression list preferences for a configuration set.
"""
def put_configuration_set_suppression_options(
%Client{} = client,
configuration_set_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/suppression-options"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Specify a custom domain to use for open and click tracking elements in email
that you send.
"""
def put_configuration_set_tracking_options(
%Client{} = client,
configuration_set_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/tracking-options"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Move a dedicated IP address to an existing dedicated IP pool.
The dedicated IP address that you specify must already exist, and must be
associated with your AWS account.
The dedicated IP pool you specify must already exist. You can create a new pool
by using the `CreateDedicatedIpPool` operation.
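## Example

A sketch with example values; the `"DestinationPoolName"` key follows the
SES v2 `PutDedicatedIpInPool` API reference, and the address is from the
documentation IP range:

    {:ok, _result, _http_response} =
      AWS.SESv2.put_dedicated_ip_in_pool(client, "203.0.113.10", %{
        "DestinationPoolName" => "my-pool"
      })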
"""
def put_dedicated_ip_in_pool(%Client{} = client, ip, input, options \\ []) do
url_path = "/v2/email/dedicated-ips/#{AWS.Util.encode_uri(ip)}/pool"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
def put_dedicated_ip_warmup_attributes(%Client{} = client, ip, input, options \\ []) do
url_path = "/v2/email/dedicated-ips/#{AWS.Util.encode_uri(ip)}/warmup"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enable or disable the Deliverability dashboard.
When you enable the Deliverability dashboard, you gain access to reputation,
deliverability, and other metrics for the domains that you use to send email.
You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription
charge, in addition to any other fees that you accrue by using Amazon SES and
other AWS services. For more information about the features and cost of a
Deliverability dashboard subscription, see [Amazon SES Pricing](http://aws.amazon.com/ses/pricing/).
"""
def put_deliverability_dashboard_option(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/deliverability-dashboard"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to associate a configuration set with an email identity.
"""
def put_email_identity_configuration_set_attributes(
%Client{} = client,
email_identity,
input,
options \\ []
) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/configuration-set"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to enable or disable DKIM authentication for an email identity.
"""
def put_email_identity_dkim_attributes(%Client{} = client, email_identity, input, options \\ []) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/dkim"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to configure or change the DKIM authentication settings for an email domain
identity.
You can use this operation to do any of the following:
* Update the signing attributes for an identity that uses Bring Your
Own DKIM (BYODKIM).
* Change from using no DKIM authentication to using Easy DKIM.
* Change from using no DKIM authentication to using BYODKIM.
* Change from using Easy DKIM to using BYODKIM.
* Change from using BYODKIM to using Easy DKIM.
"""
def put_email_identity_dkim_signing_attributes(
%Client{} = client,
email_identity,
input,
options \\ []
) do
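    # Note: unlike the other operations in this module, this endpoint uses a
    # "/v1" path prefix; that is how the SES v2 service defines this route.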
url_path = "/v1/email/identities/#{AWS.Util.encode_uri(email_identity)}/dkim/signing"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to enable or disable feedback forwarding for an identity.
This setting determines what happens when an identity is used to send an email
that results in a bounce or complaint event.
If the value is `true`, you receive email notifications when bounce or complaint
events occur. These notifications are sent to the address that you specified in
the `Return-Path` header of the original email.
You're required to have a method of tracking bounces and complaints. If you
haven't set up another mechanism for receiving bounce or complaint notifications
(for example, by setting up an event destination), you receive an email
notification when these events occur (even if this setting is disabled).
"""
def put_email_identity_feedback_attributes(
%Client{} = client,
email_identity,
input,
options \\ []
) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/feedback"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to enable or disable the custom Mail-From domain configuration for an email
identity.
"""
def put_email_identity_mail_from_attributes(
%Client{} = client,
email_identity,
input,
options \\ []
) do
url_path = "/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/mail-from"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Adds an email address to the suppression list for your account.
"""
def put_suppressed_destination(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/suppression/addresses"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Composes an email message to multiple destinations.
"""
def send_bulk_email(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/outbound-bulk-emails"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Adds an email address to the list of identities for your Amazon SES account in
the current AWS Region and attempts to verify it.
As a result of executing this operation, a customized verification email is sent
to the specified address.
To use this operation, you must first create a custom verification email
template. For more information about creating and using custom verification
email templates, see [Using Custom Verification Email Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def send_custom_verification_email(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/outbound-custom-verification-emails"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Sends an email message.
You can use the Amazon SES API v2 to send two types of messages:
* **Simple** – A standard email message. When you create this type
of message, you specify the sender, the recipient, and the message body, and
Amazon SES assembles the message for you.
* **Raw** – A raw, MIME-formatted email message. When you send this
type of email, you have to specify all of the message headers, as well as the
message body. You can use this message type to send messages that contain
attachments. The message that you specify has to be a valid MIME message.
* **Templated** – A message that contains personalization tags. When
you send this type of email, Amazon SES API v2 automatically replaces the tags
with values that you specify.
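## Example

A sketch of sending a simple message; the addresses are example values
and the request shape is an assumption based on the SES v2 `SendEmail`
API reference:

    {:ok, %{"MessageId" => _id}, _http_response} =
      AWS.SESv2.send_email(client, %{
        "FromEmailAddress" => "sender@example.com",
        "Destination" => %{"ToAddresses" => ["recipient@example.com"]},
        "Content" => %{
          "Simple" => %{
            "Subject" => %{"Data" => "Hello"},
            "Body" => %{"Text" => %{"Data" => "Hello from SES."}}
          }
        }
      })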
"""
def send_email(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/outbound-emails"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Add one or more tags (keys and values) to a specified resource.
A *tag* is a label that you optionally define and associate with a resource.
Tags can help you categorize and manage resources in different ways, such as by
purpose, owner, environment, or other criteria. A resource can have as many as
50 tags.
Each tag consists of a required *tag key* and an associated *tag value*, both of
which you define. A tag key is a general label that acts as a category for more
specific tag values. A tag value acts as a descriptor within a tag key.
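## Example

A sketch with example values; the `Tags` shape follows the SES v2
`TagResource` API reference, and the ARN is a made-up placeholder:

    {:ok, _result, _http_response} =
      AWS.SESv2.tag_resource(client, %{
        "ResourceArn" => "arn:aws:ses:us-east-1:123456789012:configuration-set/my-config-set",
        "Tags" => [%{"Key" => "environment", "Value" => "production"}]
      })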
"""
def tag_resource(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/tags"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a preview of the MIME content of an email when provided with a template
and a set of replacement data.
You can execute this operation no more than once per second.
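## Example

A sketch with example values; `TemplateData` is a JSON string of
replacement values, per the SES v2 `TestRenderEmailTemplate` API
reference:

    {:ok, %{"RenderedTemplate" => _rendered}, _http_response} =
      AWS.SESv2.test_render_email_template(client, "my-template", %{
        "TemplateData" => ~s({"name": "Alice"})
      })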
"""
def test_render_email_template(%Client{} = client, template_name, input, options \\ []) do
url_path = "/v2/email/templates/#{AWS.Util.encode_uri(template_name)}/render"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Remove one or more tags (keys and values) from a specified resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
url_path = "/v2/email/tags"
headers = []
{query_params, input} =
[
{"ResourceArn", "ResourceArn"},
{"TagKeys", "TagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Update the configuration of an event destination for a configuration set.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information about
these events to. For example, you can send event data to Amazon SNS to receive
notifications when you receive bounces or complaints, or you can use Amazon
Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
"""
def update_configuration_set_event_destination(
%Client{} = client,
configuration_set_name,
event_destination_name,
input,
options \\ []
) do
url_path =
"/v2/email/configuration-sets/#{AWS.Util.encode_uri(configuration_set_name)}/event-destinations/#{AWS.Util.encode_uri(event_destination_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates a contact's preferences for a list.
It is not necessary to specify all existing topic preferences in the
TopicPreferences object, just the ones that need updating.
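## Example

A sketch updating a single topic preference; the topic name and
subscription status are example values based on the SES v2
`UpdateContact` API reference:

    {:ok, _result, _http_response} =
      AWS.SESv2.update_contact(client, "my-list", "user@example.com", %{
        "TopicPreferences" => [
          %{"TopicName" => "newsletter", "SubscriptionStatus" => "OPT_IN"}
        ]
      })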
"""
def update_contact(%Client{} = client, contact_list_name, email_address, input, options \\ []) do
url_path =
"/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}/contacts/#{AWS.Util.encode_uri(email_address)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates contact list metadata.
This operation does a complete replacement.
"""
def update_contact_list(%Client{} = client, contact_list_name, input, options \\ []) do
url_path = "/v2/email/contact-lists/#{AWS.Util.encode_uri(contact_list_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an existing custom verification email template.
For more information about custom verification email templates, see [Using Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def update_custom_verification_email_template(
%Client{} = client,
template_name,
input,
options \\ []
) do
url_path =
"/v2/email/custom-verification-email-templates/#{AWS.Util.encode_uri(template_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates the specified sending authorization policy for the given identity (an
email address or a domain).
This API returns successfully even if a policy with the specified name does not
exist.
This API is for the identity owner only. If you have not verified the identity,
this API will return an error.
Sending authorization is a feature that enables an identity owner to authorize
other senders to use its identities. For information about using sending
authorization, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def update_email_identity_policy(
%Client{} = client,
email_identity,
policy_name,
input,
options \\ []
) do
url_path =
"/v2/email/identities/#{AWS.Util.encode_uri(email_identity)}/policies/#{AWS.Util.encode_uri(policy_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an email template.
Email templates enable you to send personalized email to one or more
destinations in a single API operation. For more information, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html).
You can execute this operation no more than once per second.
"""
def update_email_template(%Client{} = client, template_name, input, options \\ []) do
url_path = "/v2/email/templates/#{AWS.Util.encode_uri(template_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
|
lib/aws/generated/sesv2.ex
| 0.80406 | 0.5169 |
sesv2.ex
|
starcoder
|
defmodule Braintree.HTTP do
@moduledoc """
Base client for all server interaction, used by all endpoint specific
modules.
This request wrapper coordinates the remote server, headers, authorization
and SSL options.
Using `Braintree.HTTP` requires the presence of three config values:
* `merchant_id` - Braintree merchant id
* `private_key` - Braintree private key
* `public_key` - Braintree public key
All three values must be set or a `Braintree.ConfigError` will be raised at
runtime. Each of these config values also accepts a `{:system, "VAR_NAME"}`
tuple as its value, in which case the value is read from the system
environment with `System.get_env("VAR_NAME")`.
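## Telemetry

The request cycle emits `[:braintree, :request, :start]`, `[:braintree,
:request, :stop]`, `[:braintree, :request, :error]`, and `[:braintree,
:request, :exception]` telemetry events (see the `emit_*` helpers below).
A sketch of attaching a handler; the handler id is an arbitrary example
value:

    :telemetry.attach(
      "log-braintree-requests",
      [:braintree, :request, :stop],
      fn _event, %{duration: duration}, %{method: method, path: path}, _config ->
        ms = System.convert_time_unit(duration, :native, :millisecond)
        IO.inspect({method, path, ms})
      end,
      nil
    )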
"""
require Logger
alias Braintree.ErrorResponse, as: Error
alias Braintree.XML.{Decoder, Encoder}
@type response ::
{:ok, map}
| {:error, atom}
| {:error, Error.t()}
| {:error, binary}
@production_endpoint "https://api.braintreegateway.com/merchants/"
@sandbox_endpoint "https://api.sandbox.braintreegateway.com/merchants/"
@cacertfile "/certs/api_braintreegateway_com.ca.crt"
@headers [
{"Accept", "application/xml"},
{"User-Agent", "Braintree Elixir/0.1"},
{"Accept-Encoding", "gzip"},
{"X-ApiVersion", "4"},
{"Content-Type", "application/xml"}
]
@statuses %{
400 => :bad_request,
401 => :unauthorized,
403 => :forbidden,
404 => :not_found,
422 => :unprocessable_entity,
426 => :upgrade_required,
429 => :too_many_requests,
500 => :server_error,
503 => :service_unavailable,
504 => :connect_timeout
}
@doc """
Centralized request handling function. All convenience structs use this
function to interact with the Braintree servers. This function can be used
directly to supplement missing functionality.
## Example
defmodule MyApp.Disbursement do
alias Braintree.HTTP
def disburse(params \\ %{}) do
HTTP.request(:get, "disbursements", params)
end
end
"""
@spec request(atom, binary, binary | map, Keyword.t()) :: response
def request(method, path, body \\ %{}, opts \\ []) do
emit_start(method, path)
start_time = System.monotonic_time()
try do
:hackney.request(
method,
build_url(path, opts),
build_headers(opts),
encode_body(body),
build_options()
)
catch
kind, reason ->
duration = System.monotonic_time() - start_time
emit_exception(duration, method, path, %{
kind: kind,
reason: reason,
stacktrace: __STACKTRACE__
})
:erlang.raise(kind, reason, __STACKTRACE__)
else
{:ok, code, _headers, body} when code in 200..399 ->
duration = System.monotonic_time() - start_time
emit_stop(duration, method, path, code)
{:ok, decode_body(body)}
{:ok, 422, _headers, body} ->
duration = System.monotonic_time() - start_time
emit_stop(duration, method, path, 422)
{
:error,
body
|> decode_body()
|> resolve_error_response()
}
{:ok, code, _headers, _body} when code in 400..504 ->
duration = System.monotonic_time() - start_time
emit_stop(duration, method, path, code)
{:error, code_to_reason(code)}
{:error, reason} ->
duration = System.monotonic_time() - start_time
emit_error(duration, method, path, reason)
{:error, reason}
end
end
for method <- ~w(get delete post put)a do
@spec unquote(method)(binary) :: response
@spec unquote(method)(binary, map | list) :: response
@spec unquote(method)(binary, map, list) :: response
def unquote(method)(path) do
request(unquote(method), path, %{}, [])
end
def unquote(method)(path, payload) when is_map(payload) do
request(unquote(method), path, payload, [])
end
def unquote(method)(path, opts) when is_list(opts) do
request(unquote(method), path, %{}, opts)
end
def unquote(method)(path, payload, opts) do
request(unquote(method), path, payload, opts)
end
end
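# A hedged usage sketch of the helpers generated above; "customers" is an
# example Braintree endpoint path:
#
#     {:ok, response} = Braintree.HTTP.post("customers", %{customer: %{}})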
## Helper Functions
@doc false
@spec build_url(binary, Keyword.t()) :: binary
def build_url(path, opts) do
environment = opts |> get_lazy_env(:environment) |> maybe_to_atom()
merchant_id = get_lazy_env(opts, :merchant_id)
Keyword.fetch!(endpoints(), environment) <> merchant_id <> "/" <> path
end
defp maybe_to_atom(value) when is_binary(value), do: String.to_existing_atom(value)
defp maybe_to_atom(value) when is_atom(value), do: value
@doc false
@spec encode_body(binary | map) :: binary
def encode_body(body) when body == "" or body == %{}, do: ""
def encode_body(body), do: Encoder.dump(body)
@doc false
@spec decode_body(binary) :: map
def decode_body(body) do
body
|> :zlib.gunzip()
|> String.trim()
|> Decoder.load()
rescue
ErlangError -> Logger.error("unprocessable response")
end
@doc false
@spec build_headers(Keyword.t()) :: [tuple]
def build_headers(opts) do
auth_header =
case get_lazy_env(opts, :access_token, :none) do
token when is_binary(token) ->
"Bearer " <> token
_ ->
username = get_lazy_env(opts, :public_key)
password = get_lazy_env(opts, :private_key)
"Basic " <> :base64.encode("#{username}:#{password}")
end
[{"Authorization", auth_header} | @headers]
end
defp get_lazy_env(opts, key, default \\ nil) do
Keyword.get_lazy(opts, key, fn -> Braintree.get_env(key, default) end)
end
@doc false
@spec build_options() :: [...]
def build_options do
cacertfile = Path.join(:code.priv_dir(:braintree), @cacertfile)
http_opts = Braintree.get_env(:http_options, [])
[:with_body, ssl_options: [cacertfile: cacertfile]] ++ http_opts
end
@doc false
@spec code_to_reason(integer) :: atom
def code_to_reason(integer)
for {code, status} <- @statuses do
def code_to_reason(unquote(code)), do: unquote(status)
end
defp resolve_error_response(%{"api_error_response" => api_error_response}) do
Error.new(api_error_response)
end
defp resolve_error_response(%{"unprocessable_entity" => _}) do
Error.new(%{message: "Unprocessable Entity"})
end
defp endpoints do
[production: @production_endpoint, sandbox: sandbox_endpoint()]
end
defp sandbox_endpoint do
Application.get_env(
:braintree,
:sandbox_endpoint,
@sandbox_endpoint
)
end
defp emit_start(method, path) do
:telemetry.execute(
[:braintree, :request, :start],
%{system_time: System.system_time()},
%{method: method, path: path}
)
end
defp emit_exception(duration, method, path, error_data) do
:telemetry.execute(
[:braintree, :request, :exception],
%{duration: duration},
%{method: method, path: path, error: error_data}
)
end
defp emit_error(duration, method, path, error_reason) do
:telemetry.execute(
[:braintree, :request, :error],
%{duration: duration},
%{method: method, path: path, error: error_reason}
)
end
defp emit_stop(duration, method, path, code) do
:telemetry.execute(
[:braintree, :request, :stop],
%{duration: duration},
%{method: method, path: path, http_status: code}
)
end
end
|
lib/http.ex
| 0.865722 | 0.429788 |
http.ex
|
starcoder
|
defmodule Braintree do
@moduledoc """
A native Braintree client library for Elixir. Only a subset of the API is
supported and this is a work in progress. That said, it is already used in
production, and any modules that have been implemented can be used.
For general reference please see:
https://developers.braintreepayments.com/reference/overview
"""
defmodule ConfigError do
@moduledoc """
Raised at runtime when a config variable is missing.
"""
defexception [:message]
@doc """
Build a new ConfigError exception.
"""
@impl true
def exception(value) do
message = "missing config for :#{value}"
%ConfigError{message: message}
end
end
@doc """
Convenience function for retrieving Braintree-specific environment values. It
will raise an exception if a value is missing and no default is given.
## Example
iex> Braintree.get_env(:random_value)
** (Braintree.ConfigError) missing config for :random_value
iex> Braintree.get_env(:random_value, "random")
"random"
iex> Application.put_env(:braintree, :random_value, "not-random")
...> value = Braintree.get_env(:random_value)
...> Application.delete_env(:braintree, :random_value)
...> value
"not-random"
iex> System.put_env("RANDOM", "not-random")
...> Application.put_env(:braintree, :system_value, {:system, "RANDOM"})
...> value = Braintree.get_env(:system_value)
...> System.delete_env("RANDOM")
...> value
"not-random"
"""
@spec get_env(atom, any) :: any
def get_env(key, default \\ nil) do
case Application.fetch_env(:braintree, key) do
{:ok, {:system, var}} when is_binary(var) ->
fallback_or_raise(var, System.get_env(var), default)
{:ok, value} ->
value
:error ->
fallback_or_raise(key, nil, default)
end
end
@doc """
Convenience function for setting `braintree` application environment
variables.
## Example
iex> Braintree.put_env(:thingy, "thing")
...> Braintree.get_env(:thingy)
"thing"
"""
@spec put_env(atom, any) :: :ok
def put_env(key, value) do
Application.put_env(:braintree, key, value)
end
defp fallback_or_raise(key, nil, nil), do: raise(ConfigError, key)
defp fallback_or_raise(_, nil, default), do: default
defp fallback_or_raise(_, value, _), do: value
end
|
lib/braintree.ex
| 0.745028 | 0.47457 |
braintree.ex
|
starcoder
|
defmodule JOSEUtils.JWS do
@moduledoc """
Convenience functions to work with signed JWTs
"""
alias JOSEUtils.{JWA, JWK}
@typedoc """
Serialized JWS signed token
For instance:
"<KEY>"
"""
@type serialized :: String.t()
defmodule MalformedError do
defexception message: "malformed JWS"
end
@doc """
Returns the unverified header
It ensures the `"alg"` parameter is set.
## Example
iex> JOSEUtils.JWS.peek_header("<KEY>")
{:ok, %{"alg" => "HS256", "typ" => "JWT"}}
iex> JOSEUtils.JWS.peek_header("probably invalid...?")
{:error, %JOSEUtils.JWS.MalformedError{message: "malformed JWS"}}
"""
@spec peek_header(serialized()) ::
{:ok, %{optional(String.t()) => any()}} | {:error, Exception.t()}
def peek_header(<<_::binary>> = jws) do
protected_str = JOSE.JWS.peek_protected(jws)
{:ok, %{"alg" => _}} = Jason.decode(protected_str)
rescue
_ ->
{:error, %MalformedError{}}
end
@doc """
Returns `:mac` if the JWS uses a MAC signature algorithm, `:public_key_crypto` otherwise
## Example
iex> JOSE.JWS.sign(JOSE.JWK.generate_key({:ec, "P-256"}), "toto", %{"alg" => "ES256"})
...> |> JOSE.JWS.compact()
...> |> elem(1)
...> |> JOSEUtils.JWS.sig_alg_type()
:public_key_crypto
iex> JOSE.JWS.sign(JOSE.JWK.generate_key({:oct, 32}), "toto", %{"alg" => "HS256"})
...> |> JOSE.JWS.compact()
...> |> elem(1)
...> |> JOSEUtils.JWS.sig_alg_type()
:mac
"""
@spec sig_alg_type(serialized()) :: :public_key_crypto | :mac
def sig_alg_type(<<_::binary>> = jws) do
jws
|> JOSE.JWS.peek_protected()
|> Jason.decode!()
|> case do
%{"alg" => alg} when alg in ["HS256", "HS384", "HS512"] ->
:mac
_ ->
:public_key_crypto
end
end
@doc """
Signs a payload with a JWK and a given signing algorithm
The payload can be a string, in which case it is signed directly, or any other data type
which will first be converted into text using JSON serialization.
If the JWK has a key id ("kid" member), it is automatically added to the resulting JWS.
When using the `"none"` algorithm, anything can be passed as the JWK (as it is not used).
## Example
iex> jwk = %{"k" => "FWTNVgrQyQyZmduoAVyOfI1myMs", "kty" => "oct"}
iex> JOSEUtils.JWS.sign("some text", jwk, "HS256")
{:ok, "<KEY>"}
iex> JOSE.unsecured_signing(true)
iex> JOSEUtils.JWS.sign!("test payload", %{}, "none", %{"some" => "header"})
"<KEY>."
"""
@spec sign(
payload :: any(),
JWK.t() | any(),
JWA.sig_alg(),
additional_headers :: %{optional(String.t()) => any()}
) :: {:ok, serialized()} | {:error, Exception.t()}
def sign(payload, jwk, sig_alg, additional_headers \\ %{}) do
{:ok, sign!(payload, jwk, sig_alg, additional_headers)}
rescue
e ->
{:error, e}
end
@doc """
See `sign/4`
"""
@spec sign!(
payload :: any(),
JWK.t(),
JWA.sig_alg(),
header :: %{optional(String.t()) => any()}
) :: serialized()
def sign!(payload, jwk, sig_alg, additional_headers \\ %{})
def sign!(payload, %{"kid" => kid} = jwk, sig_alg, additional_headers),
do: do_sign!(payload, jwk, sig_alg, Map.put(additional_headers, "kid", kid))
def sign!(payload, jwk, sig_alg, additional_headers),
do: do_sign!(payload, jwk, sig_alg, additional_headers)
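# do_sign!/4 dispatch: a binary payload with a %JOSE.JWK{} is signed and compacted
# directly; the "none" algorithm substitutes an empty key; a plain key map is first
# converted with JOSE.JWK.from_map/1; any non-binary payload is JSON-encoded and
# re-enters sign!/4.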
defp do_sign!(<<_::binary>> = payload, %JOSE.JWK{} = jose_jwk, sig_alg, additional_headers) do
JOSE.JWS.sign(jose_jwk, payload, Map.merge(additional_headers, %{"alg" => sig_alg}))
|> JOSE.JWS.compact()
|> elem(1)
end
defp do_sign!(payload, _jwk, "none", additional_headers) do
do_sign!(payload, %JOSE.JWK{}, "none", additional_headers)
end
defp do_sign!(<<_::binary>> = payload, jwk, sig_alg, additional_headers) do
do_sign!(payload, JOSE.JWK.from_map(jwk), sig_alg, additional_headers)
end
defp do_sign!(payload, jwk, sig_alg, additional_headers) do
payload
|> Jason.encode!()
|> sign!(jwk, sig_alg, additional_headers)
end
@doc """
Verifies the signature of a JWS, and returns its content and the signature key
The function also filters the keys using `JOSEUtils.JWKS.verification_keys/2` with the
whitelisted signature algorithms. If the JWS has an identifier (`"kid"`), it only uses
that specific key.
## Example
iex> JOSE.crypto_fallback(true)
iex> jwk_ed25519 = JOSE.JWK.generate_key({:okp, :Ed25519})
iex> jwk_ed25519_map = jwk_ed25519 |> JOSE.JWK.to_map() |> elem(1)
iex> signed_ed25519 = JOSE.JWS.sign(jwk_ed25519, "{}", %{ "alg" => "EdDSA" }) |> JOSE.JWS.compact |> elem(1)
iex> JOSEUtils.JWS.verify(signed_ed25519, jwk_ed25519_map, ["RS256"])
:error
iex> JOSEUtils.JWS.verify(signed_ed25519, jwk_ed25519_map, ["EdDSA"]) |> elem(0)
:ok
"""
@spec verify(
jws :: serialized(),
jwk_or_jwks :: JOSEUtils.JWK.t() | [JOSEUtils.JWK.t()],
allowed_algs :: [JOSEUtils.JWA.sig_alg()]
) :: {:ok, {verified_content :: binary(), JOSEUtils.JWK.t()}} | :error
def verify(jws, %{} = jwk, allowed_algs) do
verify(jws, [jwk], allowed_algs)
end
def verify(jws, jwks, allowed_algs) do
with {:ok, header} <- peek_header(jws),
true <- header["alg"] in allowed_algs do
jwks =
case header do
%{"alg" => _, "kid" => jws_kid} ->
Enum.filter(jwks, fn jwk -> jwk["kid"] == jws_kid end)
_ ->
jwks
end
|> JOSEUtils.JWKS.verification_keys(header["alg"])
do_verify(jws, header, jwks)
else
_ ->
:error
end
end
@spec do_verify(
jws :: serialized(),
map(),
jwk_or_jwks :: JOSEUtils.JWK.t() | [JOSEUtils.JWK.t()]
) :: {:ok, {binary(), JOSEUtils.JWK.t()}} | :error
defp do_verify(jws, header, %{} = jwk) do
case JOSE.JWS.verify_strict(JOSE.JWK.from_map(jwk), [header["alg"]], jws) do
{true, verified_content, _} ->
{:ok, {verified_content, jwk}}
_ ->
:error
end
end
defp do_verify(jws, header, jwks) when is_list(jwks) do
Enum.find_value(
jwks,
:error,
fn jwk ->
case do_verify(jws, header, jwk) do
{:ok, _} = result ->
result
:error ->
false
end
end
)
end
end
|
lib/jose_utils/jws.ex
| 0.894427 | 0.41745 |
jws.ex
|
starcoder
|
defmodule EctoTablestore.Repo do
@moduledoc ~S"""
Defines a repository for Tablestore.
A repository maps to an underlying data store, controlled by `Ecto.Adapters.Tablestore` adapter.
When used, the repository expects the `:otp_app` option, and uses `Ecto.Adapters.Tablestore` by
default. The `:otp_app` should point to an OTP application that has repository configuration.
For example, the repository:
```elixir
defmodule EctoTablestore.MyRepo do
use EctoTablestore.Repo,
otp_app: :my_otp_app
end
```
Configure `ex_aliyun_ots` as usual:
```elixir
config :ex_aliyun_ots, MyInstance,
name: "MY_INSTANCE_NAME",
endpoint: "MY_INSTANCE_ENDPOINT",
access_key_id: "MY_OTS_ACCESS_KEY",
access_key_secret: "MY_OTS_ACCESS_KEY_SECRET"
config :ex_aliyun_ots,
instances: [MyInstance]
```
Add the following configuration to associate `MyRepo` with the previous configuration of
`ex_aliyun_ots`:
```elixir
config :my_otp_app, EctoTablestore.MyRepo,
instance: MyInstance
```
"""
@type search_result :: %{
is_all_succeeded: boolean(),
next_token: binary() | nil,
schemas: list(),
total_hits: integer()
}
@type schema :: Ecto.Schema.t()
@type schema_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t()
@type options :: Keyword.t()
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
use Ecto.Repo,
otp_app: Keyword.get(opts, :otp_app),
adapter: Ecto.Adapters.Tablestore
end
end
@doc """
Returns the adapter tied to the repository.
"""
@callback __adapter__ :: Ecto.Adapters.Tablestore.t()
@doc """
Provide search index features for the following scenarios:
* MatchAllQuery
* MatchQuery
* MatchPhraseQuery
* TermQuery
* TermsQuery
* PrefixQuery
* RangeQuery
* WildcardQuery
* BoolQuery
* NestedQuery
* ExistsQuery
* GeoBoundingBoxQuery
* GeoDistanceQuery
* GeoPolygonQuery
Please refer [ExAliyunOts.Search](https://hexdocs.pm/ex_aliyun_ots/ExAliyunOts.Search.html#query) query section
for details, they are similar options for use, for example:
```
MyRepo.search(MySchema, "index_name",
search_query: [
query: exists_query("comment")
]
)
```
"""
@callback search(schema, index_name :: String.t(), options) ::
{:ok, search_result} | {:error, term()}
@doc """
As a wrapper built on `ExAliyunOts.stream_search/4` to create composable and lazy enumerables
stream for iteration.
## Options
Please see options of `c:search/3` for details.
"""
@callback stream_search(schema, index_name :: String.t(), options) :: Enumerable.t()
@doc """
Similar to `c:get/3`, please ensure the schema entity has been filled with the whole primary key(s).
## Options
* `:entity_full_match`, whether to transfer the input attribute column(s) into the `:==`
filtering expressions, by default it is `false`, when set `entity_full_match: true`, please
notice the following rules:
* If there exists attribute column(s) provided in entity, these fields will be combined
within multiple `:==` filtering expressions;
* If there exists attribute column(s) provided and meanwhile set `filter` option, they will
be merged into a composite filter.
For other options, please refer to `c:get/3`.
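## Example
A minimal sketch (the schema entity and its fields are assumptions):
MyRepo.one(%MySchema{id: "1", name: "row"}, entity_full_match: true)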
"""
@callback one(schema, options) :: schema | {:error, term()} | nil
@doc """
Fetch a single struct from tablestore where the whole primary key(s) match the given ids.
## Options
* `:columns_to_get`, string list, return the specified attribute columns; if this option field is
not specified, all attribute columns are returned together.
* `:start_column`, string, used as the starting column for a Wide Column read; the return result
includes this column as the starting column.
* `:end_column`, string, used as the ending column for a Wide Column read; the return result does
NOT contain this column.
* `:filter`, used as a filter by condition, supporting `">"`, `"<"`, `">="`, `"<="`, `"=="`,
`"and"`, `"or"` and `"()"` expressions.
The `ignore_if_missing` option can be used for a non-existent attribute column, for
example:
If an attribute column does not exist and the option is set as `true`, this match condition
is ignored in the return result;
An existing attribute column DOES NOT suit this use case: the match condition will always
affect the return result, and rows that do not satisfy the match condition won't be returned
in the result.
```elixir
filter: filter(({"name", ignore_if_missing: true} == var_name and "age" > 1) or ("class" == "1"))
```
* `:transaction_id`, read under local transaction in a partition key.
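## Example
A minimal sketch (the schema and its primary key are assumptions):
MyRepo.get(MySchema, [id: "1"], columns_to_get: ["name"])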
"""
@callback get(schema, ids :: list, options) :: schema | {:error, term()} | nil
@doc """
Get multiple structs by range from one table, rely on the conjunction of the partition key and
other primary key(s).
## Options
* `:direction`, by default it is `:forward`, set it as `:forward` to make the order of the query
result in ascending by primary key(s), set it as `:backward` to make the order of the query
result in descending by primary key(s).
* `:columns_to_get`, string list, return the specified attribute columns; if this option field is
not specified, all attribute columns will be returned.
* `:start_column`, string, used as the starting column for a Wide Column read; the return result
includes this column as the starting column.
* `:end_column`, string, used as the ending column for a Wide Column read; the return result does
NOT contain this column.
* `:limit`, optional, the maximum number of rows of data to be returned; this value must be
greater than 0. Whether or not this option is set, at most 5,000 data rows are returned
and the total data size never exceeds 4 MB.
* `:transaction_id`, read under local transaction in a partition key.
* `:filter`, used as a filter by condition, supporting `">"`, `"<"`, `">="`, `"<="`, `"=="`,
`"and"`, `"or"` and `"()"` expressions.
The `ignore_if_missing` option can be used for a non-existent attribute column, for
example:
If an attribute column does not exist and the option is set as `true`, this match condition
is ignored in the return result;
An existing attribute column DOES NOT suit this use case: the match condition will always
affect the return result, and rows that do not satisfy the match condition won't be returned
in the result.
```elixir
filter: filter(({"name", ignore_if_missing: true} == var_name and "age" > 1) or ("class" == "1"))
```
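## Example
A minimal sketch (the schema and primary-key values are assumptions):
{records, next_start_key} =
MyRepo.get_range(MySchema, [id: 1], [id: 100], direction: :forward, limit: 100)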
"""
@callback get_range(
schema,
start_primary_keys :: list | binary(),
end_primary_keys :: list,
options
) :: {nil, nil} | {list, nil} | {list, binary()} | {:error, term()}
@doc """
As a wrapper built on `ExAliyunOts.stream_range/5` to create composable and lazy enumerables
stream for iteration.
## Options
Please see options of `c:get_range/4` for details.
"""
@callback stream_range(schema, start_primary_keys :: list, end_primary_keys :: list, options) ::
Enumerable.t()
@doc """
Batch get several rows of data from one or more tables. From the client's perspective, this
batch request puts multiple `get_row` operations into one request.
After each operation is executed on the server, the results are returned independently and
capacity units are consumed independently.
When a `schema_entity` is given as input, only its primary keys are used in the query; if its
attribute columns should be used as query conditions as well, please use the
`entity_full_match: true` option to do that.
## Example
batch_get([
{Schema1, [[ids: ids1], [ids: ids2]]},
[%Schema2{keys: keys1}, %Schema2{keys: keys2}]
])
batch_get([
{Schema1, [[ids: ids1], [ids: ids2]]},
{
[
%Schema2{keys: keys1},
%Schema2{keys: keys2}
],
entity_full_match: true
}
])
batch_get([
{
[
%Schema2{keys: keys1},
%Schema2{keys: keys2}
],
filter: filter("attr_field" == 1),
columns_to_get: ["attr_field", "attr_field2"]
}
])
"""
@callback batch_get(gets) :: {:ok, Keyword.t()} | {:error, term()}
when gets: [
{
module :: Ecto.Schema.t(),
[{key :: String.t() | atom(), value :: integer | String.t()}],
options
}
| {
module :: Ecto.Schema.t(),
[{key :: String.t() | atom(), value :: integer | String.t()}]
}
| (schema_entity :: Ecto.Schema.t())
| {[schema_entity :: Ecto.Schema.t()], options}
]
@doc """
Batch write several rows of data to one or more tables. From the client's perspective, this
batch request puts multiple put_row/delete_row/update_row operations into one request.
After each operation is executed on the server, the results are returned independently and
capacity units are consumed independently.
If a batch write request includes a transaction ID, all rows in that request can only be
written to the table that matches the transaction ID.
## Options
* `transaction_id`, use local transaction.
## Example
The options of each `:put`, `:delete`, and `:update` operation are similar to those of
`ExAliyunOts.put_row/5`, `ExAliyunOts.delete_row/4` and `ExAliyunOts.update_row/4`, but the
`transaction_id` option is used in the options of `c:EctoTablestore.Repo.batch_write/2`.
batch_write([
delete: [
schema_entity_1,
schema_entity_2
],
put: [
{%Schema2{}, condition: condition(:ignore)},
{%Schema1{}, condition: condition(:expect_not_exist)},
{changeset_schema_1, condition: condition(:ignore)}
],
update: [
{changeset_schema_1, return_type: :pk},
{changeset_schema_2}
]
])
"""
@callback batch_write(writes, options) :: {:ok, Keyword.t()} | {:error, term()}
when writes: [
{
operation :: :put,
items :: [
item ::
{schema_entity :: Ecto.Schema.t(), options}
| {module :: Ecto.Schema.t(), ids :: list(), attrs :: list(), options}
| {changeset :: Ecto.Changeset.t(), operation :: Keyword.t()}
]
}
| {
operation :: :update,
items :: [
changeset ::
Ecto.Changeset.t()
| {changeset :: Ecto.Changeset.t(), options}
]
}
| {
operation :: :delete,
items :: [
schema_entity ::
Ecto.Schema.t()
| {schema_entity :: Ecto.Schema.t(), options}
| {module :: Ecto.Schema.t(), ids :: list(), options}
]
}
]
@doc """
Inserts a struct defined via EctoTablestore.Schema or a changeset.
## Options
* `:condition`, this option is required, whether to add conditional judgment before the data insert.
Two kinds of insert condition types as below:
As `condition(:ignore)` means DO NOT do any condition validation before insert; if the schema's
non-partitioned primary key is auto-incrementing, we can only use the `condition(:ignore)` option.
As `condition(:expect_not_exist)` means the primary key(s) must NOT exist before insert.
* `:transaction_id`, insert under local transaction in a partition key.
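## Example
A minimal sketch (the schema entity is an assumption):
{:ok, saved} = MyRepo.insert(%MySchema{id: "1"}, condition: condition(:expect_not_exist))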
"""
@callback insert(schema_or_changeset, options) :: {:ok, schema} | {:error, term()}
@doc """
Delete a struct using its primary key.
## Options
* `:condition`, this option is required, whether to add conditional judgment before data
delete.
Two kinds of delete condition types as below:
As `condition(:expect_exist)` means the primary key(s) can match a row to delete, we also can add
some compare expressions for the attribute columns, e.g.
1. condition(:expect_exist, "attr1" == value1 and "attr2" > 1)
2. condition(:expect_exist, "attr1" != value1)
3. condition(:expect_exist, "attr1" > 100 or "attr2" < 1000)
As `condition(:ignore)` means DO NOT do any condition validation before delete.
* `:transaction_id`, delete under local transaction in a partition key.
* `:stale_error_field` - The field where stale errors will be added in the returning
changeset. This option can be used to avoid raising `Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured `:stale_error_field` when
stale errors happen, defaults to "is stale".
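## Example
A minimal sketch (the schema entity is an assumption):
{:ok, deleted} = MyRepo.delete(%MySchema{id: "1"}, condition: condition(:expect_exist))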
"""
@callback delete(schema_or_changeset, options) :: {:ok, schema} | {:error, term()}
@doc """
Updates a changeset using its primary key.
## Options
* `:condition`, this option is required, whether to add conditional judgment before data
update.
Two kinds of update condition types as below:
As `condition(:expect_exist)` means the primary key(s) can match a row to update, we also can add
some compare expressions for the attribute columns, e.g.
1. condition(:expect_exist, "attr1" == value1 and "attr2" > 1)
2. condition(:expect_exist, "attr1" != value1)
3. condition(:expect_exist, "attr1" > 100 or "attr2" < 1000)
As `condition(:ignore)` means DO NOT do any condition validation before update.
* `:transaction_id`, update under local transaction in a partition key.
* `:stale_error_field` - The field where stale errors will be added in the returning
changeset. This option can be used to avoid raising `Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured `:stale_error_field` when
stale errors happen, defaults to "is stale".
* `:returning`, this option is required when the input changeset contains an `:increment` operation; all fields of the
atomic increment operation must be explicitly set in this option, in any order. If any field related to an atomic
increment operation is missed, an `Ecto.ConstraintError` is raised and the update is terminated.
If there is no `:increment` operation, the `:returning` option does not need to be set. If `returning: true` is set but
not all fields are actually changed, the unchanged fields will be replaced with `nil` in the returned schema data.
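## Example
A minimal sketch (the changeset is an assumption):
{:ok, updated} = MyRepo.update(changeset, condition: condition(:expect_exist))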
"""
@callback update(changeset :: Ecto.Changeset.t(), options) :: {:ok, schema} | {:error, term()}
@doc """
Please see `c:Ecto.Repo.start_link/1` for details.
"""
@callback start_link(options) :: {:ok, pid} | {:error, {:already_started, pid}} | {:error, term}
end
|
lib/ecto_tablestore/repo.ex
| 0.902871 | 0.841696 |
repo.ex
|
starcoder
|
defmodule LocalLedger.CachedBalance do
@moduledoc """
This module is an interface to the abstract balances stored in the DB. It is responsible for caching
balances and for retrieving the current balances (which will either be
loaded from a cached balance, computed, or both).
"""
alias LocalLedgerDB.{CachedBalance, Entry, Wallet}
@doc """
Cache all the wallets' balances using a batch stream mechanism for retrieval (1000 at a time). This
is meant to be used from some kind of scheduler, but can also be run manually.
"""
@spec cache_all() :: :ok
def cache_all do
Wallet.stream_all(fn wallet ->
{:ok, calculate_with_strategy(wallet)}
end)
end
@spec all(%Wallet{} | [%Wallet{}]) :: {:ok, map()}
def all(wallet, attrs \\ %{})
@doc """
Get all the balances for the given wallet or wallets.
"""
def all(wallet_or_wallets, attrs) do
{:ok, get_balances(wallet_or_wallets, attrs)}
end
@doc """
Get the balance for the specified token (token_id) and
the given wallet.
"""
@spec get(%Wallet{} | [%Wallet{}], String.t()) :: {:ok, map()}
def get(wallet_or_wallets, token_id) do
balances =
wallet_or_wallets
|> get_balances()
|> Enum.into(%{}, fn {address, amounts} ->
{address, %{token_id => amounts[token_id] || 0}}
end)
{:ok, balances}
end
defp get_balances(wallet_or_wallets, attrs \\ %{})
defp get_balances(wallets, attrs) when is_list(wallets) do
wallets
|> Enum.map(fn wallet -> wallet.address end)
|> CachedBalance.all()
|> calculate_all_amounts(wallets, attrs)
end
defp get_balances(wallet, attrs), do: get_balances([wallet], attrs)
defp calculate_all_amounts(computed_balances, wallets, attrs) do
computed_balances =
Enum.into(computed_balances, %{}, fn balance ->
{balance.wallet_address, balance}
end)
Enum.into(wallets, %{}, fn wallet ->
attrs = Map.put(attrs, "computed_balance", computed_balances[wallet.address])
{wallet.address, calculate_amounts(wallet, attrs)}
end)
end
defp calculate_amounts(wallet, %{"computed_balance" => nil, "tokens" => tokens}),
do: calculate_from_beginning_and_insert(wallet, tokens)
defp calculate_amounts(wallet, %{"computed_balance" => nil}),
do: calculate_from_beginning_and_insert(wallet)
defp calculate_amounts(wallet, %{"computed_balance" => computed_balance, "tokens" => tokens}) do
tokens_id = Enum.map(tokens, fn token -> token.id end)
wallet.address
|> Entry.calculate_all_balances(%{
since: computed_balance.computed_at,
token_id: tokens_id
})
|> add_amounts(computed_balance.amounts)
end
defp calculate_amounts(wallet, %{"computed_balance" => computed_balance}) do
wallet.address
|> Entry.calculate_all_balances(%{
since: computed_balance.computed_at
})
|> add_amounts(computed_balance.amounts)
end
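# Merges two token-amount maps by summing the amounts per token; a token missing
# from either map counts as 0.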
defp add_amounts(amounts_1, amounts_2) do
(Map.keys(amounts_1) ++ Map.keys(amounts_2))
|> Enum.into(
%{},
fn token_id ->
{token_id, (amounts_1[token_id] || 0) + (amounts_2[token_id] || 0)}
end
)
end
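# Reads the :balance_caching_strategy application config and dispatches on it:
# "since_last_cached" resumes from the last cached balance (recomputing from the
# beginning every :balance_caching_reset_frequency runs), while "since_beginning"
# (the default) always recomputes from the beginning.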
defp calculate_with_strategy(wallet) do
:local_ledger
|> Application.get_env(:balance_caching_strategy)
|> calculate_with_strategy(wallet)
end
defp calculate_with_strategy("since_last_cached", wallet) do
case CachedBalance.get(wallet.address) do
nil ->
calculate_from_beginning_and_insert(wallet)
computed_balance ->
:local_ledger
|> Application.get_env(:balance_caching_reset_frequency)
|> calculate_with_reset_frequency(wallet, computed_balance)
end
end
defp calculate_with_strategy("since_beginning", wallet) do
calculate_from_beginning_and_insert(wallet)
end
defp calculate_with_strategy(_, wallet) do
calculate_with_strategy("since_beginning", wallet)
end
defp calculate_with_reset_frequency(frequency, wallet, %{cached_count: cached_count})
when is_number(frequency) and frequency > 0 and cached_count >= frequency - 1 do
calculate_from_beginning_and_insert(wallet)
end
defp calculate_with_reset_frequency(_, wallet, computed_balance) do
calculate_from_cached_and_insert(wallet, computed_balance)
end
defp calculate_from_beginning_and_insert(wallet) do
computed_at = NaiveDateTime.utc_now()
wallet.address
|> Entry.calculate_all_balances(%{upto: computed_at})
|> insert(wallet, computed_at, 1)
end
defp calculate_from_beginning_and_insert(wallet, tokens) do
computed_at = NaiveDateTime.utc_now()
token_ids = Enum.map(tokens, fn token -> token.id end)
wallet.address
|> Entry.calculate_all_balances(%{upto: computed_at, token_id: token_ids})
|> insert(wallet, computed_at, 1)
end
defp calculate_from_cached_and_insert(wallet, computed_balance) do
computed_at = NaiveDateTime.utc_now()
wallet.address
|> Entry.calculate_all_balances(%{
since: computed_balance.computed_at,
upto: computed_at
})
|> add_amounts(computed_balance.amounts)
|> insert(wallet, computed_at, computed_balance.cached_count + 1)
end
defp insert(amounts, wallet, computed_at, cached_count) do
_ =
if Enum.any?(amounts, fn {_token, amount} -> amount > 0 end) do
{:ok, _} =
CachedBalance.insert(%{
amounts: amounts,
wallet_address: wallet.address,
cached_count: cached_count,
computed_at: computed_at
})
end
amounts
end
end
|
apps/local_ledger/lib/local_ledger/cached_balance.ex
| 0.835047 | 0.481515 |
cached_balance.ex
|
starcoder
|
defmodule ESpec.Context do
@moduledoc """
Defines macros 'context', 'describe', and 'example_group'.
Defines macros for 'skip' and 'focus' example groups.
"""
@aliases ~w(describe example_group)a
@skipped ~w(xcontext xdescribe xexample_group)a
@focused ~w(fcontext fdescribe fexample_group)a
@doc """
Context has description, line, and options.
Available options are:
- [skip: true] or [skip: "Reason"] - skips examples in the context;
- [focus: true] - sets focus to run with the `--focus` option.
"""
defstruct description: "", module: nil, line: nil, opts: []
@doc "Add context with description and opts to 'example context'."
defmacro context(description, opts, do: block) do
quote do
tail = @context
head = %ESpec.Context{
description: unquote(description),
module: __MODULE__,
line: __ENV__.line,
opts: unquote(opts)
}
@context [head | tail]
unquote(block)
@context tail
end
end
defmacro context(opts, do: block) when is_list(opts) do
quote do: context("", unquote(opts), do: unquote(block))
end
defmacro context(description, do: block) do
quote do
context(unquote(description), [], do: unquote(block))
end
end
defmacro context(do: block) do
quote do: context("", [], do: unquote(block))
end
defmacro context(_description) do
quote do: context("", [], do: true)
end
@doc "Aliases for `context`."
Enum.each(@aliases, fn func ->
defmacro unquote(func)(description, opts, do: block) do
quote do: context(unquote(description), unquote(opts), do: unquote(block))
end
defmacro unquote(func)(description_or_opts, do: block) do
quote do: context(unquote(description_or_opts), do: unquote(block))
end
defmacro unquote(func)(do: block) do
quote do: context(do: unquote(block))
end
defmacro unquote(func)(_description) do
quote do: context(_description)
end
end)
@doc "Macros for skipped contexts"
Enum.each(@skipped, fn func ->
defmacro unquote(func)(description, opts, do: block) do
reason = "`#{unquote(func)}`"
quote do:
context(
unquote(description),
Keyword.put(unquote(opts), :skip, unquote(reason)),
do: unquote(block)
)
end
defmacro unquote(func)(opts, do: block) when is_list(opts) do
reason = "`#{unquote(func)}`"
quote do: context(Keyword.put(unquote(opts), :skip, unquote(reason)), do: unquote(block))
end
defmacro unquote(func)(description, do: block) do
reason = "`#{unquote(func)}`"
quote do: context(unquote(description), [skip: unquote(reason)], do: unquote(block))
end
defmacro unquote(func)(do: block) do
reason = "`#{unquote(func)}`"
quote do: context([skip: unquote(reason)], do: unquote(block))
end
end)
@doc "Macros for focused contexts"
Enum.each(@focused, fn func ->
defmacro unquote(func)(description, opts, do: block) do
quote do:
context(
unquote(description),
Keyword.put(unquote(opts), :focus, true),
do: unquote(block)
)
end
defmacro unquote(func)(opts, do: block) when is_list(opts) do
quote do: context(Keyword.put(unquote(opts), :focus, true), do: unquote(block))
end
defmacro unquote(func)(description, do: block) do
quote do: context(unquote(description), [focus: true], do: unquote(block))
end
defmacro unquote(func)(do: block) do
quote do: context([focus: true], do: unquote(block))
end
end)
end
|
lib/espec/context.ex
| 0.696991 | 0.513485 |
context.ex
|
starcoder
|
defmodule AWS.Personalize do
@moduledoc """
Amazon Personalize is a machine learning service that makes it easy to add
individualized recommendations to customers.
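A minimal call sketch (the client fields shown are assumptions about the `AWS.Client`
struct; the credentials and region are placeholders):
client = %AWS.Client{access_key_id: "AKIA...", secret_access_key: "...", region: "us-east-1"}
{:ok, result, _http_response} = AWS.Personalize.list_recipes(client, %{})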
"""
@doc """
Creates a batch inference job. The operation can handle up to 50 million
records and the input file must be in JSON format. For more information,
see `recommendations-batch`.
"""
def create_batch_inference_job(client, input, options \\ []) do
request(client, "CreateBatchInferenceJob", input, options)
end
@doc """
Creates a campaign by deploying a solution version. When a client calls the
[GetRecommendations](https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html)
and
[GetPersonalizedRanking](https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetPersonalizedRanking.html)
APIs, a campaign is specified in the request.
**Minimum Provisioned TPS and Auto-Scaling**
A transaction is a single `GetRecommendations` or `GetPersonalizedRanking`
call. Transactions per second (TPS) is the throughput and unit of billing
for Amazon Personalize. The minimum provisioned TPS (`minProvisionedTPS`)
specifies the baseline throughput provisioned by Amazon Personalize, and
thus, the minimum billing charge. If your TPS increases beyond
`minProvisionedTPS`, Amazon Personalize auto-scales the provisioned
capacity up and down, but never below `minProvisionedTPS`, to maintain a
70% utilization. There's a short time delay while the capacity is increased
that might cause loss of transactions. It's recommended to start with a low
`minProvisionedTPS`, track your usage using Amazon CloudWatch metrics, and
then increase the `minProvisionedTPS` as necessary.
**Status**
A campaign can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> <li> DELETE PENDING > DELETE IN_PROGRESS
</li> </ul> To get the campaign status, call `DescribeCampaign`.
<note> Wait until the `status` of the campaign is `ACTIVE` before asking
the campaign for recommendations.
</note> <p class="title"> **Related APIs**
<ul> <li> `ListCampaigns`
</li> <li> `DescribeCampaign`
</li> <li> `UpdateCampaign`
</li> <li> `DeleteCampaign`
</li> </ul>
"""
def create_campaign(client, input, options \\ []) do
request(client, "CreateCampaign", input, options)
end
@doc """
Creates an empty dataset and adds it to the specified dataset group. Use
`CreateDatasetImportJob` to import your training data to a dataset.
There are three types of datasets:
<ul> <li> Interactions
</li> <li> Items
</li> <li> Users
</li> </ul> Each dataset type has an associated schema with required field
types. Only the `Interactions` dataset is required in order to train a
model (also referred to as creating a solution).
A dataset can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> <li> DELETE PENDING > DELETE IN_PROGRESS
</li> </ul> To get the status of the dataset, call `DescribeDataset`.
<p class="title"> **Related APIs**
<ul> <li> `CreateDatasetGroup`
</li> <li> `ListDatasets`
</li> <li> `DescribeDataset`
</li> <li> `DeleteDataset`
</li> </ul>
"""
def create_dataset(client, input, options \\ []) do
request(client, "CreateDataset", input, options)
end
@doc """
Creates an empty dataset group. A dataset group contains related datasets
that supply data for training a model. A dataset group can contain at most
three datasets, one for each type of dataset:
<ul> <li> Interactions
</li> <li> Items
</li> <li> Users
</li> </ul> To train a model (create a solution), a dataset group that
contains an `Interactions` dataset is required. Call `CreateDataset` to add
a dataset to the group.
A dataset group can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> <li> DELETE PENDING
</li> </ul> To get the status of the dataset group, call
`DescribeDatasetGroup`. If the status shows as CREATE FAILED, the response
includes a `failureReason` key, which describes why the creation failed.
<note> You must wait until the `status` of the dataset group is `ACTIVE`
before adding a dataset to the group.
</note> You can specify an AWS Key Management Service (KMS) key to encrypt
the datasets in the group. If you specify a KMS key, you must also include
an AWS Identity and Access Management (IAM) role that has permission to
access the key.
<p class="title"> **APIs that require a dataset group ARN in the request**
<ul> <li> `CreateDataset`
</li> <li> `CreateEventTracker`
</li> <li> `CreateSolution`
</li> </ul> <p class="title"> **Related APIs**
<ul> <li> `ListDatasetGroups`
</li> <li> `DescribeDatasetGroup`
</li> <li> `DeleteDatasetGroup`
</li> </ul>
"""
def create_dataset_group(client, input, options \\ []) do
request(client, "CreateDatasetGroup", input, options)
end
@doc """
Creates a job that imports training data from your data source (an Amazon
S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to
import the training data, you must specify an AWS Identity and Access
Management (IAM) role that has permission to read from the data source, as
Amazon Personalize makes a copy of your data and processes it in an
internal AWS system.
<important> The dataset import job replaces any previous data in the
dataset.
</important> **Status**
A dataset import job can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> </ul> To get the status of the import job, call
`DescribeDatasetImportJob`, providing the Amazon Resource Name (ARN) of the
dataset import job. The dataset import is complete when the status shows as
ACTIVE. If the status shows as CREATE FAILED, the response includes a
`failureReason` key, which describes why the job failed.
<note> Importing takes time. You must wait until the status shows as ACTIVE
before training a model using the dataset.
</note> <p class="title"> **Related APIs**
<ul> <li> `ListDatasetImportJobs`
</li> <li> `DescribeDatasetImportJob`
</li> </ul>
"""
def create_dataset_import_job(client, input, options \\ []) do
request(client, "CreateDatasetImportJob", input, options)
end
@doc """
Creates an event tracker that you use when sending event data to the
specified dataset group using the
[PutEvents](https://docs.aws.amazon.com/personalize/latest/dg/API_UBS_PutEvents.html)
API.
When Amazon Personalize creates an event tracker, it also creates an
*event-interactions* dataset in the dataset group associated with the event
tracker. The event-interactions dataset stores the event data from the
`PutEvents` call. The contents of this dataset are not available to the
user.
<note> Only one event tracker can be associated with a dataset group. You
will get an error if you call `CreateEventTracker` using the same dataset
group as an existing event tracker.
</note> When you send event data you include your tracking ID. The tracking
ID identifies the customer and authorizes the customer to send the data.
The event tracker can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> <li> DELETE PENDING > DELETE IN_PROGRESS
</li> </ul> To get the status of the event tracker, call
`DescribeEventTracker`.
<note> The event tracker must be in the ACTIVE state before using the
tracking ID.
</note> <p class="title"> **Related APIs**
<ul> <li> `ListEventTrackers`
</li> <li> `DescribeEventTracker`
</li> <li> `DeleteEventTracker`
</li> </ul>
"""
def create_event_tracker(client, input, options \\ []) do
request(client, "CreateEventTracker", input, options)
end
@doc """
Creates a recommendation filter. For more information, see [Using Filters
with Amazon
Personalize](https://docs.aws.amazon.com/personalize/latest/dg/filters.html).
"""
def create_filter(client, input, options \\ []) do
request(client, "CreateFilter", input, options)
end
@doc """
Creates an Amazon Personalize schema from the specified schema string. The
schema you create must be in Avro JSON format.
Amazon Personalize recognizes three schema variants. Each schema is
associated with a dataset type and has a set of required field and
keywords. You specify a schema when you call `CreateDataset`.
<p class="title"> **Related APIs**
<ul> <li> `ListSchemas`
</li> <li> `DescribeSchema`
</li> <li> `DeleteSchema`
</li> </ul>
"""
def create_schema(client, input, options \\ []) do
request(client, "CreateSchema", input, options)
end
@doc """
Creates the configuration for training a model. A trained model is known as
a solution. After the configuration is created, you train the model (create
a solution) by calling the `CreateSolutionVersion` operation. Every time
you call `CreateSolutionVersion`, a new version of the solution is created.
After creating a solution version, you check its accuracy by calling
`GetSolutionMetrics`. When you are satisfied with the version, you deploy
it using `CreateCampaign`. The campaign provides recommendations to a
client through the
[GetRecommendations](https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html)
API.
To train a model, Amazon Personalize requires training data and a recipe.
The training data comes from the dataset group that you provide in the
request. A recipe specifies the training algorithm and a feature
transformation. You can specify one of the predefined recipes provided by
Amazon Personalize. Alternatively, you can specify `performAutoML` and
Amazon Personalize will analyze your data and select the optimum
USER_PERSONALIZATION recipe for you.
**Status**
A solution can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> <li> DELETE PENDING > DELETE IN_PROGRESS
</li> </ul> To get the status of the solution, call `DescribeSolution`.
Wait until the status shows as ACTIVE before calling
`CreateSolutionVersion`.
<p class="title"> **Related APIs**
<ul> <li> `ListSolutions`
</li> <li> `CreateSolutionVersion`
</li> <li> `DescribeSolution`
</li> <li> `DeleteSolution`
</li> </ul> <ul> <li> `ListSolutionVersions`
</li> <li> `DescribeSolutionVersion`
</li> </ul>
"""
def create_solution(client, input, options \\ []) do
request(client, "CreateSolution", input, options)
end
@doc """
Trains or retrains an active solution. A solution is created using the
`CreateSolution` operation and must be in the ACTIVE state before calling
`CreateSolutionVersion`. A new version of the solution is created every
time you call this operation.
**Status**
A solution version can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> </ul> To get the status of the version, call
`DescribeSolutionVersion`. Wait until the status shows as ACTIVE before
calling `CreateCampaign`.
If the status shows as CREATE FAILED, the response includes a
`failureReason` key, which describes why the job failed.
<p class="title"> **Related APIs**
<ul> <li> `ListSolutionVersions`
</li> <li> `DescribeSolutionVersion`
</li> </ul> <ul> <li> `ListSolutions`
</li> <li> `CreateSolution`
</li> <li> `DescribeSolution`
</li> <li> `DeleteSolution`
</li> </ul>
"""
def create_solution_version(client, input, options \\ []) do
request(client, "CreateSolutionVersion", input, options)
end
@doc """
Removes a campaign by deleting the solution deployment. The solution that
the campaign is based on is not deleted and can be redeployed when needed.
A deleted campaign can no longer be specified in a
[GetRecommendations](https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html)
request. For more information on campaigns, see `CreateCampaign`.
"""
def delete_campaign(client, input, options \\ []) do
request(client, "DeleteCampaign", input, options)
end
@doc """
Deletes a dataset. You can't delete a dataset if an associated
`DatasetImportJob` or `SolutionVersion` is in the CREATE PENDING or IN
PROGRESS state. For more information on datasets, see `CreateDataset`.
"""
def delete_dataset(client, input, options \\ []) do
request(client, "DeleteDataset", input, options)
end
@doc """
Deletes a dataset group. Before you delete a dataset group, you must delete
the following:
<ul> <li> All associated event trackers.
</li> <li> All associated solutions.
</li> <li> All datasets in the dataset group.
</li> </ul>
"""
def delete_dataset_group(client, input, options \\ []) do
request(client, "DeleteDatasetGroup", input, options)
end
@doc """
Deletes the event tracker. Does not delete the event-interactions dataset
from the associated dataset group. For more information on event trackers,
see `CreateEventTracker`.
"""
def delete_event_tracker(client, input, options \\ []) do
request(client, "DeleteEventTracker", input, options)
end
@doc """
Deletes a filter.
"""
def delete_filter(client, input, options \\ []) do
request(client, "DeleteFilter", input, options)
end
@doc """
Deletes a schema. Before deleting a schema, you must delete all datasets
referencing the schema. For more information on schemas, see
`CreateSchema`.
"""
def delete_schema(client, input, options \\ []) do
request(client, "DeleteSchema", input, options)
end
@doc """
Deletes all versions of a solution and the `Solution` object itself. Before
deleting a solution, you must delete all campaigns based on the solution.
To determine what campaigns are using the solution, call `ListCampaigns`
and supply the Amazon Resource Name (ARN) of the solution. You can't delete
a solution if an associated `SolutionVersion` is in the CREATE PENDING or
IN PROGRESS state. For more information on solutions, see `CreateSolution`.
"""
def delete_solution(client, input, options \\ []) do
request(client, "DeleteSolution", input, options)
end
@doc """
Describes the given algorithm.
"""
def describe_algorithm(client, input, options \\ []) do
request(client, "DescribeAlgorithm", input, options)
end
@doc """
Gets the properties of a batch inference job including name, Amazon
Resource Name (ARN), status, input and output configurations, and the ARN
of the solution version used to generate the recommendations.
"""
def describe_batch_inference_job(client, input, options \\ []) do
request(client, "DescribeBatchInferenceJob", input, options)
end
@doc """
Describes the given campaign, including its status.
A campaign can be in one of the following states:
<ul> <li> CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE
FAILED
</li> <li> DELETE PENDING > DELETE IN_PROGRESS
</li> </ul> When the `status` is `CREATE FAILED`, the response includes the
`failureReason` key, which describes why.
For more information on campaigns, see `CreateCampaign`.
"""
def describe_campaign(client, input, options \\ []) do
request(client, "DescribeCampaign", input, options)
end
@doc """
Describes the given dataset. For more information on datasets, see
`CreateDataset`.
"""
def describe_dataset(client, input, options \\ []) do
request(client, "DescribeDataset", input, options)
end
@doc """
Describes the given dataset group. For more information on dataset groups,
see `CreateDatasetGroup`.
"""
def describe_dataset_group(client, input, options \\ []) do
request(client, "DescribeDatasetGroup", input, options)
end
@doc """
Describes the dataset import job created by `CreateDatasetImportJob`,
including the import job status.
"""
def describe_dataset_import_job(client, input, options \\ []) do
request(client, "DescribeDatasetImportJob", input, options)
end
@doc """
Describes an event tracker. The response includes the `trackingId` and
`status` of the event tracker. For more information on event trackers, see
`CreateEventTracker`.
"""
def describe_event_tracker(client, input, options \\ []) do
request(client, "DescribeEventTracker", input, options)
end
@doc """
Describes the given feature transformation.
"""
def describe_feature_transformation(client, input, options \\ []) do
request(client, "DescribeFeatureTransformation", input, options)
end
@doc """
Describes a filter's properties.
"""
def describe_filter(client, input, options \\ []) do
request(client, "DescribeFilter", input, options)
end
@doc """
Describes a recipe.
A recipe contains three items:
<ul> <li> An algorithm that trains a model.
</li> <li> Hyperparameters that govern the training.
</li> <li> Feature transformation information for modifying the input data
before training.
</li> </ul> Amazon Personalize provides a set of predefined recipes. You
specify a recipe when you create a solution with the `CreateSolution` API.
`CreateSolution` trains a model by using the algorithm in the specified
recipe and a training dataset. The solution, when deployed as a campaign,
can provide recommendations using the
[GetRecommendations](https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html)
API.
"""
def describe_recipe(client, input, options \\ []) do
request(client, "DescribeRecipe", input, options)
end
@doc """
Describes a schema. For more information on schemas, see `CreateSchema`.
"""
def describe_schema(client, input, options \\ []) do
request(client, "DescribeSchema", input, options)
end
@doc """
Describes a solution. For more information on solutions, see
`CreateSolution`.
"""
def describe_solution(client, input, options \\ []) do
request(client, "DescribeSolution", input, options)
end
@doc """
Describes a specific version of a solution. For more information on
solutions, see `CreateSolution`.
"""
def describe_solution_version(client, input, options \\ []) do
request(client, "DescribeSolutionVersion", input, options)
end
@doc """
Gets the metrics for the specified solution version.
"""
def get_solution_metrics(client, input, options \\ []) do
request(client, "GetSolutionMetrics", input, options)
end
@doc """
Gets a list of the batch inference jobs that have been performed off of a
solution version.
"""
def list_batch_inference_jobs(client, input, options \\ []) do
request(client, "ListBatchInferenceJobs", input, options)
end
@doc """
Returns a list of campaigns that use the given solution. When a solution is
not specified, all the campaigns associated with the account are listed.
The response provides the properties for each campaign, including the
Amazon Resource Name (ARN). For more information on campaigns, see
`CreateCampaign`.
"""
def list_campaigns(client, input, options \\ []) do
request(client, "ListCampaigns", input, options)
end
@doc """
Returns a list of dataset groups. The response provides the properties for
each dataset group, including the Amazon Resource Name (ARN). For more
information on dataset groups, see `CreateDatasetGroup`.
"""
def list_dataset_groups(client, input, options \\ []) do
request(client, "ListDatasetGroups", input, options)
end
@doc """
Returns a list of dataset import jobs that use the given dataset. When a
dataset is not specified, all the dataset import jobs associated with the
account are listed. The response provides the properties for each dataset
import job, including the Amazon Resource Name (ARN). For more information
on dataset import jobs, see `CreateDatasetImportJob`. For more information
on datasets, see `CreateDataset`.
"""
def list_dataset_import_jobs(client, input, options \\ []) do
request(client, "ListDatasetImportJobs", input, options)
end
@doc """
Returns the list of datasets contained in the given dataset group. The
response provides the properties for each dataset, including the Amazon
Resource Name (ARN). For more information on datasets, see `CreateDataset`.
"""
def list_datasets(client, input, options \\ []) do
request(client, "ListDatasets", input, options)
end
@doc """
Returns the list of event trackers associated with the account. The
response provides the properties for each event tracker, including the
Amazon Resource Name (ARN) and tracking ID. For more information on event
trackers, see `CreateEventTracker`.
"""
def list_event_trackers(client, input, options \\ []) do
request(client, "ListEventTrackers", input, options)
end
@doc """
Lists all filters that belong to a given dataset group.
"""
def list_filters(client, input, options \\ []) do
request(client, "ListFilters", input, options)
end
@doc """
Returns a list of available recipes. The response provides the properties
for each recipe, including the recipe's Amazon Resource Name (ARN).
"""
def list_recipes(client, input, options \\ []) do
request(client, "ListRecipes", input, options)
end
@doc """
Returns the list of schemas associated with the account. The response
provides the properties for each schema, including the Amazon Resource Name
(ARN). For more information on schemas, see `CreateSchema`.
"""
def list_schemas(client, input, options \\ []) do
request(client, "ListSchemas", input, options)
end
@doc """
Returns a list of solution versions for the given solution. When a solution
is not specified, all the solution versions associated with the account are
listed. The response provides the properties for each solution version,
including the Amazon Resource Name (ARN). For more information on
solutions, see `CreateSolution`.
"""
def list_solution_versions(client, input, options \\ []) do
request(client, "ListSolutionVersions", input, options)
end
@doc """
Returns a list of solutions that use the given dataset group. When a
dataset group is not specified, all the solutions associated with the
account are listed. The response provides the properties for each solution,
including the Amazon Resource Name (ARN). For more information on
solutions, see `CreateSolution`.
"""
def list_solutions(client, input, options \\ []) do
request(client, "ListSolutions", input, options)
end
@doc """
Updates a campaign by either deploying a new solution or changing the value
of the campaign's `minProvisionedTPS` parameter.
To update a campaign, the campaign status must be ACTIVE or CREATE FAILED.
Check the campaign status using the `DescribeCampaign` API.
<note> You must wait until the `status` of the updated campaign is `ACTIVE`
before asking the campaign for recommendations.
</note> For more information on campaigns, see `CreateCampaign`.
"""
def update_campaign(client, input, options \\ []) do
request(client, "UpdateCampaign", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "personalize"}
host = build_host("personalize", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonPersonalize.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/personalize.ex
| 0.89258 | 0.660008 |
personalize.ex
|
starcoder
|
defmodule ElixirScript.Compiler do
@moduledoc """
The entry point for the ElixirScript compilation process.
Takes the given module(s) and compiles them and all modules
and functions they use into JavaScript.
It will also take a path to Elixir files.
"""
@doc """
Takes either a module name, list of module names, or a path as
the entry point(s) of an application/library. From there
it will determine which modules and functions need
to be compiled.
Available options are:
* `output`: The path of the generated JavaScript file.
If output is `nil`, then generated code is sent to standard out
If output is a path, the generated code is placed in that path.
If path ends in `.js` then that will be the name of the file.
If a directory is given, file will be named `elixirscript.build.js`
* `root`: Optional root for imports of FFI JavaScript modules. Defaults to `.`.
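## Example
A usage sketch (the entry module and output directory are assumptions):
ElixirScript.Compiler.compile(MyApp.Main, output: "priv/static/js")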
"""
alias ElixirScript.{
State,
Translate,
FindUsedModules,
FindUsedFunctions,
Output
}
alias ElixirScript.ModuleSystems.ES
alias Kernel.ParallelCompiler
@spec compile_output(compiler_input, output: binary()) :: binary
def compile_output(path, opts \\ []) do
compile(path, opts) |> Map.to_list() |> hd |> elem(1) |> Map.get(:js_code)
end
@type compiler_input ::
atom
| [atom]
| binary
@spec compile(compiler_input, output: binary()) :: map
def compile(path, opts \\ [])
def compile(path, opts) when is_binary(path) do
opts = build_compiler_options(opts)
{:ok, pid} = State.start_link(opts)
path =
if String.ends_with?(path, [".ex", ".exs"]) do
path
else
Path.join([path, "**", "*.{ex,exs}"])
end
files = Path.wildcard(path)
ParallelCompiler.compile(files, each_module: &on_module_compile(pid, &1, &2, &3))
entry_modules =
pid
|> State.get_in_memory_modules()
|> Keyword.keys()
do_compile(entry_modules, pid, opts)
end
def compile(entry_modules, opts) do
opts = build_compiler_options(opts)
{:ok, pid} = State.start_link(opts)
entry_modules = List.wrap(entry_modules)
do_compile(entry_modules, pid, opts)
end
defp do_compile(entry_modules, pid, opts) do
FindUsedModules.execute(entry_modules, pid)
if opts.remove_unused_functions do
FindUsedFunctions.execute(entry_modules, pid)
end
modules = State.list_modules(pid)
Translate.execute(modules, pid)
modules = State.list_modules(pid)
result = Output.execute(modules, pid, opts)
State.stop(pid)
transform_output(modules, result, opts)
end
defp build_compiler_options(opts) do
remove_unused_functions? = Keyword.get(opts, :remove_unused_functions, true)
default_options =
Map.new()
|> Map.put(:output, Keyword.get(opts, :output))
|> Map.put(:format, :es)
|> Map.put(:root, Keyword.get(opts, :root, "."))
|> Map.put(:remove_unused_functions, remove_unused_functions?)
options = default_options
Map.put(options, :module_formatter, ES)
end
defp on_module_compile(pid, _file, module, beam) do
State.put_in_memory_module(pid, module, beam)
end
defp transform_output(modules, compiled_js, opts) do
output_path =
cond do
opts.output == nil or opts.output == :stdout ->
""
File.dir?(opts.output) ->
opts.output
true ->
Path.dirname(opts.output)
end
data = %{
ElixirScript.Core => %{
references: [],
last_modified: nil,
beam_path: nil,
source: nil,
js_path: Path.join(output_path, "ElixirScript.Core.js"),
diagnostics: [],
js_code: nil,
type: :ffi
}
}
Enum.reduce(modules, data, fn {module, info}, current_data ->
diagnostics =
Map.get(info, :diagnostics, [])
|> Enum.map(fn x ->
Map.put(x, :file, Map.get(info, :file))
end)
info = %{
references: Map.get(info, :used_modules, []),
last_modified: Map.get(info, :last_modified, nil),
beam_path: Map.get(info, :beam_path),
source: Map.get(info, :file),
js_path: Path.join(output_path, "#{module}.js"),
diagnostics: diagnostics
}
info =
case Keyword.get(compiled_js, module) do
[js_input_path, js_output_path] ->
last_modified =
case File.stat(js_input_path, time: :posix) do
{:ok, file_info} ->
file_info.mtime
_ ->
nil
end
info
|> Map.put(:last_modified, last_modified)
|> Map.put(:beam_path, nil)
|> Map.put(:source, js_input_path)
|> Map.put(:js_path, js_output_path)
|> Map.put(:js_code, nil)
|> Map.put(:type, :ffi)
js_code ->
info
|> Map.put(:js_path, Path.join(output_path, "#{module}.js"))
|> Map.put(:js_code, js_code)
|> Map.put(:type, :module)
end
Map.put(current_data, module, info)
end)
end
end
|
lib/elixir_script/compiler.ex
| 0.822474 | 0.436562 |
compiler.ex
|
starcoder
|
defmodule Scenic.Primitive.Sprites do
@moduledoc """
Draw one or more sprites from a single source image.
## Overview
The term "sprite" means one or more subsections of a larger image
that get rendered to the screen. You can do many things with sprites
including animations and zooming in and out of an image and more.
## Data Format
{ source_image_id, draw_commands }
`source_image_id` refers to an image in the `Scenic.Assets.Static`
library. This can be either the file name from your asset sources
or an alias that you set up in your configuration scripts.
`draw_commands` is a list of source/destination drawing commands that
are executed in order when the primitive renders.
`[ {{src_x, src_y}, {src_w, src_h}, {dst_x, dst_y}, {dst_w, dst_h}} ]`
Each draw command is an x/y position and width/height of a rectangle in
the source image, followed by the x/y position and width/height
of a rectangle in the destination space.
In other words, this copies rectangular images from the source
indicated by image_id and draws them in the coordinate space of
the graph.
The size of the destination rectangle does NOT need to be the same as the
source. This allows you to grow or shrink the image as needed. You can
use this to zoom in or zoom out of the source image.
## Animations
Sprites are common in the game industry and can be used to
create animations, manage large numbers of small images and more.
For example, in many games a character walking is built as a series
of frames in an animation that all live together in a single image
file. When it comes time to draw, the different frames are rendered
to the screen on after the other to give the appearance that the
character is animating.
A simpler example would be an image of a device with a blinking
light on it. The same device would be in the source image twice.
Once with the light on, and once with it off. Then you render the
appropriate portion of the source image on a timer.
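A sketch of the blinking-light case, assuming both device states sit side by side in
a 20x10 source image (all names and coordinates are illustrative):
```elixir
# light on: copy the left 10x10 region of the source image
graph |> sprites({"images/device.png", [{{0, 0}, {10, 10}, {0, 0}, {10, 10}}]})
# light off: copy the right 10x10 region instead
graph |> sprites({"images/device.png", [{{10, 0}, {10, 10}, {0, 0}, {10, 10}}]})
```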
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#sprites/3)
This example draws the same source rectangle twice in different locations.
The first is at full size, the second is expanded 10x.
```elixir
graph
|> sprites( { "images/my_sprites.png", [
{{0,0}, {10, 20}, {10, 10}, {10, 20}},
{{0,0}, {10, 20}, {100, 100}, {100, 200}},
]})
```
"""
use Scenic.Primitive
alias Scenic.Script
alias Scenic.Primitive
alias Scenic.Primitive.Style
alias Scenic.Assets.Static
@type draw_cmd :: {
{sx :: number, sy :: number},
{sw :: number, sh :: number},
{dx :: number, dy :: number},
{dw :: number, dh :: number}
}
@type draw_cmds :: [draw_cmd()]
@type t :: {image :: Static.id(), draw_cmds}
@type styles_t :: [:hidden | :scissor]
@styles [:hidden, :scissor]
@impl Primitive
@spec validate(t()) :: {:ok, t()} | {:error, String.t()}
def validate({image, cmds}) when is_list(cmds) do
with {:ok, image} <- validate_image(image),
{:ok, cmds} <- validate_commands(cmds) do
{:ok, {image, cmds}}
else
{:error, :command, cmd} -> err_bad_cmd(image, cmd)
{:error, :alias} -> err_bad_alias(image)
{:error, :font} -> err_is_font(image)
{:error, :not_found} -> err_missing_image(image)
end
end
def validate(data) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Sprites specification
Received: #{inspect(data)}
#{IO.ANSI.yellow()}
Sprites data must be formed like:
{static_image_id, [{{src_x,src_y}, {src_w,src_h}, {dst_x,dst_y}, {dst_w,dst_h}}]}
This means, given an image in the Scenic.Assets.Static library, copy a series of
sub-images from it into the specified positions.
The {src_x, src_y} is the upper-left location of the source sub-image to copy out.
{src_w, src_h} is the width / height of the source sub-image.
{dst_x, dst_y} is the location in local coordinate space to paste into.
{dst_w,dst_h} is the width / height of the destination image.
{dst_w,dst_h} and {src_w, src_h} do NOT need to be the same.
The source will be shrunk or expanded to fit the destination rectangle.#{IO.ANSI.default_color()}
"""
}
end
defp err_bad_cmd(image, cmd) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Sprites specification
Image: #{inspect(image)}
Invalid Command: #{inspect(cmd)}
#{IO.ANSI.yellow()}
Sprites data must be formed like:
{static_image_id, [{{src_x,src_y}, {src_w,src_h}, {dst_x,dst_y}, {dst_w,dst_h}}]}
This means, given an image in the Scenic.Assets.Static library, copy a series of
sub-images from it into the specified positions.
The {src_x, src_y} is the upper-left location of the source sub-image to copy out.
{src_w, src_h} is the width / height of the source sub-image.
{dst_x, dst_y} is the location in local coordinate space to paste into.
{dst_w,dst_h} is the width / height of the destination image.
{dst_w,dst_h} and {src_w, src_h} do NOT need to be the same.
The source will be shrunk or expanded to fit the destination rectangle.#{IO.ANSI.default_color()}
"""
}
end
defp err_bad_alias(image) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Sprites specification
Unmapped Image Alias: #{inspect(image)}
#{IO.ANSI.yellow()}
Sprites must use a valid image from your Scenic.Assets.Static library.
To resolve this, make sure the alias mapped to a file path in your config.
config :scenic, :assets,
module: MyApplication.Assets,
alias: [
parrot: "images/parrot.jpg"
]#{IO.ANSI.default_color()}
"""
}
end
defp err_missing_image(image) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Sprites specification
The image #{inspect(image)} could not be found.
#{IO.ANSI.yellow()}
Sprites must use a valid image from your Scenic.Assets.Static library.
To resolve this do the following checks.
1) Confirm that the file exists in your assets folder.
2) Make sure the image file is being compiled into your asset library.
If this file is new, you may need to "touch" your asset library module to cause it to recompile.
Maybe somebody will help add a filesystem watcher to do this automatically. (hint hint...)
3) Check that the asset module is defined in your config.
config :scenic, :assets,
module: MyApplication.Assets #{IO.ANSI.default_color()}
"""
}
end
defp err_is_font(image) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Sprites specification
The asset #{inspect(image)} is a font.
#{IO.ANSI.yellow()}
Sprites must use a valid image from your Scenic.Assets.Static library.
"""
}
end
defp validate_image(id) do
case Static.meta(id) do
{:ok, {Static.Image, _}} -> {:ok, id}
{:ok, {Static.Font, _}} -> {:error, :font}
_ -> {:error, :not_found}
end
end
defp validate_commands(commands) do
commands
|> Enum.reduce({:ok, commands}, fn
_, {:error, _} = error ->
error
{{src_x, src_y}, {src_w, src_h}, {dst_x, dst_y}, {dst_w, dst_h}}, acc
when is_number(src_x) and is_number(src_y) and
is_number(src_w) and is_number(src_h) and
is_number(dst_x) and is_number(dst_y) and
is_number(dst_w) and is_number(dst_h) ->
acc
cmd, _ ->
{:error, :command, cmd}
end)
end
# --------------------------------------------------------
# filter and gather styles
@doc """
Returns a list of styles recognized by this primitive.
"""
@impl Primitive
@spec valid_styles() :: styles_t()
def valid_styles(), do: @styles
# --------------------------------------------------------
# compiling a script is a special case and is handled in Scenic.Graph.Compiler
@doc false
@impl Primitive
@spec compile(primitive :: Primitive.t(), styles :: Style.t()) :: Script.t()
def compile(%Primitive{module: __MODULE__, data: {image, cmds}}, _styles) do
Script.draw_sprites([], image, cmds)
end
end
|
lib/scenic/primitive/sprites.ex
| 0.937712 | 0.90205 |
sprites.ex
|
starcoder
|
defmodule Akd.Fetch.Scp do
@moduledoc """
A native Hook module that comes shipped with Akd.
This module uses `Akd.Hook`.
Provides a set of operations that fetch source code using `scp` from a given
source to a destination.
Ensures cleanup by emptying the destination directory (runs by default;
controlled by the `run_ensure` option).
Doesn't have any Rollback operations.
# Options:
* `run_ensure`: `boolean`. Specifies whether to run the ensure commands or not.
* `ignore_failure`: `boolean`. Specifies whether to continue if this hook fails.
* `src`: `string`. Source of the code from where to scp the data.
* `exclude`: `list`. Scp all folders except the ones given in exclude.
# Defaults:
* `run_ensure`: `true`
* `ignore_failure`: `false`
* `src`: Current working directory, `.`
* `exclude`: `["_build", ".git", "deps"]`
"""
use Akd.Hook
@default_opts [run_ensure: true, ignore_failure: false, src: "."]
@doc """
Callback implementation for `get_hooks/2`.
This function returns a list of operations that can be used to fetch source
code using `scp` from a given source.
## Examples
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local(),
...> publish_to: Akd.Destination.local(),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Fetch.Scp.get_hooks(deployment, [exclude: []])
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "rsync -krav -e ssh . .", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
iex> Akd.Fetch.Scp.get_hooks(deployment, [src: Akd.Destination.local()])
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./*", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "rsync -krav -e ssh --exclude=\\"_build\\" --exclude=\\".git\\" --exclude=\\"deps\\" . .",
cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
"""
@spec get_hooks(Akd.Deployment.t(), Keyword.t()) :: list(Akd.Hook.t())
def get_hooks(deployment, opts) do
opts = uniq_merge(opts, @default_opts)
src = Keyword.get(opts, :src)
[fetch_hook(src, deployment, opts)]
end
# This function takes a source, a destination and options and
# returns an Akd.Hook.t struct using the form_hook DSL.
defp fetch_hook(src, deployment, opts) when is_binary(src) do
destination = Akd.DestinationResolver.resolve(:build, deployment)
dest = Akd.Destination.to_string(destination)
excludes = Keyword.get(opts, :exclude, ~w(_build .git deps))
form_hook opts do
main(rsync_cmd(src, dest, excludes), Akd.Destination.local())
ensure("rm -rf ./*", destination)
end
end
defp fetch_hook(%Akd.Destination{} = src, deployment, opts) do
src = Akd.Destination.to_string(src)
fetch_hook(src, deployment, opts)
end
# This function returns an rsync command with all the
# `exclude` switches added to it.
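# For example (a sketch with a hypothetical destination):
#
#     rsync_cmd(".", "user@host:/srv/app", ~w(_build .git))
#     # => ~s(rsync -krav -e ssh --exclude="_build" --exclude=".git" . user@host:/srv/app)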
defp rsync_cmd(src, dest, excludes) do
Enum.reduce(excludes, "rsync -krav -e ssh", fn ex, cmd ->
cmd <> " --exclude=\"#{ex}\""
end) <> " #{src} #{dest}"
end
# This function takes two keyword lists and merges them keeping the keys
# unique. If there are multiple values for a key, it takes the value from
# the first value of keyword1 corresponding to that key.
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
end
|
lib/akd/base/fetch/scp.ex
| 0.866486 | 0.630201 |
scp.ex
|
starcoder
|
defmodule CasAgent do
@moduledoc """
Implement Compare and Set using an ETS table to solve read concurrency
and an Agent to control write concurrency (the actual CAS).
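A minimal usage sketch (assuming the key is seeded once before any CAS;
the module itself does not provide an initializer):
CasAgent.start()
:ets.insert(:CAS_table, {:counter, %{v: 0, ts: CasAgent.new_ts()}})
{:ok, %{v: 1}} = CasAgent.cas(:counter, fn v -> v + 1 end)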
"""
@doc """
Start the ETS table and the Agent before anything
"""
def start do
:ets.new(:CAS_table, [:public,:set,:named_table])
Agent.start_link(fn -> [] end, name: :CAS_agent)
:ok
end
@doc """
Returns %{v: value, ts: timestamp}, where `value` is the value for the given
key in the ETS table and `timestamp` is the last modification time.
"""
def read(key) do
[{_, data}] = :ets.lookup(:CAS_table, key)
data
end
@doc """
Tries a Compare and Set sequence, setting the value for the given key
with the return value of the given function. The function is passed the
previous value.
It returns {:ok, %{v: value, ts: timestamp}} if it worked within the given
number of retries. {:error, :failed_cas} otherwise.
"""
def cas(key, fun, retries \\ 3) do
%{v: value, ts: ts} = read(key)
case write(key, ts, fun) do
{:ok, res} -> {:ok, res}
{:error, :wrong_ts} when retries > 0 -> cas(key, fun, retries - 1)
_ -> {:error, :failed_cas}
end
end
@doc """
Updates the value for the given key using the value returned by the given
function. The function will be passed the current value.
It returns {:ok, %{v: value, ts: timestamp}} if it worked.
The given `prev_ts` must match the current value of `ts` in the table at the
moment of writing. `{:error, :wrong_ts}` is returned otherwise.
"""
def write(key, prev_ts, fun) do
Agent.get_and_update :CAS_agent, fn(_)->
# use the Agent process to serialize operations
%{v: value, ts: ts} = read(key)
if prev_ts == ts do
new_value = fun.(value)
data = %{v: new_value, ts: new_ts()}
true = :ets.insert(:CAS_table, {key, data})
{{:ok, data}, []}
else
{{:error, :wrong_ts}, []}
end
end
end
@doc """
Get timestamp in seconds, microseconds, or nanoseconds
"""
def new_ts(scale \\ :seconds) do
{mega, sec, micro} = :os.timestamp
t = mega * 1_000_000 + sec
case scale do
:seconds -> t
:micro -> t * 1_000_000 + micro
:nano -> (t * 1_000_000 + micro) * 1_000
end
end
end
|
lib/cas_agent.ex
| 0.856677 | 0.801354 |
cas_agent.ex
|
starcoder
|
defmodule JsonapiPaginator do
@moduledoc """
Simple pagination links renderer for the JSON API
"""
@doc """
Renders links based on the data given in the params map:
```
%{
base_url: base_url,
page_number: page_number,
page_size: page_size,
total_pages: total_pages,
total_count: total_count
}
```
Returns a map of links, or `%{}` when the expected params are not given.
Sample return map:
```
%{
first: "http://localhost/api/v1/get_page?number=1&size=10",
last: "http://localhost/api/v1/get_page?number=10&size=10",
next: "http://localhost/api/v1/get_page?number=3&size=10",
prev: "http://localhost/api/v1/get_page?number=1&size=10",
self: "http://localhost/api/v1/get_page?number=2&size=10"
}
```
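For example, links like the above could be produced with a call like this
(note that `base_url` ends with "?", since the page params are appended
directly):
```
JsonapiPaginator.render_links(%{
  base_url: "http://localhost/api/v1/get_page?",
  page_number: 2,
  page_size: 10,
  total_pages: 10,
  total_count: 100
})
```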
"""
def render_links(
%{
base_url: _base_url,
page_number: _page_number,
page_size: _page_size,
total_pages: _total_pages,
total_count: _total_count
} = params
) do
%{
self:
params.base_url <>
"number=" <>
to_string(params.page_number) <> "&size=" <> to_string(params.page_size),
first: params.base_url <> "number=1&size=" <> to_string(params.page_size),
last:
params.base_url <>
"number=" <>
to_string(params.total_pages) <> "&size=" <> to_string(params.page_size)
}
|> put_next_link(params)
|> put_prev_link(params)
end
def render_links(_), do: %{}
defp put_prev_link(links, params) do
if params.page_number > 1 do
Map.put(
links,
:prev,
params.base_url <>
"number=" <>
to_string(params.page_number - 1) <> "&size=" <> to_string(params.page_size)
)
else
links
end
end
defp put_next_link(links, params) do
if params.page_number < params.total_pages do
Map.put(
links,
:next,
params.base_url <>
"number=" <>
to_string(params.page_number + 1) <> "&size=" <> to_string(params.page_size)
)
else
links
end
end
end
|
lib/jsonapi_paginator.ex
| 0.715424 | 0.421998 |
jsonapi_paginator.ex
|
starcoder
|
defmodule ScatterSwap.Scatterer do
@moduledoc """
Functions for rearranging the order of each digit in a reversable way. Uses
the sum of the digits in the passed list (which doesn't change regardless of
their order) as a key to record how they were scattered.
"""
use Bitwise
alias ScatterSwap.Util
@doc """
Rearranges the digits of a list using their sum as a seed.
iex> ScatterSwap.Scatterer.scatter([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
[5, 4, 0, 3, 8, 7, 9, 6, 1, 2]
Optionally takes a second argument, `spin`, which alters the arrangement of
the returned list.
iex> ScatterSwap.Scatterer.scatter([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], 421)
[2, 7, 6, 5, 9, 1, 0, 4, 3, 8]
"""
def scatter(list, spin \\ 0) do
sum_of_digits = sum_digits(list)
rotate_by = bxor(spin, sum_of_digits) * -1
list
|> Enum.reverse
|> do_scatter(rotate_by)
end
defp do_scatter([], _) do
[]
end
defp do_scatter(list, rotate_by) do
[ digit | tail ] = Util.rotate_list(list, rotate_by)
[ digit | do_scatter(tail, rotate_by) ]
end
@doc """
Reverses the result of `ScatterSwap.Scatterer.scatter/2`, returning the
original list.
iex> ScatterSwap.Scatterer.unscatter([5, 4, 0, 3, 8, 7, 9, 6, 1, 2])
[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
A second argument `spin` can be used as a seed to correctly reverse the
rearrangement.
iex> ScatterSwap.Scatterer.unscatter([2, 7, 6, 5, 9, 1, 0, 4, 3, 8], 421)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
"""
def unscatter(list, spin \\ 0) do
sum_of_digits = sum_digits(list)
rotate_by = bxor(sum_of_digits, spin)
list
|> do_unscatter(rotate_by)
|> Enum.reverse
end
defp do_unscatter([], _) do
[]
end
defp do_unscatter(list, rotate_by) do
[ digit | tail ] = list
output_list = [ digit | do_unscatter(tail, rotate_by) ]
Util.rotate_list(output_list, rotate_by)
end
defp sum_digits([]) do
0
end
defp sum_digits([ head | tail ]) do
head + sum_digits(tail)
end
end
|
lib/scatter_swap/scatterer.ex
| 0.774498 | 0.637186 |
scatterer.ex
|
starcoder
|
defmodule Ports.Rumble.Board.Overlaps do
alias Ports.Rumble.Tile
alias Ports.Rumble.Board.Common
@fix_overlap_precision 30
def fix_overlap_precision(), do: @fix_overlap_precision
@spec overlaps_any(number(), number(), %{any() => Tile.t()}) :: any()
def overlaps_any(x, y, tiles) do
Enum.reduce_while(Map.values(tiles), nil, fn tile, _acc ->
if x_overlap(tile.x, x) && y_overlap(tile.y, y) do
{:halt, tile}
else
{:cont, nil}
end
end)
end
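# A usage sketch (with hypothetical tiles; a tile overlaps when both its
# x and y distances are less than the tile width/height):
#
#     tiles = %{1 => %Tile{id: 1, x: 0, y: 0}}
#     overlaps_any(Common.tile_width() - 1, 0, tiles)
#     # => %Tile{id: 1, ...}
#     overlaps_any(Common.tile_width() + 1, 0, tiles)
#     # => nil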
def get_overlaps(x, y, tiles) do
Enum.reduce(Map.values(tiles), [], fn tile, acc ->
if x_overlap(tile.x, x) && y_overlap(tile.y, y) do
[tile | acc]
else
acc
end
end)
end
def fix_overlaps(main_tile, overlapping, all_tiles, all_groups) do
Enum.reduce(overlapping, %{}, fn tile, acc ->
case tile.group_id do
nil ->
{fx, fy} = fix_pos_hopping(tile, main_tile, all_tiles)
Map.put(acc, tile.id, %{tile | x: fx, y: fy})
_ ->
new_children = fix_pos_hopping_group(tile, main_tile, all_tiles, all_groups)
Map.merge(acc, new_children)
end
end)
end
defp x_overlap(one, two) do
Kernel.abs(one - two) < Common.tile_width()
end
defp y_overlap(one, two) do
Kernel.abs(one - two) < Common.tile_height()
end
defp fix_pos_hopping(move_tile, main_tile, all_tiles) do
{sx, sy} = suggested_pos_given_overlap(move_tile, main_tile)
overlap = overlaps_any(sx, sy, Map.delete(all_tiles, move_tile.id))
if is_nil(overlap) do
{sx, sy}
else
fix_pos_hopping(%{move_tile | x: sx, y: sy}, main_tile, all_tiles)
end
end
def fix_pos_hopping_group(lead_move_tile, main_tile, all_tiles, all_groups) do
children = Map.get(all_groups, lead_move_tile.group_id).children
{sx, sy} = suggested_pos_given_overlap(lead_move_tile, main_tile)
{delta_x, delta_y} = {sx - lead_move_tile.x, sy - lead_move_tile.y}
exp_children =
Enum.reduce(Map.take(all_tiles, children), %{}, fn {_idx, tile}, acc ->
{fx, fy} = {tile.x + delta_x, tile.y + delta_y}
Map.put(acc, tile.id, %{tile | x: fx, y: fy})
end)
any_overlap =
Enum.reduce_while(exp_children, false, fn {_idx, tile}, _acc ->
case overlaps_any(tile.x, tile.y, Map.drop(all_tiles, children)) do
nil ->
{:cont, false}
_ ->
{:halt, true}
end
end)
if any_overlap do
fix_pos_hopping_group(
%{lead_move_tile | x: sx, y: sy},
main_tile,
Map.merge(all_tiles, exp_children),
all_groups
)
else
exp_children
end
end
defp suggested_pos_given_overlap(to_move, static) do
x = coordinate_pos_fix(to_move.x, static.x)
y = coordinate_pos_fix(to_move.y, static.y)
{x, y}
end
defp coordinate_pos_fix(move, static) do
if move < static do
move - @fix_overlap_precision
else
move + @fix_overlap_precision
end
end
end
|
harbor/lib/ports/rumble/board/overlaps.ex
| 0.632843 | 0.480905 |
overlaps.ex
|
starcoder
|
defmodule Chankins.Generator do
@moduledoc """
Generator is used to render template files and dynamic data with different render engines based on file extensions.
Even multiple rendering runs are supported. For a file like "layout.md.eex" the EExEngine is used in the first run and
the MarkdownEngine in a second run with the result of the EExEngine. This mechanism can build an arbitrary deep pipline of
intermediate rendering results with one single result
All available rendering engines can be configured in the Mix config.exs in a map with the extension as key and the engine as value.
config :chankins, Chankins.Generator,
engines: %{".md" => Chankins.Generator.MarkdownEngine, ".eex" => Chankins.Generator.EExEngine}
For all unknown extensions the FileEngine is used.
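A minimal usage sketch (with a hypothetical template path):
Chankins.Generator.generate("templates/post.html.eex", title: "Hello")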
"""
alias Chankins.Generator.{FileEngine, Context}
@engines_config :engines
@default_engine FileEngine
def generate(template, assigns) do
render_file template, assigns
end
defp render_file(path, assigns) do
engine = get_engine_for_path path
context = Context.build(path, engine)
assigns = Keyword.put(assigns, :context, context)
rendered_content = engine.render_file(path, assigns)
next_pass(rendered_content, context, assigns)
end
def next_pass(content, old_context, assigns) do
if render_again? old_context do
new_file = Path.basename(old_context.file, old_context.ext)
engine = get_engine_for_path new_file
new_context = Context.build(Path.join(old_context.dir, new_file), engine)
render_content content, new_context, assigns
else
content
end
end
@doc """
Checks the current filename for multiple extensions, e.g. layout.html.eex and returns true in this case.
iex> true == render_again?(%Context{file: "layout.html.eex"})
iex> false == render_again?(%Context{file: "layout.html"})
"""
defp render_again?(context) do
"" != Path.basename(context.file, context.ext)
|> Path.extname()
end
defp render_content(content, context, assign) do
context.engine.render_content(content, assign)
end
def include(path, context, assigns) do
path
|> Path.expand(context.dir)
|> render_file(assigns)
end
defp get_engine_for_path(path) do
Path.extname(path) |> get_engine_for_extension
end
defp get_engine_for_extension(ext) do
Application.get_env(:chankins, Chankins.Generator, [])
|> Keyword.get(@engines_config, %{})
|> Map.get(ext, @default_engine)
end
defmodule Context do
defstruct file: nil, dir: nil, ext: "", engine: FileEngine
def build(file_path, engine \\ nil) do
file_name = Path.basename(file_path)
dir = Path.dirname(file_path)
ext = Path.extname(file_path)
%Context{file: file_name, dir: dir, ext: ext, engine: engine}
end
end
end
|
lib/chankins/generator/generator.ex
| 0.653127 | 0.59408 |
generator.ex
|
starcoder
|
defmodule RdbParser.RedisList do
@moduledoc false
alias RdbParser.RedisString
# Encodings for ziplist members. This is a single byte that determines how the
# length is encoded in the following bytes.
@enc_8 254
@enc_16 192
@enc_24 240
@enc_32 208
@enc_64 224
# @eof 255
@doc """
Returns {list, rest} where list is the List of entries, and rest is the
remaining binary to be parsed.
"""
@spec parse_quicklist(binary) :: :incomplete | {list(integer | binary), binary}
def parse_quicklist(data) do
with {num_ziplists, rest} <- RdbParser.parse_length(data),
{backward_encoded_ziplists, unused} <- extract_encoded_ziplists(rest, num_ziplists),
encoded_ziplists <- Enum.reverse(backward_encoded_ziplists),
list when is_list(list) <-
encoded_ziplists |> Enum.map(&parse_ziplist/1) |> List.flatten() do
{list, unused}
else
:incomplete -> :incomplete
end
end
# Need to move this to a separate module as it's a generic method for parsing multiple types.
def parse_ziplist(""), do: :incomplete
def parse_ziplist(
<<_total_size::little-integer-size(32), _offset_to_tail::little-integer-size(32),
num_entries::little-integer-size(16), payload::binary>>
) do
parse_ziplist_entries(payload, num_entries, [])
end
defp parse_ziplist_entries(_, 0, list) do
Enum.reverse(list)
end
# Previous entry is 32-bit length
defp parse_ziplist_entries(
<<254, _prev_len::size(32), rest::binary()>>,
num_entries,
list
) do
case parse_ziplist_entry(rest) do
:incomplete ->
:incomplete
{item, rest} ->
parse_ziplist_entries(rest, num_entries - 1, [item | list])
end
end
# previous entry is 8-bit length
defp parse_ziplist_entries(
<<_prev_len::size(8), rest::binary()>>,
num_entries,
list
) do
case parse_ziplist_entry(rest) do
:incomplete ->
:incomplete
{item, rest} ->
parse_ziplist_entries(rest, num_entries - 1, [item | list])
end
end
# 8 bit signed integer
defp parse_ziplist_entry(<<@enc_8, num::little-signed-integer-size(8), rest::binary>>) do
{num, rest}
end
# 16 bit signed integer
defp parse_ziplist_entry(<<@enc_16, num::little-signed-size(16), rest::binary>>) do
{num, rest}
end
# 24 bit signed integer
defp parse_ziplist_entry(<<@enc_24, num::little-signed-size(24), rest::binary>>) do
{num, rest}
end
defp parse_ziplist_entry(<<@enc_32, num::little-signed-size(32), rest::binary>>) do
{num, rest}
end
defp parse_ziplist_entry(<<@enc_64, num::little-signed-size(64), rest::binary>>) do
{num, rest}
end
defp parse_ziplist_entry(<<15::size(4), numcode::size(4), rest::binary>>) do
{numcode - 1, rest}
end
# The 6/14/32 bit length strings are handled the same as a normal string.
defp parse_ziplist_entry(binary) do
case RedisString.parse(binary) do
:incomplete ->
:incomplete
{entry, rest} ->
{entry, rest}
end
end
defp extract_encoded_ziplists(rest, num_ziplists) do
1..num_ziplists
|> Enum.reduce({[], rest}, fn _, {encoded_ziplists, rest} ->
case RedisString.parse(rest) do
:incomplete ->
:incomplete
{encoded_ziplist, unused} ->
{[encoded_ziplist | encoded_ziplists], unused}
end
end)
end
# NOTE: THE FOLLOWING IS UNTESTED.
def parse(binary) do
{num_entries, rest} = RdbParser.parse_length(binary)
parse_list_elements(rest, num_entries, [])
end
defp parse_list_elements(rest, 0, list), do: {list, rest}
defp parse_list_elements(rest, entries_left, list) do
case RedisString.parse(rest) do
:incomplete -> :incomplete
{str, rest} -> parse_list_elements(rest, entries_left - 1, [str | list])
end
end
end
|
lib/rdb_parser/redis_list.ex
| 0.744471 | 0.436382 |
redis_list.ex
|
starcoder
|
defmodule Kl.Reader do
require IEx
def tokenise(expr) do
expr
|> String.replace(~r/([\(\)])/, " \\1 ") # NOTE: also replaces "(" and ")" inside strings
|> split
end
def split(expr) do
Enum.reduce(
String.split(expr, ~r/("[^"]*")/, include_captures: true),
[],
fn(s, acc) ->
if Regex.match?(~r/("[^"]*")/, s) do
# NOTE: fix wrong replacement of "(", ")" inside strings
acc ++ [{:string, String.replace(s, ~r/( ([\(\)]) )/, "\\2")}]
else
acc ++ String.split(s)
end
end
)
end
defp atomise(token) do
cond do
# If the token is enclosed in double quotes
match?({:string, _}, token) ->
{:string, s} = token
String.slice(s, 1, String.length(s) - 2)
# If the token contains whitespace, it's not a bloody token.
token =~ ~r/\s/ ->
throw {:error, "Unexpected whitespace found in token: #{token}"}
# If the token contains digits separated by a decimal point
token =~ ~r/^-?\d+\.\d+$/ ->
String.to_float token
# If the token contains only digits
token =~ ~r/^-?\d+$/ ->
String.to_integer token
token == "true" ->
true
token == "false" ->
false
# If the token is a valid identifier
token =~ ~r/^[^\d\(\)\.,][^\(\),]*$/ ->
String.to_atom token
:else ->
throw {:error, "Cannot parse token: #{token}"}
end
end
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def read([]) do
[]
end
def read(["(" | _tokens] = all_tokens) do
{fst, snd} = Enum.split(all_tokens, matching_paren_index(all_tokens))
[read(Enum.drop(fst, 1)) | read(Enum.drop(snd, 1))]
end
def read([")" | _tokens]) do
throw {:error, "Unexpected list delimiter while reading"}
end
def read([token | tokens]) do
[atomise(token) | read(tokens)]
end
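# A hedged end-to-end sketch (assuming balanced parentheses):
#
#     "(+ 1 2)" |> Kl.Reader.tokenise() |> Kl.Reader.read()
#     # => [[:+, 1, 2]]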
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
defp matching_paren_index(tokens, type \\ {"(", ")"}) do
tokens
|> Enum.with_index
|> Enum.drop(1)
|> do_matching_paren_index([], type)
end
defp do_matching_paren_index([], _stack, _type) do
nil
end
defp do_matching_paren_index([{open, _i} | tokens], stack, {open, _close} = type) do
do_matching_paren_index(tokens, [open | stack], type)
end
defp do_matching_paren_index([{close, i} | _tokens], [], {_open, close}) do
i
end
defp do_matching_paren_index([{close, _i} | tokens], stack, {_open, close} = type) do
do_matching_paren_index(tokens, Enum.drop(stack, 1), type)
end
defp do_matching_paren_index([_token | tokens], stack, type) do
do_matching_paren_index(tokens, stack, type)
end
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def check_parens(tokens, stack \\ [])
def check_parens([], []) do
true
end
def check_parens([], [_|_]) do
false
end
def check_parens(["(" | tokens], stack) do
check_parens(tokens, ["(" | stack])
end
def check_parens([")" | _tokens], []) do
false
end
def check_parens([")" | tokens], stack) do
check_parens(tokens, Enum.drop(stack, 1))
end
def check_parens([_token | tokens], stack) do
check_parens(tokens, stack)
end
end
|
lib/reader.ex
| 0.535584 | 0.4099 |
reader.ex
|
starcoder
|
defmodule Vapor.Provider.Dotenv do
@moduledoc """
The dotenv config provider will look for a `.env` file and load all of
the values for that file. The values can be written like so:
```
DATABASE_URL=https://localhost:9432
PORT=4000
REDIS_HOST=1234
```
Multiline variables can be written using bash-style heredocs like so:
```
API_PRIVATE_KEY=<< EOF
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
EOF
PORT=4000
REDIS_HOST=1234
```
If the file can't be found then this provider will still return an ok but
will (obviously) not load any configuration values. The primary use case for
this provider is local development where it might be inconvenient to add all
of the necessary environment variables on your local machine and it makes
tradeoffs for that use case.
## Existing environment variables
By default the dotenv provider won't overwrite any existing environment variables.
You can change this by setting the `overwrite` key to `true`:
%Dotenv{overwrite: true}
## File hierarchy
If no file is specified then the dotenv provider will load these files in this
order. Each succeeding file is loaded over the previous. In these examples `ENV`
will be the current mix environment: `dev`, `test`, or `prod`.
* `.env`
* `.env.ENV`
* `.env.local`
* `.env.ENV.local`
You should commit `.env` and `.env.ENV` files to your project and ignore any
`.local` files. This allows users to provide a custom setup if they need to
do that.
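A minimal usage sketch (assuming the standard Vapor provider pipeline; this
provider returns no config values itself and only populates the system
environment):
providers = [%Vapor.Provider.Dotenv{}]
{:ok, _} = Vapor.load(providers)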
"""
defstruct filename: nil, overwrite: false
defimpl Vapor.Provider do
def load(%{filename: nil, overwrite: overwrite}) do
# Get the environment from mix. If mix isn't available we assume we're in
# a prod release
env = if Code.ensure_loaded?(Mix), do: Mix.env(), else: "prod"
files = [".env", ".env.#{env}", ".env.local", ".env.#{env}.local"]
files
|> Enum.reduce(%{}, fn file, acc -> Map.merge(acc, load_file(file)) end)
|> put_vars(overwrite)
{:ok, %{}}
end
def load(%{filename: filename, overwrite: overwrite}) do
filename
|> load_file
|> put_vars(overwrite)
{:ok, %{}}
end
defp load_file(file) do
case File.read(file) do
{:ok, contents} ->
parse(contents)
_ ->
%{}
end
end
def put_vars(vars, overwrite) do
for {k, v} <- vars do
if overwrite || System.get_env(k) == nil do
System.put_env(k, v)
end
end
end
defp parse(contents) do
contents
|> String.split(~r/\n/, trim: true)
|> Enum.reduce([], &parse/2)
|> Enum.into(%{})
end
defp parse(line, {key, delimiter, heredoc_acc, acc}) do
if String.trim(line) == delimiter do
value =
heredoc_acc
|> Enum.reverse()
|> Enum.join("\n")
[{key, value} | acc]
else
{key, delimiter, [line | heredoc_acc], acc}
end
end
defp parse(line, acc) do
if comment?(line) do
acc
else
line
|> String.split("=", parts: 2)
|> parse_pair(acc)
end
end
defp parse_pair([key, value], acc) do
if String.length(key) > 0 && String.length(value) > 0 do
key = String.trim(key)
value = String.trim(value)
case starting_heredoc(value) do
[_, delimiter] -> {key, delimiter, [], acc}
_ -> [{key, value} | acc]
end
else
acc
end
end
defp parse_pair(_, acc) do
acc
end
defp starting_heredoc(value) do
Regex.run(~R/<<\s*['"]?([A-Z]+)['"]?\s*/, value)
end
defp comment?(line) do
Regex.match?(~R/\A\s*#/, line)
end
end
end
|
lib/vapor/providers/dotenv.ex
| 0.799521 | 0.781997 |
dotenv.ex
|
starcoder
|
defmodule Annex.Layer.Activation do
@moduledoc """
The Activation layer is the Annex.Layer that is responsible for
applying an activation function to the data during the feedforward
and supplying the gradient function (derivative) of the activation
function to the Backprops during backpropagation.
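A minimal sketch of constructing an activation and applying its function
(the values follow from the definitions below):
layer = Annex.Layer.Activation.from_name(:sigmoid)
layer.activator.(0.0)
# => 0.5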
"""
use Annex.Debug, debug: true
alias Annex.AnnexError
alias Annex.Data
alias Annex.Layer
alias Annex.Layer.Activation
alias Annex.Layer.Backprop
alias Annex.LayerConfig
@type func_type :: :float | :list
@type func_name :: :relu | :sigmoid | :tanh | {:relu, number()}
@type t :: %__MODULE__{
activator: (number -> number),
derivative: (number -> number),
func_type: func_type(),
name: atom()
}
@type data :: Data.data()
@behaviour Layer
defstruct [
:activator,
:derivative,
:name,
:outputs,
:inputs,
:func_type
]
@impl Layer
@spec init_layer(LayerConfig.t(Activation)) :: t()
def init_layer(%LayerConfig{} = cfg) do
case LayerConfig.details(cfg) do
%{name: name} -> from_name(name)
end
end
@spec from_name(func_name()) :: t() | no_return()
def from_name(name) do
name
|> case do
{:relu, threshold} ->
%Activation{
activator: fn n -> max(n, threshold) end,
derivative: fn n -> relu_deriv(n, threshold) end,
func_type: :float,
name: name
}
:relu ->
%Activation{
activator: &relu/1,
derivative: &relu_deriv/1,
func_type: :float,
name: name
}
:sigmoid ->
%Activation{
activator: &sigmoid/1,
derivative: &sigmoid_deriv/1,
func_type: :float,
name: name
}
:tanh ->
%Activation{
activator: &tanh/1,
derivative: &tanh_deriv/1,
func_type: :float,
name: name
}
:softmax ->
%Activation{
activator: &softmax/1,
derivative: &tanh_deriv/1,
func_type: :list,
name: name
}
_ ->
raise %AnnexError{
message: "unknown activation name",
details: [
name: name
]
}
end
end
@impl Layer
@spec feedforward(t(), data()) :: {t(), data()}
def feedforward(%Activation{} = layer, inputs) do
outputs = generate_outputs(layer, inputs)
{%Activation{layer | outputs: outputs, inputs: inputs}, outputs}
end
@impl Layer
@spec backprop(t(), Data.data(), Backprop.t()) :: {t(), Data.data(), Backprop.t()}
def backprop(%Activation{} = layer, error, props) do
derivative = get_derivative(layer)
# name = get_name(layer)
# next_error =
# layer
# |> get_inputs()
# |> Data.apply_op({:derivative, name}, [derivative])
# |> Data.apply_op(:multiply, [error])
{layer, error, Backprop.put_derivative(props, derivative)}
end
@spec generate_outputs(t(), Data.data()) :: Data.data()
def generate_outputs(%Activation{} = layer, inputs) do
activation = get_activator(layer)
name = get_name(layer)
Data.apply_op(inputs, name, [activation])
end
@spec get_activator(t()) :: (number() -> number())
def get_activator(%Activation{activator: act}), do: act
@spec get_derivative(t()) :: any()
def get_derivative(%Activation{derivative: deriv}), do: deriv
@spec get_name(t()) :: any()
def get_name(%Activation{name: name}), do: name
@spec get_inputs(t()) :: Data.data()
def get_inputs(%Activation{inputs: inputs}), do: inputs
@spec relu(float()) :: float()
def relu(n), do: max(n, 0.0)
@spec relu_deriv(float()) :: float()
def relu_deriv(x), do: relu_deriv(x, 0.0)
@spec relu_deriv(float(), float()) :: float()
def relu_deriv(x, threshold) when x > threshold, do: 1.0
def relu_deriv(_, _), do: 0.0
@spec sigmoid(float()) :: float()
def sigmoid(n) when n > 100, do: 1.0
def sigmoid(n) when n < -100, do: 0.0
def sigmoid(n) do
1.0 / (1.0 + :math.exp(-n))
end
@spec sigmoid_deriv(float()) :: float()
def sigmoid_deriv(x) do
fx = sigmoid(x)
fx * (1.0 - fx)
end
@spec softmax(data()) :: data()
def softmax(values) when is_list(values) do
exps = Enum.map(values, fn vx -> :math.exp(vx) end)
exps_sum = Enum.sum(exps)
Enum.map(exps, fn e -> e / exps_sum end)
end
@spec tanh(float()) :: float()
def tanh(n) do
:math.tanh(n)
end
@spec tanh_deriv(float()) :: float()
def tanh_deriv(x) do
1.0 - (x |> :math.tanh() |> :math.pow(2))
end
defimpl Inspect do
@spec inspect(Activation.t(), any) :: String.t()
def inspect(%Activation{name: name}, _) do
"#Activation<[#{Kernel.inspect(name)}]>"
end
end
end
|
lib/annex/layer/activation.ex
| 0.894812 | 0.621756 |
activation.ex
|
starcoder
|
defmodule Example.IdSpecs do
alias Example.{User, Post, Comment}
alias Grax.Id
import ExUnit.Assertions
defmodule FlatNs do
use Grax.Id.Spec
namespace "http://example.com/", prefix: :ex do
end
end
defmodule FlatNsWithVocabTerms do
use Grax.Id.Spec
alias Example.NS.EX
namespace EX, prefix: :ex do
end
end
defmodule FlatBase do
use Grax.Id.Spec
alias Example.NS.EX
base EX do
end
end
defmodule NestedBase do
use Grax.Id.Spec
alias Example.NS.EX
namespace EX do
base "foo/" do
end
end
end
defmodule NestedNs do
use Grax.Id.Spec
namespace "http://example.com/", prefix: :ex do
namespace "foo/", prefix: :foo do
namespace "bar/", prefix: :bar
namespace "baz/", prefix: :baz
end
namespace "qux/", prefix: :qux
end
end
defmodule NsWithExpressions do
use Grax.Id.Spec
@domain "http://example.com/"
def domain, do: @domain
@path "sub/"
namespace @domain do
base @path do
end
end
end
defmodule NsWithExpressions2 do
use Grax.Id.Spec
namespace NsWithExpressions.domain(), prefix: :ex do
end
end
defmodule UrnNs do
use Grax.Id.Spec
urn :isbn do
end
urn :uuid do
end
end
defmodule GenericIds do
use Grax.Id.Spec
namespace "http://example.com/", prefix: :ex do
id_schema "users/{name}", schema: User
id Post, "posts/{slug}"
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("users/{name}"),
schema: User
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("posts/{slug}"),
schema: Post
}
end
end
defmodule GenericShortIds do
use Grax.Id.Spec
namespace "http://example.com/", prefix: :ex do
id User.name()
id Post.slug()
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{name}"),
schema: User
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{slug}"),
schema: Post
}
end
end
defmodule MultipleSchemas do
use Grax.Id.Spec
import Grax.Id.Hash
namespace "http://example.com/", prefix: :ex do
id [Example.MultipleSchemasA, Example.MultipleSchemasB], "{foo}"
hash [Post, Comment], data: :content, algorithm: :md5
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(:foo) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{foo}"),
schema: [Example.MultipleSchemasA, Example.MultipleSchemasB]
}
end
def expected_id_schema(:content) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{hash}"),
schema: [Post, Comment],
extensions: [
%Grax.Id.Hash{algorithm: :md5, data_variable: :content}
]
}
end
end
defmodule UrnIds do
use Grax.Id.Spec
urn :example do
id User, "{name}"
id Post.slug()
end
urn :other do
id Example.Datatypes.integer()
end
def expected_id_schema(User) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :example, string: "urn:example:"},
template: Example.IdSpecs.compiled_template("{name}"),
schema: User
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :example, string: "urn:example:"},
template: Example.IdSpecs.compiled_template("{slug}"),
schema: Post
}
end
def expected_id_schema(:integer) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :other, string: "urn:other:"},
template: Example.IdSpecs.compiled_template("{integer}"),
schema: Example.Datatypes
}
end
end
defmodule GenericUuids do
use Grax.Id.Spec
import Grax.Id.UUID
namespace "http://example.com/", prefix: :ex do
uuid schema: User, version: 4, format: :hex
id_schema "posts/{uuid}", schema: Post, extensions: Grax.Id.UUID, uuid_version: 4
uuid Comment, version: 1
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: User,
extensions: [%Grax.Id.UUID{format: :hex, version: 4}]
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("posts/{uuid}"),
schema: Post,
extensions: [%Grax.Id.UUID{format: :default, version: 4}]
}
end
def expected_id_schema(Comment) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Comment,
extensions: [%Grax.Id.UUID{format: :default, version: 1}]
}
end
end
defmodule HashUuids do
use Grax.Id.Spec
import Grax.Id.UUID
@custom_namespace UUID.uuid4()
namespace "http://example.com/", prefix: :ex do
uuid User, version: 5, namespace: :url, name_var: :canonical_email
uuid Post, version: 3, namespace: @custom_namespace, name_var: :slug
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: User,
extensions: [
%Grax.Id.UUID{format: :default, version: 5, namespace: :url, name_var: :canonical_email}
]
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Post,
extensions: [
%Grax.Id.UUID{
format: :default,
version: 3,
namespace: @custom_namespace,
name_var: :slug
}
]
}
end
end
defmodule ShortUuids do
use Grax.Id.Spec
import Grax.Id.UUID
namespace "http://example.com/", prefix: :ex do
uuid5 User, namespace: :url, name_var: :canonical_email, format: :hex
uuid4 Post
uuid1 schema: Comment, format: :hex, template: "comments/{uuid}"
uuid5 Example.SelfLinked.name(), namespace: :url
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: User,
extensions: [
%Grax.Id.UUID{
format: :hex,
version: 5,
namespace: :url,
name_var: :canonical_email
}
]
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Post,
extensions: [%Grax.Id.UUID{format: :default, version: 4}]
}
end
def expected_id_schema(Comment) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("comments/{uuid}"),
schema: Comment,
extensions: [%Grax.Id.UUID{format: :hex, version: 1}]
}
end
def expected_id_schema(Example.SelfLinked) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Example.SelfLinked,
extensions: [
%Grax.Id.UUID{
format: :default,
version: 5,
namespace: :url,
name_var: :name
}
]
}
end
end
defmodule Foo do
use Grax.Id.Spec
import Grax.Id.UUID
namespace "http://example.com/", prefix: :ex do
uuid4 Example.WithIdSchema
id Example.WithIdSchemaNested, "bar/{bar}"
end
def expected_id_schema(Example.WithIdSchema) do
%Id.Schema{
namespace: Example.IdSpecs.expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Example.WithIdSchema,
extensions: [%Grax.Id.UUID{format: :default, version: 4}]
}
end
def expected_id_schema(Example.WithIdSchemaNested) do
%Id.Schema{
namespace: Example.IdSpecs.expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("bar/{bar}"),
schema: Example.WithIdSchemaNested
}
end
end
defmodule Hashing do
use Grax.Id.Spec
import Grax.Id.Hash
namespace "http://example.com/", prefix: :ex do
hash User, data: :canonical_email, algorithm: :sha512
hash Post, data: :content, algorithm: :sha256
hash Comment.content(), algorithm: :md5
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{hash}"),
schema: User,
extensions: [
%Grax.Id.Hash{algorithm: :sha512, data_variable: :canonical_email}
]
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{hash}"),
schema: Post,
extensions: [
%Grax.Id.Hash{algorithm: :sha256, data_variable: :content}
]
}
end
def expected_id_schema(Comment) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{hash}"),
schema: Comment,
extensions: [
%Grax.Id.Hash{algorithm: :md5, data_variable: :content}
]
}
end
end
defmodule UuidUrns do
use Grax.Id.Spec
import Grax.Id.UUID
urn :uuid do
uuid4 User
uuid5 Post.content(), namespace: :url
end
def expected_id_schema(User) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :uuid, string: "urn:uuid:"},
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: User,
extensions: [%Grax.Id.UUID{format: :urn, version: 4}]
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :uuid, string: "urn:uuid:"},
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Post,
extensions: [
%Grax.Id.UUID{
format: :urn,
version: 5,
namespace: :url,
name_var: :content
}
]
}
end
end
defmodule HashUrns do
use Grax.Id.Spec
import Grax.Id.Hash
urn :sha1 do
hash Post.content(), algorithm: :sha
end
urn :hash do
hash Comment.content(), template: ":sha256:{hash}", algorithm: :sha256
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :sha1, string: "urn:sha1:"},
template: Example.IdSpecs.compiled_template("{hash}"),
schema: Post,
extensions: [
%Grax.Id.Hash{algorithm: :sha, data_variable: :content}
]
}
end
def expected_id_schema(Comment) do
%Id.Schema{
namespace: %Id.UrnNamespace{nid: :hash, string: "urn:hash:"},
template: Example.IdSpecs.compiled_template(":sha256:{hash}"),
schema: Comment,
extensions: [
%Grax.Id.Hash{algorithm: :sha256, data_variable: :content}
]
}
end
end
defmodule BlankNodes do
use Grax.Id.Spec
blank_node User
namespace "http://example.com/", prefix: :ex do
id Example.SelfLinked.name()
blank_node [Post, Comment, Example.WithBlankNodeIdSchema]
id Example.Datatypes.string()
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(User), do: Grax.Id.Schema.new_blank_node_schema(nil, User)
def expected_id_schema(Example.WithBlankNodeIdSchema) do
Grax.Id.Schema.new_blank_node_schema(
expected_namespace(:ex),
[Post, Comment, Example.WithBlankNodeIdSchema]
)
end
def expected_id_schema(Example.SelfLinked) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{name}"),
schema: Example.SelfLinked
}
end
def expected_id_schema(Example.Datatypes) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{string}"),
schema: Example.Datatypes
}
end
end
defmodule WithCounter do
use Grax.Id.Spec
alias Grax.Id.Counter
namespace "http://example.com/", prefix: :ex do
id_schema "users/{counter}", schema: User, counter: :user
id Post, "posts/{counter}", counter: :post, counter_adapter: Grax.Id.Counter.TextFile
namespace "comments/", counter_adapter: Grax.Id.Counter.TextFile do
id Comment.counter(), counter: :comment
end
end
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_namespace(:comments) do
%Id.Namespace{
parent: expected_namespace(:ex),
uri: "http://example.com/comments/",
options: [counter_adapter: Grax.Id.Counter.TextFile]
}
end
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("users/{counter}"),
schema: User,
counter: {Counter.Dets, :user}
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("posts/{counter}"),
schema: Post,
counter: {Counter.TextFile, :post}
}
end
def expected_id_schema(Comment) do
%Id.Schema{
namespace: expected_namespace(:comments),
template: Example.IdSpecs.compiled_template("{counter}"),
schema: Comment,
counter: {Counter.TextFile, :comment}
}
end
end
defmodule VarMapping do
use Grax.Id.Spec
import Grax.Id.{UUID, Hash}
namespace "http://example.com/", prefix: :ex do
id [Example.VarMappingA, Example.VarMappingD], "foo/{gen}", var_mapping: :upcase_name
uuid5 Example.VarMappingB, namespace: :oid, name_var: :gen, var_mapping: :upcase_name
hash Example.VarMappingC, data: :gen, algorithm: :sha, var_mapping: :upcase_name
end
def upcase_name(%{name: name} = vars) do
assert vars.__schema__
assert is_atom(vars.__schema__)
{:ok, Map.put(vars, :gen, String.upcase(name))}
end
def upcase_name(vars), do: {:ok, vars}
def expected_id_schema(Example.VarMappingA) do
%Id.Schema{
namespace: Example.IdSpecs.expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("foo/{gen}"),
schema: [Example.VarMappingA, Example.VarMappingD],
var_mapping: {__MODULE__, :upcase_name}
}
end
def expected_id_schema(Example.VarMappingB) do
%Id.Schema{
namespace: Example.IdSpecs.expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Example.VarMappingB,
var_mapping: {__MODULE__, :upcase_name},
extensions: [
%Grax.Id.UUID{format: :default, version: 5, namespace: :oid, name_var: :gen}
]
}
end
def expected_id_schema(Example.VarMappingC) do
%Id.Schema{
namespace: Example.IdSpecs.expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{hash}"),
schema: Example.VarMappingC,
var_mapping: {__MODULE__, :upcase_name},
extensions: [
%Grax.Id.Hash{algorithm: :sha, data_variable: :gen}
]
}
end
end
defmodule SeparateCustomSelector do
def uuid4?(Example.WithCustomSelectedIdSchemaB, %{bar: "bar"}), do: true
def uuid4?(_, _), do: false
def uuid5?(Example.WithCustomSelectedIdSchemaB, %{bar: content}) when content != "", do: true
def uuid5?(_, _), do: false
end
defmodule CustomSelector do
use Grax.Id.Spec
import Grax.Id.UUID
namespace "http://example.com/", prefix: :ex do
id_schema "foo/{foo}", selector: :test_selector
uuid selector: {SeparateCustomSelector, :uuid5?},
uuid_version: 5,
uuid_name_var: :bar,
uuid_namespace: :url
uuid selector: {SeparateCustomSelector, :uuid4?}, uuid_version: 4
end
def test_selector(Example.WithCustomSelectedIdSchemaA, _), do: true
def test_selector(_, _), do: false
def expected_namespace(:ex), do: Example.IdSpecs.expected_namespace(:ex)
def expected_id_schema(:foo) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("foo/{foo}"),
schema: nil,
selector: {__MODULE__, :test_selector},
extensions: nil
}
end
def expected_id_schema(:uuid5) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: nil,
selector: {Example.IdSpecs.SeparateCustomSelector, :uuid5?},
extensions: [
%Grax.Id.UUID{format: :default, version: 5, name_var: :bar, namespace: :url}
]
}
end
def expected_id_schema(:uuid4) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: nil,
selector: {Example.IdSpecs.SeparateCustomSelector, :uuid4?},
extensions: [%Grax.Id.UUID{format: :default, version: 4}]
}
end
end
defmodule OptionInheritance do
use Grax.Id.Spec, uuid_format: :hex
import Grax.Id.{UUID, Hash}
namespace "http://example.com/",
prefix: :ex,
hash_algorithm: :sha,
uuid_version: 3,
uuid_namespace: :url do
namespace "foo/", uuid_version: 5, uuid_namespace: :oid, uuid_format: :default do
uuid User.canonical_email()
hash Post.content()
end
uuid Comment.content()
uuid5 Example.SelfLinked.name()
end
urn :uuid, uuid_version: 5, uuid_namespace: :oid do
uuid Example.Datatypes.integer()
end
def expected_namespace(:ex) do
%{
Example.IdSpecs.expected_namespace(:ex)
| options: [
uuid_format: :hex,
hash_algorithm: :sha,
uuid_version: 3,
uuid_namespace: :url
]
}
end
def expected_namespace(:foo) do
%Id.Namespace{
parent: expected_namespace(:ex),
uri: "http://example.com/foo/",
options: [uuid_version: 5, uuid_namespace: :oid, uuid_format: :default]
}
end
def expected_id_schema(User) do
%Id.Schema{
namespace: expected_namespace(:foo),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: User,
extensions: [
%Grax.Id.UUID{format: :default, version: 5, namespace: :oid, name_var: :canonical_email}
]
}
end
def expected_id_schema(Post) do
%Id.Schema{
namespace: expected_namespace(:foo),
template: Example.IdSpecs.compiled_template("{hash}"),
schema: Post,
extensions: [%Grax.Id.Hash{algorithm: :sha, data_variable: :content}]
}
end
def expected_id_schema(Comment) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Comment,
extensions: [
%Grax.Id.UUID{format: :hex, version: 3, namespace: :url, name_var: :content}
]
}
end
def expected_id_schema(Example.SelfLinked) do
%Id.Schema{
namespace: expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Example.SelfLinked,
extensions: [
%Grax.Id.UUID{format: :hex, version: 5, namespace: :url, name_var: :name}
]
}
end
def expected_id_schema(Example.Datatypes) do
%Id.Schema{
namespace: %Id.UrnNamespace{
nid: :uuid,
string: "urn:uuid:",
options: [uuid_version: 5, uuid_namespace: :oid]
},
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: Example.Datatypes,
extensions: [
%Grax.Id.UUID{format: :urn, version: 5, namespace: :oid, name_var: :integer}
]
}
end
end
defmodule AppConfigIdSpec do
use Grax.Id.Spec
import Grax.Id.UUID
namespace "http://example.com/", prefix: :ex do
uuid4 Grax.ConfigTest.TestSchema1
uuid4 Grax.ConfigTest.TestSchema2
end
def expected_id_schema(schema) do
%Id.Schema{
namespace: Example.IdSpecs.expected_namespace(:ex),
template: Example.IdSpecs.compiled_template("{uuid}"),
schema: schema,
extensions: [%Grax.Id.UUID{format: :default, version: 4}]
}
end
end
def expected_namespace(:ex) do
%Id.Namespace{
uri: "http://example.com/",
prefix: :ex
}
end
def compiled_template(template) do
{:ok, template} = YuriTemplate.parse(template)
template
end
end
|
test/support/example_id_specs.ex
| 0.789842 | 0.583352 |
example_id_specs.ex
|
starcoder
|
defmodule Currencyconverter.Transaction do
@moduledoc """
The Transaction context.
"""
import Ecto.Query, warn: false
alias Currencyconverter.Repo
alias Currencyconverter.Transaction.Transactions
@doc """
Returns the list of transactions.
## Examples
iex> list_transactions()
[%Transactions{}, ...]
"""
def list_transactions do
Repo.all(Transactions)
end
@doc """
Gets a single transactions.
Raises `Ecto.NoResultsError` if the Transactions does not exist.
## Examples
iex> get_transactions!(123)
%Transactions{}
iex> get_transactions!(456)
** (Ecto.NoResultsError)
"""
def get_transactions!(id), do: Repo.get!(Transactions, id)
@doc """
Gets all transactions for a user id.
Returns a empty list if the Transactions does not exist.
## Examples
iex> get_by_user_id!(123)
%Transactions{}
iex> get_by_user_id!(456)
[]
"""
def get_by_user_id!(user_id) do
Repo.all(
from(t in Transactions,
where: t.user_id == ^user_id,
select: [
t.user_id,
t.origin_currency,
t.origin_currency_value,
t.destination_currency,
t.conversion_rate,
t.inserted_at
]
)
)
end
@doc """
Creates a transactions.
## Examples
iex> create_transactions(%{field: value})
{:ok, %Transactions{}}
iex> create_transactions(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_transactions(attrs \\ %{}) do
%Transactions{}
|> Transactions.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a transactions.
## Examples
iex> update_transactions(transactions, %{field: new_value})
{:ok, %Transactions{}}
iex> update_transactions(transactions, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_transactions(%Transactions{} = transactions, attrs) do
transactions
|> Transactions.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a transactions.
## Examples
iex> delete_transactions(transactions)
{:ok, %Transactions{}}
iex> delete_transactions(transactions)
{:error, %Ecto.Changeset{}}
"""
def delete_transactions(%Transactions{} = transactions) do
Repo.delete(transactions)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking transactions changes.
## Examples
iex> change_transactions(transactions)
%Ecto.Changeset{data: %Transactions{}}
"""
def change_transactions(%Transactions{} = transactions, attrs \\ %{}) do
Transactions.changeset(transactions, attrs)
end
end
|
lib/currencyconverter/transactions.ex
| 0.913729 | 0.412116 |
transactions.ex
|
starcoder
|
defmodule Edeliver.Relup.Instruction do
@moduledoc """
This behaviour can be used to provide custom instructions to modify the relup.
They can be used in implementations
of the `Edeliver.Relup.Modification` behaviour.
Example:
defmodule Acme.Relup.LogUpgradeInstruction do
use Edeliver.Relup.Instruction
def modify_relup(instructions = %Instructions{up_instructions: up_instructions}, _config = %{}) do
log_instruction = {:apply, {:"Elixir.Logger", :info, [<<"Upgraded successfully">>]}}
%{instructions| up_instructions: [log_instruction|up_instructions]}
end
end
# using the instruction
defmodule Acme.Relup.Modification do
use Edeliver.Relup.Modification
def modify_relup(instructions = %Instructions{}, config = %{}) do
instructions |> Edeliver.Relup.DefaultModification.modify_relup(config) # use default modifications
|> Acme.Relup.LogUpgradeInstruction.modify_relup(config) # apply also custom instructions
end
end
edeliver already provides a set of relup instructions:
* `Edeliver.Relup.Instructions.CheckProcessesRunningOldCode`
* `Edeliver.Relup.Instructions.CheckRanchAcceptors`
* `Edeliver.Relup.Instructions.CheckRanchConnections`
* `Edeliver.Relup.Instructions.CodeChangeOnAppProcesses`
* `Edeliver.Relup.Instructions.FinishRunningRequests`
* `Edeliver.Relup.Instructions.Info`
* `Edeliver.Relup.Instructions.ReloadModules`
* `Edeliver.Relup.Instructions.RerunFailedRequests`
* `Edeliver.Relup.Instructions.ResumeAppProcesses`
* `Edeliver.Relup.Instructions.ResumeChannels`
* `Edeliver.Relup.Instructions.ResumeRanchAcceptors`
* `Edeliver.Relup.Instructions.Sleep`
* `Edeliver.Relup.Instructions.SoftPurge`
* `Edeliver.Relup.Instructions.StartSection`
* `Edeliver.Relup.Instructions.SuspendAppProcesses`
* `Edeliver.Relup.Instructions.SuspendChannels`
* `Edeliver.Relup.Instructions.SuspendRanchAcceptors`
"""
alias Edeliver.Relup.Instructions
@typedoc """
A function that inserts a new instruction or a set of new instructions
at a given place in the list of existing instructions. For most
[appup instructions](http://erlang.org/doc/man/appup.html) from the `relup` file
it matters when they will be executed, e.g before or after some other instructions.
"""
@type insert_fun :: ((Instructions.t|Instructions.instructions, new_instructions::Instructions.instruction|Instructions.instructions) -> updated_instructions::Instructions.t|Instructions.instructions)
@doc """
Modifies the relup file.
Modifies the `relup` file which will be used to upgrade (or downgrade) from one version to another
by inserting, removing, or shifting [appup instructions](http://erlang.org/doc/man/appup.html).
See `Edeliver.Relup.InsertInstruction` and `Edeliver.Relup.ShiftInstruction` for useful helpers to
insert / position the instructions and `Edeliver.Relup.RunnableInstruction` to execute custom code
during the upgrade.
"""
@callback modify_relup(Edeliver.Relup.Instructions.t, Edeliver.Relup.Config.t) :: Edeliver.Relup.Instructions.t
@doc false
defmacro __using__(_opts) do
quote do
@behaviour Edeliver.Relup.Instruction
alias Edeliver.Relup.Instructions
import Edeliver.Relup.InsertInstruction
import Edeliver.Relup.ShiftInstruction
end
end
end
|
lib/edeliver/relup/instruction.ex
| 0.847227 | 0.451568 |
instruction.ex
|
starcoder
|
defmodule Xgit.Repository.Plumbing do
@moduledoc ~S"""
Implements the "plumbing"-level commands for a git repository.
The functions in this module, like the "plumbing" commands in command-line
git, are typically not of interest to an end-user developer. Instead, these
are the raw building-block operations that are often composed together to
make the user-targeted "porcelain" commands.
Most of the functions in this module expect a `repository` argument, which
should be the process ID (PID) for a process that implements the `Xgit.Repository.Storage`
behaviour. It's not stated for each individual function, but if `repository`
is some other value, the error `Xgit.Repository.InvalidRepositoryError` is
raised.
"""
use Xgit.FileMode
import Xgit.Util.ForceCoverage
alias Xgit.Commit
alias Xgit.ContentSource
alias Xgit.DirCache
alias Xgit.DirCache.Entry, as: DirCacheEntry
alias Xgit.FilePath
alias Xgit.Object
alias Xgit.ObjectId
alias Xgit.ObjectType
alias Xgit.PersonIdent
alias Xgit.Ref
alias Xgit.Repository.Storage
alias Xgit.Repository.WorkingTree
alias Xgit.Tag
alias Xgit.Tree
## --- Objects ---
@typedoc ~S"""
Reason codes that can be returned by `hash_object/2`.
"""
@type hash_object_reason ::
Object.check_reason()
| FilePath.check_path_reason()
| FilePath.check_path_segment_reason()
| Storage.put_loose_object_reason()
@doc ~S"""
Computes an object ID and optionally writes that into the repository's object store.
Analogous to [`git hash-object`](https://git-scm.com/docs/git-hash-object).
## Parameters
`content` describes how this function should obtain the content.
(See `Xgit.ContentSource`.)
## Options
`:type`: the object's type
* Type: `Xgit.ObjectType`
* Default: `:blob`
* See [`-t` option on `git hash-object`](https://git-scm.com/docs/git-hash-object#Documentation/git-hash-object.txt--tlttypegt).
`:validate?`: `true` to verify that the object is valid for `:type`
* Type: boolean
* Default: `true`
* This is the inverse of the [`--literally` option on `git hash-object`](https://git-scm.com/docs/git-hash-object#Documentation/git-hash-object.txt---literally).
`:repo`: where the content should be stored
* Type: `Xgit.Repository.Storage` (PID)
* Default: `nil`
`:write?`: `true` to write the object into the repository
* Type: boolean
* Default: `false`
* This option is meaningless if `:repo` is not specified.
* See [`-w` option on `git hash-object`](https://git-scm.com/docs/git-hash-object#Documentation/git-hash-object.txt--w).
_TO DO:_ There is no support, at present, for filters as defined in a
`.gitattributes` file. See [issue #18](https://github.com/elixir-git/xgit/issues/18).
## Return Values
`{:ok, object_id}` if the object could be validated and assigned an ID.
`{:error, reason}` if unable. The relevant reason codes may come from:
* `Xgit.FilePath.check_path/2`
* `Xgit.FilePath.check_path_segment/2`
* `Xgit.Object.check/2`
* `Xgit.Repository.Storage.put_loose_object/2`.
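## Example
A minimal sketch (not part of the upstream docs): hashing the classic
`test content` blob yields the well-known git blob ID. No repository is
needed unless the `:repo`/`:write?` options are passed.
    {:ok, id} = hash_object('test content\n')
    # id == "d670460b4b4aece5915caf5c68d12f560a9fe3e4"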
"""
@spec hash_object(content :: ContentSource.t(),
type: ObjectType.t(),
validate?: boolean,
repo: Storage.t(),
write?: boolean
) ::
{:ok, object_id :: ObjectId.t()} | {:error, reason :: hash_object_reason}
def hash_object(content, opts \\ []) when not is_nil(content) and is_list(opts) do
%{type: type, validate?: validate?, repo: repo, write?: write?} =
validate_hash_object_options(opts)
%Object{content: content, type: type}
|> apply_filters(repo)
|> annotate_with_size()
|> assign_object_id()
|> validate_content(validate?)
|> maybe_write_to_repo(repo, write?)
|> hash_object_result(opts)
end
defp validate_hash_object_options(opts) do
type = Keyword.get(opts, :type, :blob)
unless ObjectType.valid?(type) do
raise ArgumentError,
"Xgit.Repository.Plumbing.hash_object/2: type #{inspect(type)} is invalid"
end
validate? = Keyword.get(opts, :validate?, true)
unless is_boolean(validate?) do
raise ArgumentError,
"Xgit.Repository.Plumbing.hash_object/2: validate? #{inspect(validate?)} is invalid"
end
repo = Keyword.get(opts, :repo)
unless repo == nil or Storage.valid?(repo) do
raise ArgumentError,
"Xgit.Repository.Plumbing.hash_object/2: repo #{inspect(repo)} is invalid"
end
write? = Keyword.get(opts, :write?, false)
unless is_boolean(write?) do
raise ArgumentError,
"Xgit.Repository.Plumbing.hash_object/2: write? #{inspect(write?)} is invalid"
end
if write? and repo == nil do
raise ArgumentError,
"Xgit.Repository.Plumbing.hash_object/2: write?: true requires a repo to be specified"
end
%{type: type, validate?: validate?, repo: repo, write?: write?}
end
defp apply_filters(object, _repository) do
# TO DO: Implement filters as described in attributes (for instance,
# end-of-line conversion). I expect this to happen by replacing the
# ContentSource implementation with another implementation that would
# perform the content remapping. For now, always a no-op.
# https://github.com/elixir-git/xgit/issues/18
object
end
defp annotate_with_size(%Object{content: content} = object),
do: %{object | size: ContentSource.length(content)}
defp validate_content(%Object{type: :blob} = object, _validate?), do: {:ok, object}
defp validate_content(object, false = _validate?), do: {:ok, object}
defp validate_content(%Object{content: content} = object, _validate?) when is_list(content) do
case Object.check(object) do
:ok -> cover {:ok, object}
{:error, reason} -> cover {:error, reason}
end
end
defp validate_content(%Object{content: content} = object, _validate?) do
validate_content(
%{object | content: content |> ContentSource.stream() |> Enum.to_list() |> Enum.concat()},
true
)
end
defp assign_object_id(%Object{content: content, type: type} = object),
do: %{object | id: ObjectId.calculate_id(content, type)}
defp maybe_write_to_repo({:ok, object}, _repo, false = _write?), do: cover({:ok, object})
defp maybe_write_to_repo({:ok, object}, repo, true = _write?) do
case Storage.put_loose_object(repo, object) do
:ok -> cover {:ok, object}
{:error, reason} -> cover {:error, reason}
end
end
defp maybe_write_to_repo({:error, reason}, _repo, _write?), do: cover({:error, reason})
defp hash_object_result({:ok, %Object{id: id}}, _opts), do: cover({:ok, id})
defp hash_object_result({:error, reason}, _opts), do: cover({:error, reason})
@typedoc ~S"""
Reason codes that can be returned by `cat_file/2`.
"""
@type cat_file_reason :: :invalid_object_id | Storage.get_object_reason()
@doc ~S"""
Retrieves the content, type, and size information for a single object in a
repository's object store.
Analogous to the first form of [`git cat-file`](https://git-scm.com/docs/git-cat-file).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
`object_id` is a string identifying the object.
## Return Value
`{:ok, object}` if the object could be found. `object` is an instance of
`Xgit.Object` and can be used to retrieve content and other information
about the underlying git object.
`{:error, :invalid_object_id}` if `object_id` can't be parsed as a valid git object ID.
`{:error, :not_found}` if the object does not exist in the database.
`{:error, :invalid_object}` if object was found, but invalid.
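## Example
A hedged write/read round trip (assumes `repo` is a running
`Xgit.Repository.Storage` process):
    {:ok, id} = hash_object('test content\n', repo: repo, write?: true)
    {:ok, %Xgit.Object{type: :blob, size: 13}} = cat_file(repo, id)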
"""
@spec cat_file(repository :: Storage.t(), object_id :: ObjectId.t()) ::
{:ok, Object} | {:error, reason :: cat_file_reason}
def cat_file(repository, object_id) when is_pid(repository) and is_binary(object_id) do
repository = Storage.assert_valid(repository)
if ObjectId.valid?(object_id) do
Storage.get_object(repository, object_id)
else
cover {:error, :invalid_object_id}
end
end
## --- Tree Objects ---
@typedoc ~S"""
Reason codes that can be returned by `cat_file_tree/2`.
"""
@type cat_file_tree_reason ::
:invalid_object_id
| Storage.get_object_reason()
| Tree.from_object_reason()
@doc ~S"""
Retrieves a `tree` object from a repository's object store and renders
it as an `Xgit.Tree` struct.
Analogous to
[`git cat-file -p`](https://git-scm.com/docs/git-cat-file#Documentation/git-cat-file.txt--p)
when the target object is a `tree` object.
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
`object_id` is a string identifying the object.
## Return Value
`{:ok, tree}` if the object could be found and understood as a tree.
`tree` is an instance of `Xgit.Tree` and can be used to retrieve
references to the members of that tree.
`{:error, :invalid_object_id}` if `object_id` can't be parsed as a valid git object ID.
`{:error, reason}` if otherwise unable. The relevant reason codes may come from:
* `Xgit.Repository.Storage.get_object/2`
* `Xgit.Tree.from_object/1`.
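## Example
Sketch only (assumes `tree_id` names an existing tree object in `repo`):
    {:ok, %Xgit.Tree{entries: entries}} = cat_file_tree(repo, tree_id)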
"""
@spec cat_file_tree(repository :: Storage.t(), object_id :: ObjectId.t()) ::
{:ok, tree :: Tree.t()} | {:error, reason :: cat_file_tree_reason}
def cat_file_tree(repository, object_id) when is_pid(repository) and is_binary(object_id) do
repository = Storage.assert_valid(repository)
with {:object_id_valid?, true} <- {:object_id_valid?, ObjectId.valid?(object_id)},
{:ok, object} <- Storage.get_object(repository, object_id) do
Tree.from_object(object)
else
{:error, reason} -> cover {:error, reason}
{:object_id_valid?, false} -> cover {:error, :invalid_object_id}
end
end
## --- Commit Objects ---
@typedoc ~S"""
Reason codes that can be returned by `cat_file_commit/2`.
"""
@type cat_file_commit_reason ::
:invalid_object_id
| Commit.from_object_reason()
| Storage.get_object_reason()
@doc ~S"""
Retrieves a `commit` object from a repository's object store and renders
it as an `Xgit.Commit` struct.
Analogous to
[`git cat-file -p`](https://git-scm.com/docs/git-cat-file#Documentation/git-cat-file.txt--p)
when the target object is a `commit` object.
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
`object_id` is a string identifying the object.
## Return Value
`{:ok, commit}` if the object could be found and understood as a commit.
`commit` is an instance of `Xgit.Commit` and can be used to retrieve
references to the members of that commit.
`{:error, :invalid_object_id}` if `object_id` can't be parsed as a valid git object ID.
`{:error, reason}` if otherwise unable. The relevant reason codes may come from:
* `Xgit.Commit.from_object/1`.
* `Xgit.Repository.Storage.get_object/2`
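## Example
Sketch only (assumes `commit_id` names an existing commit object in `repo`):
    {:ok, %Xgit.Commit{tree: tree_id, message: message}} = cat_file_commit(repo, commit_id)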
"""
@spec cat_file_commit(repository :: Storage.t(), object_id :: ObjectId.t()) ::
{:ok, commit :: Commit.t()} | {:error, reason :: cat_file_commit_reason}
def cat_file_commit(repository, object_id) when is_pid(repository) and is_binary(object_id) do
repository = Storage.assert_valid(repository)
with {:object_id_valid?, true} <- {:object_id_valid?, ObjectId.valid?(object_id)},
{:ok, object} <- Storage.get_object(repository, object_id) do
Commit.from_object(object)
else
{:error, reason} -> cover {:error, reason}
{:object_id_valid?, false} -> cover {:error, :invalid_object_id}
end
end
@typedoc ~S"""
Reason codes that can be returned by `commit_tree/2`.
"""
@type commit_tree_reason ::
:invalid_tree
| :invalid_parents
| :invalid_parent_ids
| :invalid_message
| :invalid_author
| :invalid_committer
| Storage.put_loose_object_reason()
@doc ~S"""
Creates a new commit object based on the provided tree object and parent commits.
A commit object may have any number of parents. With exactly one parent, it is an
ordinary commit. Having more than one parent makes the commit a merge between
several lines of history. Initial (root) commits have no parents.
Analogous to
[`git commit-tree`](https://git-scm.com/docs/git-commit-tree).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
## Options
`tree`: (`Xgit.ObjectId`, required) ID of tree object
`parents`: (list of `Xgit.ObjectId`) parent commit object IDs
`message`: (byte list, required) commit message
`author`: (`Xgit.PersonIdent`, required) author name, email, timestamp
`committer`: (`Xgit.PersonIdent`) committer name, email timestamp
(defaults to `author` if not specified)
## Return Value
`{:ok, object_id}` with the object ID for the commit that was generated.
`{:error, :invalid_tree}` if the `:tree` option refers to a tree that
does not exist.
`{:error, :invalid_parents}` if the `:parents` option is not a list.
`{:error, :invalid_parent_ids}` if the `:parents` option contains any entries that
do not reference valid commit objects.
`{:error, :invalid_message}` if the `:message` option isn't a valid byte string.
`{:error, :invalid_author}` if the `:author` option isn't a valid `PersonIdent` struct.
`{:error, :invalid_committer}` if the `:committer` option isn't a valid `PersonIdent` struct.
Reason codes may also come from `Xgit.Repository.Storage.put_loose_object/2`.
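## Example
A hypothetical root commit; `tree_id` would come from `write_tree/2` and the
`Xgit.PersonIdent` field values are illustrative only.
    author = %Xgit.PersonIdent{name: "A. U. Thor", email: "author@example.com", when: 1142878501000, tz_offset: 150}
    {:ok, commit_id} = commit_tree(repo, tree: tree_id, message: 'initial commit\n', author: author)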
"""
@spec commit_tree(repository :: Storage.t(),
tree: ObjectId.t(),
parents: [ObjectId.t()],
message: [byte],
author: PersonIdent.t(),
committer: PersonIdent.t()
) ::
{:ok, object_id :: ObjectId.t()}
| {:error, reason :: commit_tree_reason}
def commit_tree(repository, opts \\ []) when is_pid(repository) do
repository = Storage.assert_valid(repository)
with {_tree, _parents, _message, _author, _committer} = verified_args <-
validate_commit_tree_options(repository, opts),
commit <- make_commit(verified_args),
%{id: id} = object <- Commit.to_object(commit),
:ok <- Storage.put_loose_object(repository, object) do
cover {:ok, id}
else
{:error, reason} -> cover {:error, reason}
end
end
defp validate_commit_tree_options(repository, opts) do
with {:ok, tree_id} <- validate_tree(repository, Keyword.get(opts, :tree)),
{:ok, parent_ids} <- validate_parents(repository, Keyword.get(opts, :parents)),
{:ok, message} <- validate_message(Keyword.get(opts, :message)),
{:ok, author} <- validate_person_ident(Keyword.get(opts, :author), :invalid_author),
{:ok, committer} <-
validate_person_ident(Keyword.get(opts, :committer, author), :invalid_committer) do
cover {tree_id, parent_ids, message, author, committer}
else
{:error, reason} -> cover {:error, reason}
end
end
defp validate_tree(repository, tree_id) do
with true <- ObjectId.valid?(tree_id),
{:ok, %Object{id: id} = object} <- Storage.get_object(repository, tree_id),
{:ok, _tree} <- Tree.from_object(object) do
cover {:ok, id}
else
_ -> cover {:error, :invalid_tree}
end
end
defp validate_parents(_repository, nil), do: cover({:ok, []})
defp validate_parents(repository, parent_ids) when is_list(parent_ids) do
if Enum.all?(parent_ids, &commit_id_valid?(repository, &1)) do
cover {:ok, parent_ids}
else
cover {:error, :invalid_parent_ids}
end
end
defp validate_parents(_repository, _parents), do: cover({:error, :invalid_parents})
defp commit_id_valid?(repository, parent_id) do
with true <- ObjectId.valid?(parent_id),
{:ok, %Object{type: :commit}} <- Storage.get_object(repository, parent_id) do
cover true
else
_ -> cover false
end
end
defp validate_message(message) when is_list(message) do
if Enum.all?(message, &is_integer/1) do
cover {:ok, message}
else
cover {:error, :invalid_message}
end
end
defp validate_message(_message), do: cover({:error, :invalid_message})
defp validate_person_ident(person_ident, invalid_reason) do
if PersonIdent.valid?(person_ident) do
cover {:ok, person_ident}
else
cover {:error, invalid_reason}
end
end
defp make_commit({tree, parents, message, author, committer} = _verified_args) do
%Commit{
tree: tree,
parents: parents,
author: author,
committer: committer,
message: ensure_trailing_newline(message)
}
end
defp ensure_trailing_newline(message) do
if List.last(message) == 10 do
message
else
message ++ '\n'
end
end
## --- Tag Objects ---
@typedoc ~S"""
Reason codes that can be returned by `cat_file_tag/2`.
"""
@type cat_file_tag_reason ::
:invalid_object_id
| Storage.get_object_reason()
| Tag.from_object_reason()
@doc ~S"""
Retrieves a `tag` object from a repository's object store and renders
it as an `Xgit.Tag` struct.
Analogous to
[`git cat-file -p`](https://git-scm.com/docs/git-cat-file#Documentation/git-cat-file.txt--p)
when the target object is a `tag` object.
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
`object_id` is a string identifying the object.
## Return Value
`{:ok, tag}` if the object could be found and understood as a tag.
`tag` is an instance of `Xgit.Tag` and can be used to retrieve
references to the members of that tag.
`{:error, :invalid_object_id}` if `object_id` can't be parsed as a valid git object ID.
`{:error, reason}` if otherwise unable. The relevant reason codes may come from:
* `Xgit.Repository.Storage.get_object/2`
* `Xgit.Tag.from_object/1`.
"""
@spec cat_file_tag(repository :: Storage.t(), object_id :: ObjectId.t()) ::
{:ok, tag :: Tag.t()} | {:error, reason :: cat_file_tag_reason}
def cat_file_tag(repository, object_id) when is_pid(repository) and is_binary(object_id) do
repository = Storage.assert_valid(repository)
with {:object_id_valid?, true} <- {:object_id_valid?, ObjectId.valid?(object_id)},
{:ok, object} <- Storage.get_object(repository, object_id) do
Tag.from_object(object)
else
{:error, reason} -> cover {:error, reason}
{:object_id_valid?, false} -> cover {:error, :invalid_object_id}
end
end
## --- Working Tree ---
@typedoc ~S"""
Reason codes that can be returned by `ls_files_stage/1`.
"""
@type ls_files_stage_reason :: DirCache.from_iodevice_reason()
@doc ~S"""
Retrieves information about files in the working tree as described by the index file.
Analogous to
[`git ls-files --stage`](https://git-scm.com/docs/git-ls-files#Documentation/git-ls-files.txt---stage).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
## Return Value
`{:ok, entries}`. `entries` will be a list of `Xgit.DirCache.Entry` structs
in sorted order.
`{:error, :bare}` if `repository` doesn't have a working tree.
`{:error, reason}` if the index file for `repository` isn't valid. (See
`Xgit.DirCache.from_iodevice/1` for possible reason codes.)
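## Example
Sketch only (assumes `repo` has a working tree with a populated index):
    {:ok, entries} = ls_files_stage(repo)
    for %Xgit.DirCache.Entry{name: name, object_id: id} <- entries, do: {name, id}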
"""
@spec ls_files_stage(repository :: Storage.t()) ::
{:ok, entries :: [DirCacheEntry.t()]}
| {:error, reason :: ls_files_stage_reason}
def ls_files_stage(repository) when is_pid(repository) do
with {:ok, working_tree} <- working_tree_from_opts(repository),
{:ok, %DirCache{entries: entries} = _dir_cache} <-
WorkingTree.dir_cache(working_tree) do
cover {:ok, entries}
else
{:error, reason} -> cover {:error, reason}
end
end
@typedoc ~S"""
Cache info tuple `{mode, object_id, path}` to add to the index file.
"""
@type add_entry :: {mode :: FileMode.t(), object_id :: ObjectId.t(), path :: FilePath.t()}
@typedoc ~S"""
Reason codes that can be returned by `update_index_cache_info/2`.
"""
@type update_index_cache_info_reason ::
:invalid_entry
| :bare
| Xgit.Repository.WorkingTree.update_dir_cache_reason()
@doc ~S"""
Update the index file to reflect new contents.
Analogous to the `--cacheinfo` form of
[`git update-index`](https://git-scm.com/docs/git-update-index#Documentation/git-update-index.txt---cacheinfoltmodegtltobjectgtltpathgt).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to which the new entries should be written.
`add`: a list of tuples of `{mode, object_id, path}` entries to add to the dir cache.
In the event of collisions with existing entries, the existing entries will
be replaced with the corresponding new entries.
`remove`: a list of paths to remove from the dir cache. All versions of the file,
regardless of stage, will be removed.
## Return Value
`:ok` if successful.
`{:error, :bare}` if `repository` doesn't have a working tree.
`{:error, :invalid_entry}` if any tuple passed to `add` or `remove` was invalid.
`{:error, reason}` if unable. The relevant reason codes may come from
`Xgit.Repository.WorkingTree.update_dir_cache/3`.
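## Example
A hypothetical update; paths are charlists (see `Xgit.FilePath`) and `blob_id`
would come from `hash_object/2`.
    :ok = update_index_cache_info(repo, [{0o100644, blob_id, 'hello.txt'}], ['stale.txt'])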
"""
@spec update_index_cache_info(
repository :: Storage.t(),
add :: [add_entry],
remove :: [FilePath.t()]
) ::
:ok | {:error, update_index_cache_info_reason()}
def update_index_cache_info(repository, add, remove \\ [])
when is_pid(repository) and is_list(add) and is_list(remove) do
with {:ok, working_tree} <- working_tree_from_opts(repository),
{:items_to_add, add} when is_list(add) <- {:items_to_add, parse_add_entries(add)},
{:items_to_remove, remove} when is_list(remove) <-
{:items_to_remove, parse_remove_entries(remove)} do
WorkingTree.update_dir_cache(working_tree, add, remove)
else
{:items_to_add, _} -> cover {:error, :invalid_entry}
{:items_to_remove, _} -> cover {:error, :invalid_entry}
{:error, reason} -> cover {:error, reason}
end
end
defp parse_add_entries(add) do
if Enum.all?(add, &valid_add?/1) do
Enum.map(add, &map_add_entry/1)
else
cover :invalid
end
end
defp valid_add?({mode, object_id, path})
when is_file_mode(mode) and is_binary(object_id) and is_list(path),
do: ObjectId.valid?(object_id) and FilePath.valid?(path)
defp valid_add?(_), do: cover(false)
defp map_add_entry({mode, object_id, path}) do
%DirCacheEntry{
name: path,
stage: 0,
object_id: object_id,
mode: mode,
size: 0,
ctime: 0,
ctime_ns: 0,
mtime: 0,
mtime_ns: 0,
dev: 0,
ino: 0,
uid: 0,
gid: 0,
assume_valid?: false,
extended?: false,
skip_worktree?: false,
intent_to_add?: false
}
end
defp parse_remove_entries(remove) do
if Enum.all?(remove, &valid_remove?/1) do
Enum.map(remove, &map_remove_entry/1)
else
cover :invalid
end
end
defp valid_remove?(name) when is_list(name), do: cover(true)
defp valid_remove?(_), do: cover(false)
defp map_remove_entry(name), do: cover({name, :all})
@typedoc ~S"""
Reason codes that can be returned by `read_tree/3`.
"""
@type read_tree_reason :: :bare | WorkingTree.read_tree_reason()
@doc ~S"""
Read a `tree` object (and its descendants) and populate the index accordingly.
Does not update files in the working tree itself.
Analogous to [`git read-tree`](https://git-scm.com/docs/git-read-tree).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
`object_id` is the object ID of the root working tree. The special name `:empty`
may be used to empty the index.
## Options
`:missing_ok?`: `true` to ignore any objects that are referenced by the tree
structures that are not present in the object database. Normally this would be an error.
## Return Value
`:ok` if successful.
`{:error, :bare}` if `repository` doesn't have a working tree.
Reason codes may also come from the following functions:
* `Xgit.DirCache.to_iodevice/2`
* `Xgit.Repository.Storage.get_object/2`
* `Xgit.Repository.Storage.WorkingTree.read_tree/3`
* `Xgit.Tree.from_object/1`
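## Example
Sketch only: populate the index from an existing tree, or clear it with `:empty`.
    :ok = read_tree(repo, tree_id)
    :ok = read_tree(repo, :empty)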
## TO DO
Implement `--prefix` option. https://github.com/elixir-git/xgit/issues/175
"""
@spec read_tree(repository :: Storage.t(), object_id :: ObjectId.t(), missing_ok?: boolean) ::
:ok | {:error, reason :: read_tree_reason}
def read_tree(repository, object_id, opts \\ [])
when is_pid(repository) and (is_binary(object_id) or object_id == :empty) and is_list(opts) do
with {:ok, working_tree} <- working_tree_from_opts(repository),
_missing_ok? <- validate_read_tree_options(opts) do
if object_id == :empty do
WorkingTree.reset_dir_cache(working_tree)
else
WorkingTree.read_tree(working_tree, object_id, opts)
end
else
{:error, reason} -> cover {:error, reason}
end
end
defp validate_read_tree_options(opts) do
missing_ok? = Keyword.get(opts, :missing_ok?, false)
unless is_boolean(missing_ok?) do
raise ArgumentError,
"Xgit.Repository.Plumbing.read_tree/3: missing_ok? #{inspect(missing_ok?)} is invalid"
end
missing_ok?
end
@typedoc ~S"""
Reason codes that can be returned by `write_tree/2`.
"""
@type write_tree_reason ::
:bare
| DirCache.to_tree_objects_reason()
| DirCache.from_iodevice_reason()
| Storage.put_loose_object_reason()
| WorkingTree.write_tree_reason()
@doc ~S"""
Translates the current working tree, as reflected in its index file, to one or more
tree objects.
The working tree must be in a fully-merged state.
Analogous to [`git write-tree`](https://git-scm.com/docs/git-write-tree).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
## Options
`:missing_ok?`: `true` to ignore any objects that are referenced by the index
file that are not present in the object database. Normally this would be an error.
`:prefix`: (`Xgit.FilePath`) if present, returns the `object_id` for the tree at
the given subdirectory. If not present, writes a tree corresponding to the root.
(The entire tree is written in either case.)
## Return Value
`{:ok, object_id}` with the object ID for the tree that was generated. (If the exact tree
specified by the index already existed, it will return that existing tree's ID.)
`{:error, :bare}` if `repository` doesn't have a working tree.
Reason codes may also come from the following functions:
* `Xgit.DirCache.to_tree_objects/2`
* `Xgit.DirCache.from_iodevice/1`
* `Xgit.Repository.Storage.put_loose_object/2`
* `Xgit.Repository.Storage.WorkingTree.write_tree/2`
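## Example
Sketch only: writes whatever the index currently holds and returns the root tree ID.
    {:ok, tree_id} = write_tree(repo, missing_ok?: true)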
"""
@spec write_tree(repository :: Storage.t(), missing_ok?: boolean, prefix: FilePath.t()) ::
{:ok, object_id :: ObjectId.t()}
| {:error, reason :: write_tree_reason}
def write_tree(repository, opts \\ []) when is_pid(repository) do
with {:ok, working_tree} <- working_tree_from_opts(repository),
_ <- validate_write_tree_options(opts) do
cover WorkingTree.write_tree(working_tree, opts)
else
{:error, reason} -> cover {:error, reason}
end
end
defp validate_write_tree_options(opts) do
missing_ok? = Keyword.get(opts, :missing_ok?, false)
unless is_boolean(missing_ok?) do
raise ArgumentError,
"Xgit.Repository.Plumbing.write_tree/2: missing_ok? #{inspect(missing_ok?)} is invalid"
end
prefix = Keyword.get(opts, :prefix, [])
unless prefix == [] or FilePath.valid?(prefix) do
raise ArgumentError,
"Xgit.Repository.Plumbing.write_tree/2: prefix #{inspect(prefix)} is invalid (should be a charlist, not a String)"
end
{missing_ok?, prefix}
end
## -- References --
@typedoc ~S"""
Reason codes that can be returned by `update_ref/4`.
"""
@type update_ref_reason :: Storage.put_ref_reason() | :target_not_found | :target_not_commit
@doc ~S"""
Update the object name stored in a ref.
Analogous to [`git update-ref`](https://git-scm.com/docs/git-update-ref).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) to search for the object.
`name` is the name of the reference to update. (See `t:Xgit.Ref.name/0`.)
`new_value` is the object ID to be written at this reference. (Use `Xgit.ObjectId.zero/0` to delete the reference.)
## Options
`old_target`: If present, a ref with this name must already exist and the `target`
value must match the object ID provided in this option. (There is a special value `:new`
which instead requires that the named ref must **not** exist.)
## TO DO
Follow symbolic links, but only if they start with `refs/`.
(https://github.com/elixir-git/xgit/issues/241)
## Return Value
`:ok` if written successfully.
`{:error, :target_not_found}` if the target object does not exist in the repository.
`{:error, :target_not_commit}` if the target object is not of type `commit`.
Reason codes may also come from the following functions:
* `Xgit.Repository.Storage.put_ref/3`
* `Xgit.Repository.Storage.delete_ref/3`
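## Example
Sketch only (assumes `commit_id` names an existing commit); writing the zero ID
deletes the ref.
    :ok = update_ref(repo, "refs/heads/master", commit_id)
    :ok = update_ref(repo, "refs/heads/master", Xgit.ObjectId.zero())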
"""
@spec update_ref(repository :: Storage.t(), name :: Ref.name(), new_value :: ObjectId.t(),
old_target: ObjectId.t()
) :: :ok | {:error, reason :: update_ref_reason}
def update_ref(repository, name, new_value, opts \\ [])
when is_pid(repository) and is_binary(name) and is_binary(new_value) and is_list(opts) do
repository = Storage.assert_valid(repository)
repo_opts = validate_update_ref_opts(opts)
if new_value == ObjectId.zero() do
Storage.delete_ref(repository, name, repo_opts)
else
put_ref(repository, name, new_value, repo_opts)
end
end
defp validate_update_ref_opts(opts) do
case validate_old_target(Keyword.get(opts, :old_target, nil)) do
nil -> cover []
old_target -> cover [{:old_target, old_target}]
end
end
defp validate_old_target(nil) do
cover nil
end
defp validate_old_target(:new) do
cover :new
end
defp validate_old_target(old_target) do
if ObjectId.valid?(old_target) do
cover old_target
else
raise ArgumentError,
"Xgit.Repository.Plumbing.update_ref/4: old_target #{inspect(old_target)} is invalid"
end
end
defp put_ref(repository, name, new_value, repo_opts) do
with {:object, {:ok, %Object{type: type}}} <-
{:object, Storage.get_object(repository, new_value)},
{:type, :commit} <- {:type, type} do
Storage.put_ref(repository, %Ref{name: name, target: new_value}, repo_opts)
else
{:object, {:error, :not_found}} -> cover {:error, :target_not_found}
{:type, _} -> cover {:error, :target_not_commit}
end
end
@typedoc ~S"""
Reason codes that can be returned by `get_symbolic_ref/2`.
"""
@type get_symbolic_ref_reason :: :not_symbolic_ref | Storage.get_ref_reason()
@doc ~S"""
Returns the target ref for an existing symbolic ref.
Analogous to the one-argument form of
[`git symbolic-ref`](https://git-scm.com/docs/git-symbolic-ref).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) in which to create the symbolic reference.
`name` is the name of the symbolic reference to read. (See `t:Xgit.Ref.name/0`.)
## Return Value
`{:ok, ref_name}` if read successfully. `ref_name` is the name of the targeted reference.
`{:error, :not_symbolic_ref}` if `name` refers to a ref that is not a symbolic ref.
Reason codes may also come from the following functions:
* `Xgit.Repository.Storage.get_ref/3`
"""
@spec get_symbolic_ref(
repository :: Storage.t(),
name :: Ref.name()
) :: {:ok, name :: Ref.name()} | {:error, reason :: get_symbolic_ref_reason}
def get_symbolic_ref(repository, name) when is_pid(repository) and is_binary(name) do
repository = Storage.assert_valid(repository)
case Storage.get_ref(repository, name, follow_link?: false) do
{:ok, %Ref{target: "ref: " <> target}} ->
cover {:ok, target}
{:error, :enotdir} ->
cover {:error, :not_found}
{:error, reason} ->
cover {:error, reason}
{:ok, _} ->
cover {:error, :not_symbolic_ref}
end
end
@typedoc ~S"""
Reason codes that can be returned by `put_symbolic_ref/4`.
"""
@type put_symbolic_ref_reason :: Storage.put_ref_reason()
@doc ~S"""
Creates or updates a symbolic ref to point at a specific branch.
Analogous to the two-argument form of
[`git symbolic-ref`](https://git-scm.com/docs/git-symbolic-ref).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) in which to create the symbolic reference.
`name` is the name of the symbolic reference to create or update. (See `t:Xgit.Ref.name/0`.)
`new_target` is the name of the reference that should be targeted by this symbolic reference.
This reference need not exist.
## Options
TO DO: Add option to specify ref log message.
https://github.com/elixir-git/xgit/issues/251
## Return Value
`:ok` if written successfully.
Reason codes may also come from the following functions:
* `Xgit.Repository.Storage.put_ref/3`
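## Example
Sketch of a write/read pair with `get_symbolic_ref/2` above:
    :ok = put_symbolic_ref(repo, "HEAD", "refs/heads/master")
    {:ok, "refs/heads/master"} = get_symbolic_ref(repo, "HEAD")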
"""
@spec put_symbolic_ref(
repository :: Storage.t(),
name :: Ref.name(),
new_target :: Ref.name(),
opts :: Keyword.t()
) :: :ok | {:error, reason :: put_symbolic_ref_reason}
def put_symbolic_ref(repository, name, new_target, opts \\ [])
when is_pid(repository) and is_binary(name) and is_binary(new_target) and is_list(opts) do
repository = Storage.assert_valid(repository)
Storage.put_ref(repository, %Ref{name: name, target: "ref: #{new_target}"},
follow_link?: false
)
end
@typedoc ~S"""
Reason codes that can be returned by `delete_symbolic_ref/2`.
"""
@type delete_symbolic_ref_reason :: Storage.delete_ref_reason()
@doc ~S"""
Deletes a symbolic ref.
Analogous to [`git symbolic-ref --delete`](https://git-scm.com/docs/git-symbolic-ref#Documentation/git-symbolic-ref.txt---delete).
## Parameters
`repository` is the `Xgit.Repository.Storage` (PID) in which to create the symbolic reference.
`name` is the name of the symbolic reference to delete. (See `t:Xgit.Ref.name/0`.)
## Return Value
`:ok` if deleted successfully.
Reason codes may also come from the following functions:
* `Xgit.Repository.Storage.delete_ref/3`
"""
@spec delete_symbolic_ref(
repository :: Storage.t(),
name :: Ref.name()
) :: :ok | {:error, reason :: delete_symbolic_ref_reason}
def delete_symbolic_ref(repository, name)
when is_pid(repository) and is_binary(name) do
repository
|> Storage.assert_valid()
|> Storage.delete_ref(name, follow_link?: false)
end
## --- Options ---
# Parse working tree and repository from arguments and options.
defp working_tree_from_opts(repository, opts \\ []) when is_pid(repository) and is_list(opts) do
repository = Storage.assert_valid(repository)
case working_tree_from_repo_or_opts(repository, opts) do
working_tree when is_pid(working_tree) -> cover {:ok, working_tree}
nil -> cover {:error, :bare}
end
end
defp working_tree_from_repo_or_opts(repository, _opts) do
# TO DO: Allow working tree to be specified via options.
# https://github.com/elixir-git/xgit/issues/133
# (NOTE: Should follow through to ensure all relevant plumbing
# modules have that option documented when implemented.)
# For now, only recognize default working tree.
Storage.default_working_tree(repository)
end
end
|
lib/xgit/repository/plumbing.ex
| 0.885841 | 0.469703 |
plumbing.ex
|
starcoder
|
defmodule Redex.Protocol.Parser do
import NimbleParsec
import Injector
inject Redex.Protocol
alias Redex.Protocol.State
@callback parse(State.t()) :: {:ok, [binary], State.t()} | {:error, any}
crlf = string("\r\n")
empty_bulk_string = string("$0\r\n\r\n") |> replace("")
short_bulk_strings =
for len <- 1..24 do
string("$#{len}\r\n")
|> ignore()
|> ascii_string([], len)
|> ignore(crlf)
end
bulk_string = choice([empty_bulk_string | short_bulk_strings])
large_bulk_string =
string("$")
|> ignore()
|> integer(min: 2, max: 9)
|> ignore(crlf)
defparsecp :parse_array,
string("*")
|> ignore()
|> integer(min: 1, max: 6)
|> ignore(crlf)
|> label("a * followed by a number terminated with a CRLF")
|> repeat(bulk_string)
defparsecp :parse_string,
choice([large_bulk_string, times(bulk_string, min: 1)])
|> label("a RESP array of bulk strings")
def parse(state = %State{buffer: buffer}) do
case parse_array(buffer) do
{:ok, acc, buffer, _, _, _} ->
%{state | acc: acc, buffer: buffer}
|> parse_cont(length(acc) - 1)
{:error, _, buffer, _, _, _} when buffer in ["", "\r"] ->
state
|> Protocol.recv(0)
|> parse()
{:error, error, _, _, _, _} ->
case buffer do
<<"*", _::bytes>> -> {:error, "ERR Protocol error: #{error}"}
_ -> parse_inline(state)
end
end
end
def parse(error = {:error, _}), do: error
defp parse_inline(state = %State{buffer: buffer}) do
buffer
|> String.replace("\r\n", "\n", global: false)
|> String.split("\n", parts: 2, trim: false)
|> case do
[_buffer] ->
state
|> Protocol.recv(0)
|> parse()
[line, buffer] ->
{:ok, String.split(line), %{state | buffer: buffer}}
end
end
defp parse_cont(state = %State{acc: [len | cmd]}, len) do
{:ok, cmd, %{state | acc: []}}
end
defp parse_cont(state = %State{acc: acc, buffer: ""}, len) when len < hd(acc) do
state
|> Protocol.recv(0)
|> parse_cont(len)
end
defp parse_cont(state = %State{acc: acc, buffer: buffer}, len) when len < hd(acc) do
case parse_string(buffer) do
{:ok, [size], buffer, _, _, _} when is_integer(size) and byte_size(buffer) >= size + 2 ->
case buffer do
<<string::bytes-size(size), "\r\n", rest::bytes>> ->
%{state | acc: acc ++ [string], buffer: rest}
|> parse_cont(len + 1)
_ ->
{:error, "ERR Protocol error: expected bulk string terminated with a CRLF"}
end
{:ok, [size], buffer, _, _, _} when is_integer(size) ->
%{state | buffer: buffer}
|> Protocol.recv(size + 2 - byte_size(buffer))
|> case do
state = %State{buffer: <<string::bytes-size(size), "\r\n">>} ->
%{state | acc: acc ++ [string], buffer: ""}
|> parse_cont(len + 1)
%State{} ->
{:error, "ERR Protocol error: expected bulk string terminated with a CRLF"}
error = {:error, _} ->
error
end
{:ok, strings, buffer, _, _, _} ->
%{state | acc: acc ++ strings, buffer: buffer}
|> parse_cont(len + length(strings))
{:error, _, _, _, _, _} when byte_size(buffer) < 17 ->
state
|> Protocol.recv(0)
|> parse_cont(len)
{:error, error, _, _, _, _} ->
{:error, "ERR Protocol error: #{error}"}
end
end
defp parse_cont(%State{acc: acc}, _) do
{:error, "ERR Protocol error: expected a RESP array of length #{hd(acc)}"}
end
defp parse_cont(error = {:error, _}, _), do: error
end
|
lib/redex/protocol/parser.ex
| 0.536313 | 0.430477 |
parser.ex
|
starcoder
|
defmodule AdventOfCode.Day08 do
@moduledoc false
use AdventOfCode
def part1(input) do
preprocess_input(input)
|> Enum.map(&Enum.at(&1, 1))
|> List.flatten()
|> Enum.map(&String.length/1)
|> Enum.frequencies()
|> Enum.filter(&Enum.member?([2, 3, 4, 7], elem(&1, 0)))
|> Enum.map(&elem(&1, 1))
|> Enum.sum()
end
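# Strategy: digits 1, 4, 7 and 8 light a unique number of segments (2, 4, 3 and
# 7 respectively), so they can be identified directly; the remaining digits are
# then derived by set algebra on those known patterns (see the sum/sub helpers
# at the bottom of this module).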
def part2(input) do
preprocess_input(input)
|> Enum.map(fn [signals, outputs] ->
signals =
Enum.map(signals, fn s ->
String.graphemes(s) |> Enum.sort()
end)
outputs =
Enum.map(outputs, fn o ->
String.graphemes(o) |> Enum.sort()
end)
all_numbers = Enum.uniq(signals ++ outputs)
{signals, outputs, all_numbers}
end)
|> Enum.map(fn {signals, outputs, all_numbers} ->
mappings =
Enum.reduce(all_numbers, %{}, fn segment, acc ->
cond do
length(segment) == 2 -> Map.put(acc, 1, segment)
length(segment) == 4 -> Map.put(acc, 4, segment)
length(segment) == 3 -> Map.put(acc, 7, segment)
length(segment) == 7 -> Map.put(acc, 8, segment)
true -> acc
end
end)
{signals, outputs, mappings}
end)
|> Enum.map(fn {signals, outputs, mappings} ->
one = Map.get(mappings, 1)
four = Map.get(mappings, 4)
# seven = Map.get(mappings, 7)
eight = Map.get(mappings, 8)
two_three_or_five = Enum.filter(signals, &(length(&1) == 5))
n1 = Enum.at(two_three_or_five, 0)
n2 = Enum.at(two_three_or_five, 1)
n3 = Enum.at(two_three_or_five, 2)
three =
  cond do
    # 2 + 5 = 8, so the remaining one is 3
    sum_segments(n1, n2) == eight -> n3
    sum_segments(n1, n3) == eight -> n2
    sum_segments(n2, n3) == eight -> n1
  end
zero =
  sub_segments(
    eight,
    sub_segments(
      sub_segments(four, one),
      sub_segments(four, three)
    )
  )
nine = sum_segments(three, sub_segments(four, one))
six = Enum.filter(signals, &(length(&1) == 6 && &1 != zero && &1 != nine)) |> List.flatten()
two_or_five = Enum.reject(two_three_or_five, &(&1 == three))
n1 = Enum.at(two_or_five, 0)
n2 = Enum.at(two_or_five, 1)
# 2 + 6 = 8
# 5 + 6 = 6
five =
cond do
sum_segments(n1, six) == six -> n1
sum_segments(n2, six) == six -> n2
end
two = Enum.reject(two_or_five, &(&1 == five)) |> List.flatten()
final_mappings =
mappings
|> Map.put(0, zero)
|> Map.put(2, two)
|> Map.put(3, three)
|> Map.put(5, five)
|> Map.put(6, six)
|> Map.put(9, nine)
{outputs, final_mappings}
end)
|> Enum.map(fn {outputs, mappings} ->
Enum.reduce(outputs, [], fn num, acc ->
acc ++ [Enum.filter(mappings, fn {_, v} -> v == num end) |> Enum.at(0) |> elem(0)]
end)
|> Enum.join()
end)
|> Enum.map(&String.to_integer/1)
|> Enum.sum()
end
defp sum_segments(s1, s2), do: (s1 ++ s2) |> Enum.uniq() |> Enum.sort()
defp sub_segments(s1, s2), do: (s1 -- s2) |> Enum.sort()
defp preprocess_input(input) do
input
|> String.trim()
|> String.replace(" |\n", "|")
|> String.split("\n")
|> Enum.map(fn line ->
String.split(line, "|")
|> Enum.map(&String.split/1)
end)
end
end
|
lib/day08.ex
| 0.682785 | 0.452294 |
day08.ex
|
starcoder
|
defmodule OMG.Watcher.ExitProcessor.Request do
@moduledoc """
Encapsulates the state of processing of `OMG.Watcher.ExitProcessor` pipelines.
Holds all the necessary query data and the respective responses.
NOTE: this is highly experimental, to test out new patterns to follow when doing the Functional Core vs Imperative
Shell separation. **Do not yet** follow outside of here. I'm not sure whether such struct offers much and it
has its problems. Decide and update this note after OMG-384 or OMG-383
EDIT: the multitude and duplication of the fields here is a clear sign that this design loses.
EDIT2: probably splitting this struct up, so that there isn't so many fields (`IFEInclusionRequest`,
`ValidityRequest`, `SEChallengeRequest` etc), might be the way to go
"""
alias OMG.Block
alias OMG.Utxo
defstruct [
:eth_height_now,
:blknum_now,
utxos_to_check: [],
spends_to_get: [],
blknums_to_get: [],
ife_input_utxos_to_check: [],
ife_input_spends_to_get: [],
piggybacked_blknums_to_get: [],
utxo_exists_result: [],
blocks_result: [],
ife_input_utxo_exists_result: [],
ife_input_spending_blocks_result: [],
se_exiting_pos: nil,
se_creating_blocks_to_get: [],
se_creating_blocks_result: [],
se_spending_blocks_to_get: [],
se_spending_blocks_result: [],
se_exit_id_to_get: nil,
se_exit_id_result: nil
]
@type t :: %__MODULE__{
eth_height_now: nil | pos_integer,
blknum_now: nil | pos_integer,
utxos_to_check: list(Utxo.Position.t()),
spends_to_get: list(Utxo.Position.t()),
blknums_to_get: list(pos_integer),
ife_input_utxos_to_check: list(Utxo.Position.t()),
ife_input_spends_to_get: list(Utxo.Position.t()),
piggybacked_blknums_to_get: list(pos_integer),
utxo_exists_result: list(boolean),
blocks_result: list(Block.t()),
ife_input_utxo_exists_result: list(boolean),
ife_input_spending_blocks_result: list(Block.t()),
se_exiting_pos: nil | Utxo.Position.t(),
se_creating_blocks_to_get: list(pos_integer),
se_creating_blocks_result: list(Block.t()),
se_spending_blocks_to_get: list(Utxo.Position.t()),
se_spending_blocks_result: list(Block.t()),
se_exit_id_to_get: nil | binary(),
se_exit_id_result: nil | pos_integer()
}
end
|
apps/omg_watcher/lib/omg_watcher/exit_processor/request.ex
| 0.72662 | 0.421671 |
request.ex
|
starcoder
|
defmodule Blockchain.Blocktree do
@moduledoc """
Blocktree provides functions for adding blocks to the
overall blocktree and forming a consistent blockchain.
"""
defmodule InvalidBlockError do
defexception [:message]
end
alias Blockchain.{Block, Chain}
defstruct best_block: nil
@type t :: %__MODULE__{best_block: Block.t() | nil}
@doc """
Creates a new empty blocktree.
"""
@spec new_tree() :: t
def new_tree() do
%__MODULE__{}
end
@doc """
Verifies a block is valid, and if so, adds it to the block tree.
This performs four steps.
1. Find the parent block
2. Verify the block against its parent block
3. If valid, put the block into our DB
4. Update the tree's best block if the new block surpasses it
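A hedged sketch (assumes `chain`, `block`, and `db` were set up elsewhere):
    {:ok, tree} = Blocktree.verify_and_add_block(Blocktree.new_tree(), chain, block, db)
    tree.best_block.header.number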
"""
@spec verify_and_add_block(t, Chain.t(), Block.t(), MerklePatriciaTree.DB.db(), boolean()) ::
{:ok, t} | :parent_not_found | {:invalid, [atom()]}
def verify_and_add_block(
blocktree,
chain,
block,
db,
do_validate \\ true,
specified_block_hash \\ nil
) do
parent =
case Block.get_parent_block(block, db) do
:genesis -> nil
{:ok, parent} -> parent
:not_found -> :parent_not_found
end
validation =
if do_validate,
do: Block.validate(block, chain, parent, db),
else: :valid
with :valid <- validation do
{:ok, block_hash} = Block.put_block(block, db, specified_block_hash)
# Cache computed block hash
block = %{block | block_hash: block_hash}
updated_blocktree = update_best_block(blocktree, block)
{:ok, updated_blocktree}
end
end
@spec update_best_block(t, Block.t()) :: t
defp update_best_block(blocktree, block) do
best_block = blocktree.best_block
new_best_block =
if is_nil(best_block) || block.header.number > best_block.header.number ||
(block.header.number == best_block.header.number &&
block.header.difficulty > best_block.header.difficulty),
do: block,
else: best_block
%{blocktree | best_block: new_best_block}
end
end
|
apps/blockchain/lib/blockchain/blocktree.ex
| 0.83929 | 0.567487 |
blocktree.ex
|
starcoder
|
defmodule EctoFlaggableEnum do
@moduledoc """
Provides `defenumf/2` macro for defining an Flaggable Enum Ecto type.
"""
@doc """
Defines an enum custom `Ecto.Type`.
It can be used like any other `Ecto.Type` by passing it to a field in your model's
schema block. For example:
import EctoFlaggableEnum
defenumf PropertiesEnum, poisonous: 1, explosive: 2, radioactive: 4, dangerous: 7, packaged: 8
defmodule Package do
use Ecto.Model
schema "packages" do
field :properties, PropertiesEnum
end
end
In the above example, the `:properties` will behave like an flaggable enum and will allow you to
pass an integer and enumerable of `atom`, `string` or `integer` to it. This applies to saving the model,
invoking `Ecto.Changeset.cast/3`, or performing a query on the properties field. Let's
do a few examples:
iex> package = Repo.insert!(%Package{properties: 1})
iex> Repo.get(Package, package.id).properties
#MapSet<[:poisonous]>
iex> %{changes: changes} = cast(%Package{}, %{"properties" => ["poisonous"]}, ~w(properties), [])
iex> changes.properties
#MapSet<[:poisonous]>
iex> from(p in Package, where: p.properties == [:poisonous]) |> Repo.all() |> length
1
Passing a value that the custom Enum type does not recognize will result in an error.
iex> Repo.insert!(%Package{properties: [:none]})
** (Elixir.EctoFlaggableEnum.Error) :none is not a valid enum value
The enum type `PropertiesEnum` will also have a reflection function for inspecting the
enum map in runtime.
iex> PropertiesEnum.__enum_map__()
[poisonous: 1, explosive: 2, radioactive: 4, dangerous: 7, packaged: 8]
"""
use Bitwise
defmacro defenumf(module, enum) when is_list(enum) do
quote do
kw = unquote(enum) |> Macro.escape()
defmodule unquote(module) do
use Ecto.Type
@atom_int_kw kw
@atom_int_map kw |> Enum.into(%{})
@int_atom_map for {atom, int} <- kw, into: %{}, do: {int, atom}
@string_int_map for {atom, int} <- kw, into: %{}, do: {Atom.to_string(atom), int}
@string_atom_map for {atom, int} <- kw, into: %{}, do: {Atom.to_string(atom), atom}
@valid_values Keyword.values(@atom_int_kw) ++
Keyword.keys(@atom_int_kw) ++ Map.keys(@string_int_map)
def type, do: :integer
def cast(term) do
EctoFlaggableEnum.Type.cast(term, @int_atom_map, @string_atom_map)
end
def load(int) when is_integer(int) do
{:ok, EctoFlaggableEnum.Type.int_to_set(@int_atom_map, int)}
end
def dump(term) do
EctoFlaggableEnum.Type.dump(term, @atom_int_map, @int_atom_map, @string_atom_map)
end
# Reflection
def __enum_map__(), do: @atom_int_kw
def __valid_values__(), do: @valid_values
end
end
end
defmodule Type do
@spec cast(list | integer | MapSet.t(), map, map) :: {:ok, [MapSet.t()]} | :error
def cast(list, int_atom_map, string_atom_map) when is_list(list) do
do_cast(list, [], int_atom_map, string_atom_map)
end
def cast(set = %MapSet{}, int_enum_map, string_atom_map) do
cast(set |> MapSet.to_list(), int_enum_map, string_atom_map)
end
def cast(int, int_atom_map, _) when is_integer(int) do
{:ok, int_to_set(int_atom_map, int)}
end
def cast(_, _, _), do: :error
defp do_cast([string | rest], casted, int_to_atom, string_to_atom) when is_binary(string) do
if string_to_atom[string] do
do_cast(rest, [string_to_atom[string] | casted], int_to_atom, string_to_atom)
else
:error
end
end
defp do_cast([atom | rest], casted, int_to_atom, string_to_atom) when is_atom(atom) do
if atom in (string_to_atom |> Map.values()) do
do_cast(rest, [atom | casted], int_to_atom, string_to_atom)
else
:error
end
end
defp do_cast([int | rest], casted, int_to_atom, string_to_atom) when is_integer(int) do
if int_to_atom[int] do
do_cast(rest, [int_to_atom[int] | casted], int_to_atom, string_to_atom)
else
:error
end
end
defp do_cast([], casted, _, _) do
{:ok, MapSet.new(casted)}
end
@spec dump(any, map, map, map) :: {:ok, integer} | :error
def dump(val, atom_to_int, int_to_atom, string_to_atom) do
case cast(val, int_to_atom, string_to_atom) do
{:ok, set} -> {:ok, set_to_int(set, atom_to_int)}
:error -> :error
end
end
def int_to_set(enum_map, int) do
enum_map
|> Enum.filter(fn {aint, _atom} -> (aint &&& int) == aint end)
|> Enum.map(fn {_, atom} -> atom end)
|> MapSet.new()
end
def set_to_int(set, atom_to_int) do
set
|> Enum.map(fn
key -> atom_to_int[key]
end)
|> Enum.reduce(0, fn v, acc -> acc ||| v end)
end
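# Illustrative round trip using the PropertiesEnum values from the moduledoc
# (3 = 1 ||| 2, so both flags decode):
#
#     int_to_set(%{1 => :poisonous, 2 => :explosive}, 3)
#     #=> #MapSet<[:explosive, :poisonous]>
#     set_to_int(MapSet.new([:poisonous, :explosive]), %{poisonous: 1, explosive: 2})
#     #=> 3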
end
end
|
lib/ecto_flaggable_enum.ex
| 0.824462 | 0.527621 |
ecto_flaggable_enum.ex
|
starcoder
|
defmodule NcsaHmac.Authentication do
alias NcsaHmac.PlugConnSigner
@authorization_regexp ~r/\w+ ([^:]+):(.+)$/
@accepted_algorithms [:sha512, :sha384, :sha256]
@moduledoc """
The Authentication module provides functions for validating an HMAC signature on a web request.
"""
@doc """
Authenticate the header 'Authorization' signature.
The `authenticate!` method performs several steps first:
Load the resource with id/auth_id given extracted from the 'Authorization' signature.
Get the `signing_key` from the resource.
Determine which, if any, of the currently accepted cryptographic algorithms: #{inspect @accepted_algorithms}
was used to sign the request.
Pass the request (conn) to the Signer module to calculate a signature and
compare the computed signature to the signature sent with the request.
If any of the elements used to compute the signature changed between when the
request was signed and received, `authenticate!` will fail.
Requests coming in to get authenticated must include the `Date` header field.
If the `Date` field is absent, the Signer module will set the `Date` field and
the reuqest will never be able to authenticate.
Required opts:
* `:model` - Specifies the module name of the model to load resources from
Optional opts:
* `:as` - Specifies the `resource_name` to use
* `:only` - Specifies which actions to authorize
* `:except` - Specifies which actions for which to skip authorization
* `:id_name` - Specifies the name of the id in `conn.params`, defaults to "id"
* `:id_field` - Specifies the name of the ID field in the database for searching :id_name value, defaults to "id".
* `:key_field` - Specifies the name of the signing_key field in the database for searching, defaults to "signing_key".
* `:not_found_handler` - Specify a handler function to be called if the resource is not found
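Example (hypothetical): `ApiKey` and `halt_with_401/2` are illustrative names, not
part of this library, and an earlier plug is assumed to have loaded the matching
record into `conn.assigns`.
    case NcsaHmac.Authentication.authenticate!(conn, model: ApiKey) do
      {:ok, true} -> conn
      {:error, message} -> halt_with_401(conn, message)
    end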
"""
def authenticate!(conn, opts) do
try do
auth_signature = Enum.at(Plug.Conn.get_req_header(conn, "authorization"), 0)
[auth_id, signature] = unpack_signature!(auth_signature)
signing_key = signing_key(conn, opts, auth_id)
verify_signature!(conn, signature, signing_key)
rescue
e in NcsaHmac.AuthorizationError -> {:error, e.message}
end
end
@doc """
Parse and extract the auth_id from the authorization signature.
"""
def auth_id(conn) do
[auth_id, _hash] = conn
|> Plug.Conn.get_req_header("authorization")
|> Enum.at(0)
|> unpack_signature!
auth_id
end
defp unpack_signature!(nil), do: authorization_error("Failed to parse authorization_signature: nil")
defp unpack_signature!(signature) do
auth_match = String.match?(signature, @authorization_regexp)
unless auth_match do authorization_error("Failed to parse authorization_signature: #{signature}") end
parse_key_signature(signature)
end
defp parse_key_signature(signature) do
auth_list = String.split("#{signature}")
String.split(Enum.at(auth_list, 1), ":", parts: 2)
end
defp signing_key(conn, opts, auth_id) do
signing_key = case opts[:key_field] do
nil -> :signing_key
"" -> :signing_key
_ -> String.to_atom(opts[:key_field])
end
key_map = conn
|> resource(opts, auth_id)
|> Map.take([signing_key])
resource_signing_key = key_map[signing_key]
unless resource_signing_key do
authorization_error "The signature authorization_id does not match any records. auth_id: #{auth_id}"
end
resource_signing_key
end
defp resource(conn, opts, auth_id) do
resource = conn.assigns[resource_name(opts)] || conn.assigns[:api_key]
unless resource do
authorization_error "The signature authorization_id does not match any records. auth_id: #{auth_id}"
end
resource
end
defp verify_signature!(conn, signature, signing_key) do
valid_algorithm = Enum.reject(@accepted_algorithms, fn(algo) ->
signature != PlugConnSigner.signature(conn, signing_key, algo)
end)
#Calculate and compare the signature again, so we don't return true by default
validate_signature(conn, signature, signing_key, valid_algorithm)
end
defp validate_signature(_, signature, _, []) do
authorization_error "Error: computed signature does not match header signature: #{signature}"
end
defp validate_signature(conn, signature, signing_key, algorithm) do
{:ok, signature == PlugConnSigner.signature(conn, signing_key, Enum.at(algorithm, 0))}
end
defp authorization_error(message) do
raise NcsaHmac.AuthorizationError, message: message
end
defp resource_name(opts) do
NcsaHmac.Plug.resource_name(opts)
end
end
|
lib/ncsa_hmac/authentication.ex
| 0.763175 | 0.621728 |
authentication.ex
|
starcoder
|
defmodule Exbee.TxResultFrame do
@moduledoc """
Received upon completion of an `Exbee.TxFrame` or `Exbee.ExplicitTxFrame`. The
`:status` attribute indicates whether the transmission was successful.
Possible status values include:
* `:ok` (`0x00`)
* `:mac_ack_failure` (`0x01`)
* `:cca_failure` (`0x02`)
* `:invalid_endpoint` (`0x15`)
* `:network_ack_failure` (`0x21`)
* `:network_not_joined` (`0x22`)
* `:self_addressed` (`0x23`)
* `:address_not_found` (`0x24`)
* `:route_not_found` (`0x25`)
* `:relay_failure` (`0x26`)
* `:invalid_binding_table_index` (`0x2B`)
* `:resource_error` (`0x2C`)
* `:aps_transmission` (`0x2D`)
* `:aps_unicast_transmission` (`0x2E`)
* `:resource_error` (`0x32`)
* `:oversized_payload` (`0x74`)
* `:indirect_message_failure` (`0x75`)
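A hypothetical decoding round trip (payload bytes invented for illustration; the
layout follows the `decode/2` clause below):
    frame = <<0x8B, 0x01, 0x12, 0x34, 0x00, 0x00, 0x01>>
    {:ok, result} = Exbee.DecodableFrame.decode(%Exbee.TxResultFrame{}, frame)
    result.status     #=> :ok
    result.discovery  #=> :address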
"""
@type t :: %__MODULE__{
        id: integer,
        network_addr: integer,
        retry_count: integer,
        status: atom,
        discovery: atom
      }
defstruct id: 0x01, network_addr: nil, retry_count: 0, status: nil, discovery: nil
defimpl Exbee.DecodableFrame do
@statuses %{
0x00 => :ok,
0x01 => :mac_ack_failure,
0x02 => :cca_failure,
0x15 => :invalid_endpoint,
0x21 => :network_ack_failure,
0x22 => :network_not_joined,
0x23 => :self_addressed,
0x24 => :address_not_found,
0x25 => :route_not_found,
0x26 => :relay_failure,
0x2B => :invalid_binding_table_index,
0x2C => :resource_error,
0x2D => :aps_transmission,
0x2E => :aps_unicast_transmission,
0x32 => :resource_error,
0x74 => :oversized_payload,
0x75 => :indirect_message_failure
}
@discoveries %{
0x00 => :no_overhead,
0x01 => :address,
0x02 => :route,
0x03 => :address_and_route,
0x40 => :extended_timeout
}
def decode(frame, encoded_binary) do
case encoded_binary do
<<0x8B, id::8, network_addr::16, retry_count::8, status::8, discovery::8>> ->
decoded_frame = %{
frame
| id: id,
network_addr: network_addr,
retry_count: retry_count,
status: @statuses[status],
discovery: @discoveries[discovery]
}
{:ok, decoded_frame}
_ ->
{:error, :invalid_binary}
end
end
end
end
|
lib/exbee/frames/tx_result_frame.ex
| 0.816589 | 0.431464 |
tx_result_frame.ex
|
starcoder
|